Dataset columns (from the source table):
code: string (22 to 1.05M characters)
apis: list (1 to 3.31k items)
extract_api: string (75 to 3.25M characters)
# Generated by Django 2.2.6 on 2019-10-15 04:47

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('meet', '0002_auto_20191010_0543'),
    ]

    operations = [
        migrations.AlterField(
            model_name='chatmailbox',
            name='initiated',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mailboxes_passive', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='chatmailbox',
            name='initiator',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mailboxes_active', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='chatmailbox',
            name='last_sent',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='chatmessage',
            name='mailbox',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messages', to='meet.ChatMailbox'),
        ),
        migrations.AlterField(
            model_name='chatmessage',
            name='sender',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messages', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='friendship',
            name='initiated',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='friend_passive', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='friendship',
            name='initiator',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='friend_active', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='friendship',
            name='since',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='marker',
            name='created',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='marker',
            name='creator',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='markers', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='marker',
            name='last_modified',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='meetexternallinks',
            name='parent',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='external_links', to='meet.Meet'),
        ),
        migrations.AlterField(
            model_name='meetstar',
            name='meet',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stars', to='meet.Meet'),
        ),
        migrations.AlterField(
            model_name='meetstar',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stars', to=settings.AUTH_USER_MODEL),
        ),
    ]
[ "django.db.models.ForeignKey", "django.db.models.DateTimeField" ]
[((407, 537), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""mailboxes_passive"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='mailboxes_passive', to=settings.AUTH_USER_MODEL)\n", (424, 537), False, 'from django.db import migrations, models\n'), ((662, 791), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""mailboxes_active"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='mailboxes_active', to=settings.AUTH_USER_MODEL)\n", (679, 791), False, 'from django.db import migrations, models\n'), ((916, 951), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (936, 951), False, 'from django.db import migrations, models\n'), ((1079, 1194), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""messages"""', 'to': '"""meet.ChatMailbox"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='messages', to='meet.ChatMailbox')\n", (1096, 1194), False, 'from django.db import migrations, models\n'), ((1316, 1437), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""messages"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='messages', to=settings.AUTH_USER_MODEL)\n", (1333, 1437), False, 'from django.db import migrations, models\n'), ((1561, 1688), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""friend_passive"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='friend_passive', to=settings.AUTH_USER_MODEL)\n", (1578, 1688), False, 'from django.db import migrations, models\n'), ((1812, 1938), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""friend_active"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='friend_active', to=settings.AUTH_USER_MODEL)\n", (1829, 1938), False, 'from django.db import migrations, models\n'), ((2058, 2097), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2078, 2097), False, 'from django.db import migrations, models\n'), ((2220, 2259), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2240, 2259), False, 'from django.db import migrations, models\n'), ((2382, 2502), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""markers"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='markers', to=settings.AUTH_USER_MODEL)\n", (2399, 2502), False, 'from django.db import migrations, models\n'), ((2626, 2661), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2646, 2661), False, 'from django.db import migrations, models\n'), ((2794, 2908), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""external_links"""', 'to': '"""meet.Meet"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='external_links', to='meet.Meet')\n", (2811, 2908), False, 'from django.db import migrations, models\n'), ((3025, 3130), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""stars"""', 'to': '"""meet.Meet"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='stars', to='meet.Meet')\n", (3042, 3130), False, 'from django.db import migrations, models\n'), ((3248, 3366), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""stars"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='stars', to=settings.AUTH_USER_MODEL)\n", (3265, 3366), False, 'from django.db import migrations, models\n')]
from collections import OrderedDict
import copy
from typing import Dict
from typing import Optional

import optuna
from optuna.distributions import BaseDistribution
from optuna.study import BaseStudy


class IntersectionSearchSpace(object):
    """A class to calculate the intersection search space of a :class:`~optuna.study.BaseStudy`.

    Intersection search space contains the intersection of parameter distributions that have
    been suggested in the completed trials of the study so far.
    If there are multiple parameters that have the same name but different distributions,
    neither is included in the resulting search space
    (i.e., the parameters with dynamic value ranges are excluded).

    Note that an instance of this class is supposed to be used for only one study.
    If different studies are passed to
    :func:`~optuna.samplers.IntersectionSearchSpace.calculate`,
    a :obj:`ValueError` is raised.
    """

    def __init__(self) -> None:
        self._cursor: int = -1
        self._search_space: Optional[Dict[str, BaseDistribution]] = None
        self._study_id: Optional[int] = None

    def calculate(
        self, study: BaseStudy, ordered_dict: bool = False
    ) -> Dict[str, BaseDistribution]:
        """Returns the intersection search space of the :class:`~optuna.study.BaseStudy`.

        Args:
            study:
                A study with completed trials.
            ordered_dict:
                A boolean flag determining the return type.
                If :obj:`False`, the returned object will be a :obj:`dict`.
                If :obj:`True`, the returned object will be an :obj:`collections.OrderedDict`
                sorted by keys, i.e. parameter names.

        Returns:
            A dictionary containing the parameter names and parameter's distributions.

        Raises:
            ValueError:
                If different studies are passed into this method.
        """

        if self._study_id is None:
            self._study_id = study._study_id
        else:
            # Note that the check below is meaningless when `InMemoryStorage` is used
            # because `InMemoryStorage.create_new_study` always returns the same study ID.
            if self._study_id != study._study_id:
                raise ValueError("`IntersectionSearchSpace` cannot handle multiple studies.")

        next_cursor = self._cursor
        for trial in reversed(study.get_trials(deepcopy=False)):
            if self._cursor > trial.number:
                break

            if not trial.state.is_finished():
                next_cursor = trial.number

            if trial.state != optuna.trial.TrialState.COMPLETE:
                continue

            if self._search_space is None:
                self._search_space = copy.copy(trial.distributions)
                continue

            delete_list = []
            for param_name, param_distribution in self._search_space.items():
                if param_name not in trial.distributions:
                    delete_list.append(param_name)
                elif trial.distributions[param_name] != param_distribution:
                    delete_list.append(param_name)

            for param_name in delete_list:
                del self._search_space[param_name]

        self._cursor = next_cursor
        search_space = self._search_space or {}

        if ordered_dict:
            search_space = OrderedDict(sorted(search_space.items(), key=lambda x: x[0]))

        return copy.deepcopy(search_space)


def intersection_search_space(
    study: BaseStudy, ordered_dict: bool = False
) -> Dict[str, BaseDistribution]:
    """Return the intersection search space of the :class:`~optuna.study.BaseStudy`.

    Intersection search space contains the intersection of parameter distributions that have
    been suggested in the completed trials of the study so far.
    If there are multiple parameters that have the same name but different distributions,
    neither is included in the resulting search space
    (i.e., the parameters with dynamic value ranges are excluded).

    .. note::
        :class:`~optuna.samplers.IntersectionSearchSpace` provides the same functionality with
        a much faster way. Please consider using it if you want to reduce execution time
        as much as possible.

    Args:
        study:
            A study with completed trials.
        ordered_dict:
            A boolean flag determining the return type.
            If :obj:`False`, the returned object will be a :obj:`dict`.
            If :obj:`True`, the returned object will be an :obj:`collections.OrderedDict`
            sorted by keys, i.e. parameter names.

    Returns:
        A dictionary containing the parameter names and parameter's distributions.
    """

    return IntersectionSearchSpace().calculate(study, ordered_dict=ordered_dict)
[ "copy.deepcopy", "copy.copy" ]
[((3491, 3518), 'copy.deepcopy', 'copy.deepcopy', (['search_space'], {}), '(search_space)\n', (3504, 3518), False, 'import copy\n'), ((2781, 2811), 'copy.copy', 'copy.copy', (['trial.distributions'], {}), '(trial.distributions)\n', (2790, 2811), False, 'import copy\n')]
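For context, a minimal usage sketch of the class above. It assumes a recent optuna installation and that the module above is importable; the objective and parameter names are illustrative only.

import optuna


def objective(trial):
    # Trial 0 suggests "x" and "y"; later trials suggest only "x", so the
    # intersection search space converges to just the "x" distribution.
    x = trial.suggest_float("x", -10, 10)
    if trial.number == 0:
        return x ** 2 + trial.suggest_int("y", 0, 5)
    return x ** 2


study = optuna.create_study()
study.optimize(objective, n_trials=2)

search_space = IntersectionSearchSpace().calculate(study, ordered_dict=True)
print(search_space)  # only the "x" distribution should remain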
from flee import flee, SimulationSettings
from datamanager import handle_refugee_data
from datamanager import DataTable #DataTable.subtract_dates()
from flee import InputGeography
import numpy as np
import outputanalysis.analysis as a
import sys
import visualization.vis
from datetime import datetime
from datetime import timedelta


def AddInitialRefugees(e, d, loc):
    """ Add the initial refugees to a location, using the location name"""
    num_refugees = int(d.get_field(loc.name, 0, FullInterpolation=True))
    for i in range(0, num_refugees):
        e.addAgent(location=loc)


def date_to_sim_days(date):
    return DataTable.subtract_dates(date,"2013-12-01")


if __name__ == "__main__":

    end_time = 820
    last_physical_day = 820

    if len(sys.argv)>1:
        if (sys.argv[1]).isnumeric():
            end_time = int(sys.argv[1])
            last_physical_day = int(sys.argv[1])
        else:
            end_time = 820
            last_physical_day = 820
            duration = flee.SimulationSettings.SimulationSettings.ReadFromCSV(sys.argv[1])
            if duration>0:
                end_time = duration
                last_physical_day = end_time

    e = flee.Ecosystem()

    ig = InputGeography.InputGeography()

    ig.ReadLocationsFromCSV("examples/car_input_csv/locations.csv")
    ig.ReadLinksFromCSV("examples/car_input_csv/routes.csv")
    ig.ReadClosuresFromCSV("examples/car_input_csv/closures.csv")

    e,lm = ig.StoreInputGeographyInEcosystem(e)

    #print("Network data loaded")

    d = handle_refugee_data.RefugeeTable(csvformat="generic", data_directory="source_data/car2014/", start_date="2013-12-01", data_layout="data_layout.csv")

    #Correcting for overestimations due to inaccurate level 1 registrations in five of the camps.
    #These errors led to a perceived large drop in refugee population in all of these camps.
    #We correct by linearly scaling the values down to make the last level 1 registration match the first level 2 registration value.
    #To our knowledge, all level 2 registration procedures were put in place by the end of 2016.
    d.correctLevel1Registrations("Amboko","2015-09-30")
    d.correctLevel1Registrations("Belom","2015-08-31")
    d.correctLevel1Registrations("Dosseye","2015-09-30")
    d.correctLevel1Registrations("Gondje","2015-09-30")
    lm["Moyo"].capacity *= d.correctLevel1Registrations("Moyo","2015-06-02") #also "2014-05-11" and "2015-06-02"
    d.correctLevel1Registrations("East","2014-09-28")
    d.correctLevel1Registrations("Adamaoua","2014-10-19")
    d.correctLevel1Registrations("Bili","2016-06-30")
    d.correctLevel1Registrations("Boyabu","2016-06-30")
    d.correctLevel1Registrations("Inke","2014-06-30")
    d.correctLevel1Registrations("Betou","2014-03-22")

    lm["Amboko"].capacity = d.getMaxFromData("Amboko", last_physical_day)
    lm["Belom"].capacity = d.getMaxFromData("Belom", last_physical_day) # set manually.
    lm["Dosseye"].capacity = d.getMaxFromData("Dosseye", last_physical_day)
    lm["Gondje"].capacity = d.getMaxFromData("Gondje", last_physical_day)
    #lm["Moyo"].capacity = d.getMaxFromData("Moyo", last_physical_day ) # blip in the data set, set capacity manually.
    lm["East"].capacity = d.getMaxFromData("East", last_physical_day)
    lm["Adamaoua"].capacity = d.getMaxFromData("Adamaoua", last_physical_day)
    lm["Mole"].capacity = d.getMaxFromData("Mole", last_physical_day)
    lm["Bili"].capacity = d.getMaxFromData("Bili", last_physical_day)
    #lm["Bossobolo"].capacity = d.getMaxFromData("Bossobolo", last_physical_day) #camp excluded
    lm["Boyabu"].capacity = d.getMaxFromData("Boyabu", last_physical_day)
    lm["Mboti"].capacity = d.getMaxFromData("Mboti", last_physical_day)
    lm["Inke"].capacity = d.getMaxFromData("Inke", last_physical_day)
    lm["Betou"].capacity = d.getMaxFromData("Betou", last_physical_day)
    lm["Brazaville"].capacity = d.getMaxFromData("Brazaville", last_physical_day)

    output_header_string = "Day,"

    camp_locations = ["Amboko","Belom","Dosseye","Gondje","Moyo","East","Adamaoua","Mole","Bili","Boyabu","Mboti","Inke","Betou","Brazaville"]
    #TODO: Add Camps from CSV based on their location type.

    for l in camp_locations:
        AddInitialRefugees(e,d,lm[l])
        output_header_string += "%s sim,%s data,%s error," % (lm[l].name, lm[l].name, lm[l].name)

    output_header_string += "Total error,refugees in camps (UNHCR),total refugees (simulation),raw UNHCR refugee count,refugees in camps (simulation),refugee_debt"

    print(output_header_string)

    # Set up a mechanism to incorporate temporary decreases in refugees
    refugee_debt = 0
    refugees_raw = 0 #raw (interpolated) data from TOTAL UNHCR refugee count only.

    visoutput = visualization.vis.VisManager(SimulationSettings.SimulationSettings.DefaultVisPath / "car.json")
    start_date = datetime(2013, 12, 1)
    current_date = datetime(2013, 12, 1)

    for t in range(0,end_time):
        ig.AddNewConflictZones(e,t)

        # Determine number of new refugees to insert into the system.
        new_refs = d.get_daily_difference(t, FullInterpolation=True) - refugee_debt
        refugees_raw += d.get_daily_difference(t, FullInterpolation=True)
        if new_refs < 0:
            refugee_debt = -new_refs
            new_refs = 0
        elif refugee_debt > 0:
            refugee_debt = 0

        #Insert refugee agents
        for i in range(0, new_refs):
            e.addAgent(e.pick_conflict_location())

        e.refresh_conflict_weights()
        t_data = t

        e.enact_border_closures(t)
        e.evolve()

        #Calculation of error terms
        errors = []
        abs_errors = []
        loc_data = []
        camps = []
        for i in camp_locations:
            camps += [lm[i]]
            loc_data += [d.get_field(i, t)]

        refugees_in_camps_sim = 0
        for c in camps:
            refugees_in_camps_sim += c.numAgents

        # calculate errors
        j=0
        for i in camp_locations:
            errors += [a.rel_error(lm[i].numAgents, loc_data[j])]
            abs_errors += [a.abs_error(lm[i].numAgents, loc_data[j])]
            j += 1

        output = "%s" % t
        for i in range(0,len(errors)):
            output += ",%s,%s,%s" % (lm[camp_locations[i]].numAgents, loc_data[i], errors[i])

        if refugees_raw>0:
            #output_string += ",%s,%s,%s,%s" % (float(np.sum(abs_errors))/float(refugees_raw), int(sum(loc_data)), e.numAgents(), refugees_raw)
            output += ",%s,%s,%s,%s,%s,%s" % (float(np.sum(abs_errors))/float(refugees_raw), int(sum(loc_data)), e.numAgents(), refugees_raw, refugees_in_camps_sim, refugee_debt)
        else:
            output += ",0,0,0,0,0,0,0"
            #output_string += ",0"

        print(output)

        assert t == visoutput.addTimeStep(current_date.strftime("%Y-%m-%d"))
        visoutput.addLocationDataAtTime(t, e.locations)
        current_date = current_date + timedelta(days=1)

    visoutput.setMetaData([5.725311, 19.488373], start_date.strftime("%Y-%m-%d"), "CAR", "CAR visualization")
    visoutput.saveVisData()
[ "outputanalysis.analysis.abs_error", "flee.flee.Ecosystem", "flee.InputGeography.InputGeography", "numpy.sum", "datamanager.handle_refugee_data.RefugeeTable", "datamanager.DataTable.subtract_dates", "datetime.datetime", "datetime.timedelta", "outputanalysis.analysis.rel_error", "flee.flee.SimulationSettings.SimulationSettings.ReadFromCSV" ]
[((614, 658), 'datamanager.DataTable.subtract_dates', 'DataTable.subtract_dates', (['date', '"""2013-12-01"""'], {}), "(date, '2013-12-01')\n", (638, 658), False, 'from datamanager import DataTable\n'), ((1104, 1120), 'flee.flee.Ecosystem', 'flee.Ecosystem', ([], {}), '()\n', (1118, 1120), False, 'from flee import flee, SimulationSettings\n'), ((1129, 1160), 'flee.InputGeography.InputGeography', 'InputGeography.InputGeography', ([], {}), '()\n', (1158, 1160), False, 'from flee import InputGeography\n'), ((1440, 1598), 'datamanager.handle_refugee_data.RefugeeTable', 'handle_refugee_data.RefugeeTable', ([], {'csvformat': '"""generic"""', 'data_directory': '"""source_data/car2014/"""', 'start_date': '"""2013-12-01"""', 'data_layout': '"""data_layout.csv"""'}), "(csvformat='generic', data_directory=\n 'source_data/car2014/', start_date='2013-12-01', data_layout=\n 'data_layout.csv')\n", (1472, 1598), False, 'from datamanager import handle_refugee_data\n'), ((4691, 4712), 'datetime.datetime', 'datetime', (['(2013)', '(12)', '(1)'], {}), '(2013, 12, 1)\n', (4699, 4712), False, 'from datetime import datetime\n'), ((4730, 4751), 'datetime.datetime', 'datetime', (['(2013)', '(12)', '(1)'], {}), '(2013, 12, 1)\n', (4738, 4751), False, 'from datetime import datetime\n'), ((943, 1010), 'flee.flee.SimulationSettings.SimulationSettings.ReadFromCSV', 'flee.SimulationSettings.SimulationSettings.ReadFromCSV', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (997, 1010), False, 'from flee import flee, SimulationSettings\n'), ((6577, 6594), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (6586, 6594), False, 'from datetime import timedelta\n'), ((5722, 5763), 'outputanalysis.analysis.rel_error', 'a.rel_error', (['lm[i].numAgents', 'loc_data[j]'], {}), '(lm[i].numAgents, loc_data[j])\n', (5733, 5763), True, 'import outputanalysis.analysis as a\n'), ((5786, 5827), 'outputanalysis.analysis.abs_error', 'a.abs_error', (['lm[i].numAgents', 'loc_data[j]'], {}), '(lm[i].numAgents, loc_data[j])\n', (5797, 5827), True, 'import outputanalysis.analysis as a\n'), ((6199, 6217), 'numpy.sum', 'np.sum', (['abs_errors'], {}), '(abs_errors)\n', (6205, 6217), True, 'import numpy as np\n')]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import matplotlib.patches as mpatches

#importing dataset and converting to datasframe
data = pd.read_csv('heart.csv', header=None)
df = pd.DataFrame(data) #data frame
y = df.iloc[:, 13]
y = y-1

def chol_age():
    x = df.iloc[:, 0:5]
    x = x.drop(x.columns[1:4], axis=1)
    chol_avgs = x.groupby(0, sort=True).mean()
    ages = (chol_avgs[4].index.values)
    avgs = (chol_avgs[4].values)
    plt.plot(ages,avgs,'g-')
    plt.title('Variation of Cholestrol Levels with Age')
    plt.xlabel('Age(years)')
    plt.ylabel('Serum Cholestrol in mg/dl')

def heart_atrack_heart_rate_bp():
    x = df.iloc[:, 0:14]
    x[14] = np.round(df[3], -1)
    x_dis = x[x[13] == 2]
    bp_set_dis = x_dis.groupby(14, sort=True)
    nums_dis = (bp_set_dis.count()[0]).index.values
    bps_dis = (bp_set_dis.count()[0]).values
    bar2 = plt.bar(nums_dis+2, bps_dis, color='r', width=2)
    x_nor = x[x[13] == 1]
    bp_set_nor = x_nor.groupby(14, sort=True)
    nums_nor = (bp_set_nor.count()[0]).index.values
    bps_nor = (bp_set_nor.count()[0]).values
    bar1 = plt.bar(nums_nor, bps_nor, color='g', width=2)
    plt.title('Resting blood pressure as heart risk indicator')
    plt.xlabel('Resting Blood Pressure Bucket')
    plt.ylabel('Number of Patients')
    plt.legend((bar1[0], bar2[0]), ('Safe', 'At Risk'))

def pie_chart_chest_pain():
    x = df.iloc[:, 0:3]
    sets = x.groupby(2).count()
    fin_lab = ['Typical Angina', 'Atypical Angina', 'Non-anginal Pain', 'Asymptotic']
    values = (sets[0].values)
    plt.pie(values, labels=fin_lab, colors=['yellowgreen', 'gold', 'lightskyblue', 'lightcoral'], explode = [0,0.2,0,0], shadow=True, autopct='%1.1f%%', startangle=90)
    plt.title('Chest Pain Types')

def scatter_chart():
    x = df.iloc[:, 0:13]
    sc = plt.scatter(x[7],x[4], c=y, cmap='summer')
    plt.title('Dataset Scatter')
    classes = ['Safe', 'At Risk']
    class_colours = ['g','y']
    recs = []
    for i in range(0,len(class_colours)):
        recs.append(mpatches.Rectangle((0,0),1,1,fc=class_colours[i]))
    plt.legend(recs, classes)

plt.show()
[ "matplotlib.pyplot.title", "pandas.DataFrame", "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "matplotlib.patches.Rectangle", "pandas.read_csv", "matplotlib.pyplot.scatter", "matplotlib.pyplot.bar", "matplotlib.pyplot.legend", "matplotlib.pyplot.pie", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "numpy.round" ]
[((165, 202), 'pandas.read_csv', 'pd.read_csv', (['"""heart.csv"""'], {'header': 'None'}), "('heart.csv', header=None)\n", (176, 202), True, 'import pandas as pd\n'), ((209, 227), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (221, 227), True, 'import pandas as pd\n'), ((2018, 2028), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2026, 2028), True, 'import matplotlib.pyplot as plt\n'), ((453, 479), 'matplotlib.pyplot.plot', 'plt.plot', (['ages', 'avgs', '"""g-"""'], {}), "(ages, avgs, 'g-')\n", (461, 479), True, 'import matplotlib.pyplot as plt\n'), ((479, 531), 'matplotlib.pyplot.title', 'plt.title', (['"""Variation of Cholestrol Levels with Age"""'], {}), "('Variation of Cholestrol Levels with Age')\n", (488, 531), True, 'import matplotlib.pyplot as plt\n'), ((533, 557), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Age(years)"""'], {}), "('Age(years)')\n", (543, 557), True, 'import matplotlib.pyplot as plt\n'), ((559, 598), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Serum Cholestrol in mg/dl"""'], {}), "('Serum Cholestrol in mg/dl')\n", (569, 598), True, 'import matplotlib.pyplot as plt\n'), ((665, 684), 'numpy.round', 'np.round', (['df[3]', '(-1)'], {}), '(df[3], -1)\n', (673, 684), True, 'import numpy as np\n'), ((851, 901), 'matplotlib.pyplot.bar', 'plt.bar', (['(nums_dis + 2)', 'bps_dis'], {'color': '"""r"""', 'width': '(2)'}), "(nums_dis + 2, bps_dis, color='r', width=2)\n", (858, 901), True, 'import matplotlib.pyplot as plt\n'), ((1066, 1112), 'matplotlib.pyplot.bar', 'plt.bar', (['nums_nor', 'bps_nor'], {'color': '"""g"""', 'width': '(2)'}), "(nums_nor, bps_nor, color='g', width=2)\n", (1073, 1112), True, 'import matplotlib.pyplot as plt\n'), ((1115, 1174), 'matplotlib.pyplot.title', 'plt.title', (['"""Resting blood pressure as heart risk indicator"""'], {}), "('Resting blood pressure as heart risk indicator')\n", (1124, 1174), True, 'import matplotlib.pyplot as plt\n'), ((1176, 1219), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Resting Blood Pressure Bucket"""'], {}), "('Resting Blood Pressure Bucket')\n", (1186, 1219), True, 'import matplotlib.pyplot as plt\n'), ((1221, 1253), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Patients"""'], {}), "('Number of Patients')\n", (1231, 1253), True, 'import matplotlib.pyplot as plt\n'), ((1256, 1307), 'matplotlib.pyplot.legend', 'plt.legend', (['(bar1[0], bar2[0])', "('Safe', 'At Risk')"], {}), "((bar1[0], bar2[0]), ('Safe', 'At Risk'))\n", (1266, 1307), True, 'import matplotlib.pyplot as plt\n'), ((1498, 1670), 'matplotlib.pyplot.pie', 'plt.pie', (['values'], {'labels': 'fin_lab', 'colors': "['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']", 'explode': '[0, 0.2, 0, 0]', 'shadow': '(True)', 'autopct': '"""%1.1f%%"""', 'startangle': '(90)'}), "(values, labels=fin_lab, colors=['yellowgreen', 'gold',\n 'lightskyblue', 'lightcoral'], explode=[0, 0.2, 0, 0], shadow=True,\n autopct='%1.1f%%', startangle=90)\n", (1505, 1670), True, 'import matplotlib.pyplot as plt\n'), ((1663, 1692), 'matplotlib.pyplot.title', 'plt.title', (['"""Chest Pain Types"""'], {}), "('Chest Pain Types')\n", (1672, 1692), True, 'import matplotlib.pyplot as plt\n'), ((1743, 1786), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[7]', 'x[4]'], {'c': 'y', 'cmap': '"""summer"""'}), "(x[7], x[4], c=y, cmap='summer')\n", (1754, 1786), True, 'import matplotlib.pyplot as plt\n'), ((1787, 1815), 'matplotlib.pyplot.title', 'plt.title', (['"""Dataset Scatter"""'], {}), "('Dataset Scatter')\n", (1796, 1815), True, 'import matplotlib.pyplot as plt\n'), ((1990, 2015), 'matplotlib.pyplot.legend', 'plt.legend', (['recs', 'classes'], {}), '(recs, classes)\n', (2000, 2015), True, 'import matplotlib.pyplot as plt\n'), ((1938, 1991), 'matplotlib.patches.Rectangle', 'mpatches.Rectangle', (['(0, 0)', '(1)', '(1)'], {'fc': 'class_colours[i]'}), '((0, 0), 1, 1, fc=class_colours[i])\n', (1956, 1991), True, 'import matplotlib.patches as mpatches\n')]
from misc import dp, bot
from features.mainFunctions import \
    createQR, uploadInputFileToTelegram,\
    escapeMarkdown
#
from aiogram.types import \
    InlineQuery, inline_keyboard, \
    InlineQueryResultPhoto, ChosenInlineResult, \
    InputMediaPhoto
#
from aiogram.utils import markdown
from os import remove
from time import time
import re


@dp.inline_handler(regexp=r'(?i)^qr\b.+$')
async def qrInlineHandler(inline_query: InlineQuery):
    awaitingButton = inline_keyboard.InlineKeyboardButton(
        'Ожидайте...',
        callback_data='awaiting'
    )
    awaitingKeyboard = inline_keyboard.InlineKeyboardMarkup(row_width=1).insert(awaitingButton)

    items = [
        InlineQueryResultPhoto(
            id=str(time() + 1),
            photo_url="https://i.ibb.co/n16zcs0/rnfoxbot-QR.jpg",
            thumb_url='https://i.ibb.co/KsbFqjG/rnfoxbot-QR.jpg',
            photo_width=200,
            photo_height=200,
            caption=markdown.italic("QR—code генерируется..."),
            reply_markup=awaitingKeyboard,
            parse_mode='MarkdownV2'
        )
    ]
    await bot.answer_inline_query(inline_query.id, results=items, cache_time=0)


@dp.chosen_inline_handler(lambda chosen_inline_query: re.search(r"(?i)^qr\b.+$", chosen_inline_query.query))
async def some_chosen_inline_handler(chosen_inline_query: ChosenInlineResult):
    txt = chosen_inline_query.query[3:]  # trim the leading "qr"
    voidInlineKeyboard = inline_keyboard.InlineKeyboardMarkup()
    qrCodePath = createQR(txt)
    imgID = await uploadInputFileToTelegram(qrCodePath, bot=bot)
    await bot.edit_message_reply_markup(
        reply_markup=voidInlineKeyboard,
        inline_message_id=chosen_inline_query.inline_message_id
    )
    await bot.edit_message_media(
        media=InputMediaPhoto(media=imgID),
        inline_message_id=chosen_inline_query.inline_message_id
    )
    remove(qrCodePath)
[ "misc.bot.edit_message_reply_markup", "features.mainFunctions.createQR", "os.remove", "misc.bot.answer_inline_query", "aiogram.utils.markdown.italic", "aiogram.types.inline_keyboard.InlineKeyboardButton", "aiogram.types.InputMediaPhoto", "time.time", "features.mainFunctions.uploadInputFileToTelegram", "aiogram.types.inline_keyboard.InlineKeyboardMarkup", "misc.dp.inline_handler", "re.search" ]
[((344, 385), 'misc.dp.inline_handler', 'dp.inline_handler', ([], {'regexp': '"""(?i)^qr\\\\b.+$"""'}), "(regexp='(?i)^qr\\\\b.+$')\n", (361, 385), False, 'from misc import dp, bot\n'), ((458, 535), 'aiogram.types.inline_keyboard.InlineKeyboardButton', 'inline_keyboard.InlineKeyboardButton', (['"""Ожидайте..."""'], {'callback_data': '"""awaiting"""'}), "('Ожидайте...', callback_data='awaiting')\n", (494, 535), False, 'from aiogram.types import InlineQuery, inline_keyboard, InlineQueryResultPhoto, ChosenInlineResult, InputMediaPhoto\n'), ((1321, 1359), 'aiogram.types.inline_keyboard.InlineKeyboardMarkup', 'inline_keyboard.InlineKeyboardMarkup', ([], {}), '()\n', (1357, 1359), False, 'from aiogram.types import InlineQuery, inline_keyboard, InlineQueryResultPhoto, ChosenInlineResult, InputMediaPhoto\n'), ((1375, 1388), 'features.mainFunctions.createQR', 'createQR', (['txt'], {}), '(txt)\n', (1383, 1388), False, 'from features.mainFunctions import createQR, uploadInputFileToTelegram, escapeMarkdown\n'), ((1719, 1737), 'os.remove', 'remove', (['qrCodePath'], {}), '(qrCodePath)\n', (1725, 1737), False, 'from os import remove\n'), ((984, 1053), 'misc.bot.answer_inline_query', 'bot.answer_inline_query', (['inline_query.id'], {'results': 'items', 'cache_time': '(0)'}), '(inline_query.id, results=items, cache_time=0)\n', (1007, 1053), False, 'from misc import dp, bot\n'), ((1404, 1450), 'features.mainFunctions.uploadInputFileToTelegram', 'uploadInputFileToTelegram', (['qrCodePath'], {'bot': 'bot'}), '(qrCodePath, bot=bot)\n', (1429, 1450), False, 'from features.mainFunctions import createQR, uploadInputFileToTelegram, escapeMarkdown\n'), ((1459, 1582), 'misc.bot.edit_message_reply_markup', 'bot.edit_message_reply_markup', ([], {'reply_markup': 'voidInlineKeyboard', 'inline_message_id': 'chosen_inline_query.inline_message_id'}), '(reply_markup=voidInlineKeyboard,\n inline_message_id=chosen_inline_query.inline_message_id)\n', (1488, 1582), False, 'from misc import dp, bot\n'), ((1110, 1163), 're.search', 're.search', (['"""(?i)^qr\\\\b.+$"""', 'chosen_inline_query.query'], {}), "('(?i)^qr\\\\b.+$', chosen_inline_query.query)\n", (1119, 1163), False, 'import re\n'), ((564, 613), 'aiogram.types.inline_keyboard.InlineKeyboardMarkup', 'inline_keyboard.InlineKeyboardMarkup', ([], {'row_width': '(1)'}), '(row_width=1)\n', (600, 613), False, 'from aiogram.types import InlineQuery, inline_keyboard, InlineQueryResultPhoto, ChosenInlineResult, InputMediaPhoto\n'), ((864, 906), 'aiogram.utils.markdown.italic', 'markdown.italic', (['"""QR—code генерируется..."""'], {}), "('QR—code генерируется...')\n", (879, 906), False, 'from aiogram.utils import markdown\n'), ((1626, 1654), 'aiogram.types.InputMediaPhoto', 'InputMediaPhoto', ([], {'media': 'imgID'}), '(media=imgID)\n', (1641, 1654), False, 'from aiogram.types import InlineQuery, inline_keyboard, InlineQueryResultPhoto, ChosenInlineResult, InputMediaPhoto\n'), ((684, 690), 'time.time', 'time', ([], {}), '()\n', (688, 690), False, 'from time import time\n')]
from __future__ import annotations

from collections import defaultdict
from collections.abc import Callable, Iterable, Iterator, Mapping, MutableMapping
from itertools import chain
from typing import Generic, TypeVar

from zict.common import KT, VT, ZictBase, close, flush

MKT = TypeVar("MKT")


class Sieve(ZictBase[KT, VT], Generic[KT, VT, MKT]):
    """Store values in different mappings based on a selector's output.

    This creates a MutableMapping combining several underlying
    MutableMappings for storage. Items are dispatched based on a selector
    function provided by the user.

    Parameters
    ----------
    mappings: dict of {mapping key: MutableMapping}
    selector: callable (key, value) -> mapping key

    Examples
    --------
    >>> small = {}
    >>> large = DataBase()                        # doctest: +SKIP
    >>> mappings = {True: small, False: large}    # doctest: +SKIP
    >>> def is_small(key, value):                 # doctest: +SKIP
    ...     return sys.getsizeof(value) < 10000
    >>> d = Sieve(mappings, is_small)             # doctest: +SKIP

    See Also
    --------
    Buffer
    """

    mappings: Mapping[MKT, MutableMapping[KT, VT]]
    selector: Callable[[KT, VT], MKT]
    key_to_mapping: dict[KT, MutableMapping[KT, VT]]

    def __init__(
        self,
        mappings: Mapping[MKT, MutableMapping[KT, VT]],
        selector: Callable[[KT, VT], MKT],
    ):
        self.mappings = mappings
        # FIXME https://github.com/python/mypy/issues/708
        self.selector = selector  # type: ignore
        self.key_to_mapping = {}

    def __getitem__(self, key: KT) -> VT:
        return self.key_to_mapping[key][key]

    def __setitem__(self, key: KT, value: VT) -> None:
        old_mapping = self.key_to_mapping.get(key)
        mkey = self.selector(key, value)  # type: ignore
        mapping = self.mappings[mkey]
        if old_mapping is not None and old_mapping is not mapping:
            del old_mapping[key]
        mapping[key] = value
        self.key_to_mapping[key] = mapping

    def __delitem__(self, key: KT) -> None:
        del self.key_to_mapping.pop(key)[key]

    def _do_update(self, items: Iterable[tuple[KT, VT]]) -> None:
        # Optimized update() implementation issuing a single update()
        # call per underlying mapping.
        updates = defaultdict(list)
        mapping_ids = {id(m): m for m in self.mappings.values()}

        for key, value in items:
            old_mapping = self.key_to_mapping.get(key)
            mkey = self.selector(key, value)  # type: ignore
            mapping = self.mappings[mkey]
            if old_mapping is not None and old_mapping is not mapping:
                del old_mapping[key]
            # Can't hash a mutable mapping, so use its id() instead
            updates[id(mapping)].append((key, value))

        for mid, mitems in updates.items():
            mapping = mapping_ids[mid]
            mapping.update(mitems)
            for key, _ in mitems:
                self.key_to_mapping[key] = mapping

    # FIXME dictionary views https://github.com/dask/zict/issues/61
    def keys(self) -> Iterator[KT]:  # type: ignore
        return chain.from_iterable(self.mappings.values())

    def values(self) -> Iterator[VT]:  # type: ignore
        return chain.from_iterable(m.values() for m in self.mappings.values())

    def items(self) -> Iterator[tuple[KT, VT]]:  # type: ignore
        return chain.from_iterable(m.items() for m in self.mappings.values())

    def __len__(self) -> int:
        return sum(map(len, self.mappings.values()))

    def __iter__(self) -> Iterator[KT]:
        return self.keys()

    def __contains__(self, key: object) -> bool:
        return key in self.key_to_mapping

    def __str__(self) -> str:
        return f"Sieve<{self.mappings}>"

    __repr__ = __str__

    def flush(self) -> None:
        flush(*self.mappings.values())

    def close(self) -> None:
        close(*self.mappings.values())
[ "collections.defaultdict", "typing.TypeVar" ]
[((281, 295), 'typing.TypeVar', 'TypeVar', (['"""MKT"""'], {}), "('MKT')\n", (288, 295), False, 'from typing import Generic, TypeVar\n'), ((2345, 2362), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2356, 2362), False, 'from collections import defaultdict\n')]
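A minimal usage sketch of the Sieve mapping above, mirroring its docstring example. It assumes zict is installed and exports Sieve; the 10 kB threshold and the plain-dict backends are illustrative only.

import sys

from zict import Sieve  # assumes Sieve is exported at the package top level

small = {}  # fast in-memory store for small values
large = {}  # stand-in for a slower on-disk store


def is_small(key, value):
    # Dispatch values below ~10 kB to the small mapping
    return sys.getsizeof(value) < 10000


d = Sieve({True: small, False: large}, is_small)
d["a"] = "tiny"
d["b"] = "x" * 100000
assert "a" in small and "b" in large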
import models.tscBaseItem as tscBaseItem
import logging

# setup logger for module
_log = logging.getLogger(__name__)


class TSC_Item(tscBaseItem.TSC_Base_Item):
    __server = tscBaseItem.TSC_Base_Item()
    __item = object()
    __name = str()
    __id = str()

    @property
    def name(self):
        return self.__name

    @property
    def id(self):
        return self.__id

    @property
    def item(self):
        return self.__item

    @property
    def server(self):
        return self.__server
[ "models.tscBaseItem.TSC_Base_Item", "logging.getLogger" ]
[((93, 120), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (110, 120), False, 'import logging\n'), ((181, 208), 'models.tscBaseItem.TSC_Base_Item', 'tscBaseItem.TSC_Base_Item', ([], {}), '()\n', (206, 208), True, 'import models.tscBaseItem as tscBaseItem\n')]
import requests

session = requests.Session()

if __name__ == '__main__':
    pass
[ "requests.Session" ]
[((27, 45), 'requests.Session', 'requests.Session', ([], {}), '()\n', (43, 45), False, 'import requests\n')]
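A brief note on the module above: keeping a single module-level Session lets every caller share one connection pool and cookie jar. A hypothetical consumer (the importing module name and URL are illustrative):

# Hypothetical client code; assumes the file above is importable as http_session.py
from http_session import session

resp = session.get("https://example.com/api/health")  # reuses pooled connections
resp.raise_for_status()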
import json
import re

import pytest  # type: ignore

# from custom_types.alternative_string_types import Kaki, URL
from modules import wanikani
from testing.dict_typing import FullTestDict
from testing.dicts import TEST_DICTS
from utils import convert_dict_str_keys_to_kaki, convert_list_of_str_to_kaki


# For each test, try with every dict in TEST_DICTS
@pytest.fixture(params=TEST_DICTS, ids=lambda d:d['id'])
def test_dict(request):
    return request.param


class FakeResponse:
    def __init__(self, text, status_code=200):
        self.text = text
        self.status_code = status_code


API_KEY_REGEX = r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"


#####################
## TESTS ###########
#####################

def test_api_key_import():
    """Check the API_KEY is successfully imported"""
    assert re.match(API_KEY_REGEX, wanikani.API_KEY) is not None


def test_main(monkeypatch, test_dict: FullTestDict):
    """
    - GIVEN a list of words
    - WHEN the accent dict is generated
    - THEN check all the wanikani info is correct and complete
    """
    word_list = convert_list_of_str_to_kaki(test_dict['input'])
    expected_output = test_dict['wanikani']['expected_output']
    api_response = test_dict['wanikani']['api_response']

    monkeypatch.setattr("requests.get", lambda url, headers: FakeResponse(json.dumps(api_response)))

    assert wanikani.main(word_list) == expected_output


def test_main_api_error(monkeypatch, test_dict: FullTestDict):
    """
    - GIVEN a list of words
    - WHEN the API returns an unsuccessful status code
    - THEN check the failed dict is returned as expected
    """
    word_list = convert_list_of_str_to_kaki(test_dict['input'])
    response = json.dumps({"error": "api_error"})
    expected_output = {
        word: {
            "success": False,
            "error": {
                "error_msg": json.dumps({"error": "api_error"}),
                "status_code": 400,
                "url": test_dict["wanikani"]["url"]
            },
            "main_data": {
                "audio": [],
                "sentences": [],
            },
        }
        for word in word_list
    }

    monkeypatch.setattr("requests.get", lambda url, headers: FakeResponse(response, status_code=400))

    assert wanikani.main(word_list) == expected_output


def test_empty_input():
    """
    - GIVEN an empty input
    - WHEN an audio dictionary is generated
    - THEN check it returns and empty dict
    """
    assert wanikani.main([]) == {}


def test_get_api_response(monkeypatch, test_dict: FullTestDict):
    """
    - GIVEN a list of words
    - WHEN the API response is returned
    - THEN check the result is as expected
    """
    word_list = convert_list_of_str_to_kaki(test_dict['input'])
    api_response = test_dict['wanikani']['api_response']

    def check_get_request(url, headers):
        auth_regex = r"Bearer " + API_KEY_REGEX
        assert "api.wanikani.com" in url
        assert "Authorization" in headers
        assert re.match(auth_regex, headers["Authorization"])
        return FakeResponse(json.dumps(api_response))

    monkeypatch.setattr("requests.get", check_get_request)

    assert wanikani.get_api_response(word_list) == api_response


def test_get_url(test_dict: FullTestDict):
    """
    - GIVEN a list of words
    - WHEN the API URL is generated
    - THEN check the URL is as expected
    """
    word_list = convert_list_of_str_to_kaki(test_dict['input'])
    expected_url = test_dict['wanikani']['url']

    assert wanikani.get_url(word_list) == expected_url


def test_call_api(monkeypatch, test_dict: FullTestDict):
    """
    - GIVEN an API URL
    - WHEN the API is called
    - THEN check the response is handled correctly
    """
    url = test_dict['wanikani']['url']
    api_response = test_dict['wanikani']['api_response']

    def validate_get_request(url, headers):
        auth_regex = r"Bearer " + API_KEY_REGEX
        assert "api.wanikani.com" in url
        assert "Authorization" in headers
        assert re.match(auth_regex, headers["Authorization"])

    def mock_get_request(url, headers):
        validate_get_request(url, headers)
        return FakeResponse(json.dumps(api_response))

    monkeypatch.setattr("requests.get", mock_get_request)

    assert wanikani.call_api(url) == api_response


def test_call_api_failure(monkeypatch, test_dict: FullTestDict):
    """
    - GIVEN a list of words
    - WHEN an unsuccessful HTTP request is made
    - THEN check an exception is thrown
    """
    url = test_dict['wanikani']['url']
    response = json.dumps({"error": "could not connect"})

    monkeypatch.setattr("requests.get", lambda url, headers: FakeResponse(response, status_code=400))

    try:
        wanikani.call_api(url)
        assert False  # Test fails if call_api() doesn't raise an error
    except wanikani.WanikaniAPIError as api_error:
        assert api_error.error_msg == json.dumps({"error": "could not connect"})
        assert api_error.status_code == 400
        assert api_error.url == url


def test_call_api_unsuccessful(monkeypatch):
    """
    - GIVEN an API call
    - WHEN an unsuccessful response is returned
    - THEN check the response is handled as expected
    """
    unsuccessful_response = FakeResponse('{"error": "call_api failed"}', 400)
    monkeypatch.setattr("requests.get", lambda url, headers: unsuccessful_response)

    try:
        wanikani.call_api("www.testurl.com")
        assert False  # Test fails if call_api() doesn't raise an error
    except wanikani.WanikaniAPIError as api_error:
        assert api_error.error_msg == '{"error": "call_api failed"}'
        assert api_error.status_code == 400
        assert api_error.url == "www.testurl.com"


def test_build_result_dict(test_dict: FullTestDict):
    """
    - GIVEN an API response
    - WHEN the result dict is built
    - THEN check the result is as expected
    """
    api_response = test_dict["wanikani"]["api_response"]
    expected_result = convert_dict_str_keys_to_kaki(test_dict["wanikani"]["result_dict"])

    assert dict(wanikani.build_result_dict(api_response)) == expected_result
[ "modules.wanikani.get_api_response", "modules.wanikani.main", "modules.wanikani.get_url", "modules.wanikani.call_api", "pytest.fixture", "re.match", "json.dumps", "modules.wanikani.build_result_dict", "utils.convert_dict_str_keys_to_kaki", "utils.convert_list_of_str_to_kaki" ]
[((359, 415), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'TEST_DICTS', 'ids': "(lambda d: d['id'])"}), "(params=TEST_DICTS, ids=lambda d: d['id'])\n", (373, 415), False, 'import pytest\n'), ((1112, 1159), 'utils.convert_list_of_str_to_kaki', 'convert_list_of_str_to_kaki', (["test_dict['input']"], {}), "(test_dict['input'])\n", (1139, 1159), False, 'from utils import convert_dict_str_keys_to_kaki, convert_list_of_str_to_kaki\n'), ((1675, 1722), 'utils.convert_list_of_str_to_kaki', 'convert_list_of_str_to_kaki', (["test_dict['input']"], {}), "(test_dict['input'])\n", (1702, 1722), False, 'from utils import convert_dict_str_keys_to_kaki, convert_list_of_str_to_kaki\n'), ((1738, 1772), 'json.dumps', 'json.dumps', (["{'error': 'api_error'}"], {}), "({'error': 'api_error'})\n", (1748, 1772), False, 'import json\n'), ((2744, 2791), 'utils.convert_list_of_str_to_kaki', 'convert_list_of_str_to_kaki', (["test_dict['input']"], {}), "(test_dict['input'])\n", (2771, 2791), False, 'from utils import convert_dict_str_keys_to_kaki, convert_list_of_str_to_kaki\n'), ((3444, 3491), 'utils.convert_list_of_str_to_kaki', 'convert_list_of_str_to_kaki', (["test_dict['input']"], {}), "(test_dict['input'])\n", (3471, 3491), False, 'from utils import convert_dict_str_keys_to_kaki, convert_list_of_str_to_kaki\n'), ((4609, 4651), 'json.dumps', 'json.dumps', (["{'error': 'could not connect'}"], {}), "({'error': 'could not connect'})\n", (4619, 4651), False, 'import json\n'), ((6047, 6114), 'utils.convert_dict_str_keys_to_kaki', 'convert_dict_str_keys_to_kaki', (["test_dict['wanikani']['result_dict']"], {}), "(test_dict['wanikani']['result_dict'])\n", (6076, 6114), False, 'from utils import convert_dict_str_keys_to_kaki, convert_list_of_str_to_kaki\n'), ((840, 881), 're.match', 're.match', (['API_KEY_REGEX', 'wanikani.API_KEY'], {}), '(API_KEY_REGEX, wanikani.API_KEY)\n', (848, 881), False, 'import re\n'), ((1394, 1418), 'modules.wanikani.main', 'wanikani.main', (['word_list'], {}), '(word_list)\n', (1407, 1418), False, 'from modules import wanikani\n'), ((2299, 2323), 'modules.wanikani.main', 'wanikani.main', (['word_list'], {}), '(word_list)\n', (2312, 2323), False, 'from modules import wanikani\n'), ((2510, 2527), 'modules.wanikani.main', 'wanikani.main', (['[]'], {}), '([])\n', (2523, 2527), False, 'from modules import wanikani\n'), ((3037, 3083), 're.match', 're.match', (['auth_regex', "headers['Authorization']"], {}), "(auth_regex, headers['Authorization'])\n", (3045, 3083), False, 'import re\n'), ((3210, 3246), 'modules.wanikani.get_api_response', 'wanikani.get_api_response', (['word_list'], {}), '(word_list)\n', (3235, 3246), False, 'from modules import wanikani\n'), ((3552, 3579), 'modules.wanikani.get_url', 'wanikani.get_url', (['word_list'], {}), '(word_list)\n', (3568, 3579), False, 'from modules import wanikani\n'), ((4061, 4107), 're.match', 're.match', (['auth_regex', "headers['Authorization']"], {}), "(auth_regex, headers['Authorization'])\n", (4069, 4107), False, 'import re\n'), ((4317, 4339), 'modules.wanikani.call_api', 'wanikani.call_api', (['url'], {}), '(url)\n', (4334, 4339), False, 'from modules import wanikani\n'), ((4772, 4794), 'modules.wanikani.call_api', 'wanikani.call_api', (['url'], {}), '(url)\n', (4789, 4794), False, 'from modules import wanikani\n'), ((5457, 5493), 'modules.wanikani.call_api', 'wanikani.call_api', (['"""www.testurl.com"""'], {}), "('www.testurl.com')\n", (5474, 5493), False, 'from modules import wanikani\n'), ((3112, 3136), 'json.dumps', 'json.dumps', (['api_response'], {}), '(api_response)\n', (3122, 3136), False, 'import json\n'), ((4220, 4244), 'json.dumps', 'json.dumps', (['api_response'], {}), '(api_response)\n', (4230, 4244), False, 'import json\n'), ((6132, 6172), 'modules.wanikani.build_result_dict', 'wanikani.build_result_dict', (['api_response'], {}), '(api_response)\n', (6158, 6172), False, 'from modules import wanikani\n'), ((1355, 1379), 'json.dumps', 'json.dumps', (['api_response'], {}), '(api_response)\n', (1365, 1379), False, 'import json\n'), ((1895, 1929), 'json.dumps', 'json.dumps', (["{'error': 'api_error'}"], {}), "({'error': 'api_error'})\n", (1905, 1929), False, 'import json\n'), ((4966, 5008), 'json.dumps', 'json.dumps', (["{'error': 'could not connect'}"], {}), "({'error': 'could not connect'})\n", (4976, 5008), False, 'import json\n')]
def get_last_version(request):
    from Core.models import VersionControl
    return {'version': VersionControl.objects.all().last()}
[ "Core.models.VersionControl.objects.all" ]
[((97, 125), 'Core.models.VersionControl.objects.all', 'VersionControl.objects.all', ([], {}), '()\n', (123, 125), False, 'from Core.models import VersionControl\n')]
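For context, a Django context processor such as get_last_version above only takes effect once it is listed in the TEMPLATES setting. A minimal sketch follows; the dotted path 'Core.context_processors.get_last_version' is an assumption about where the function lives.

# settings.py (fragment); the module path below is hypothetical
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                'Core.context_processors.get_last_version',  # exposes {{ version }} in templates
            ],
        },
    },
]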
#!/usr/bin/python

# import the necessary packages
import rospy
import speech_recognition as sr
from unidecode import unidecode

# import the necessary msgs. Example with msg type String_Int_Arrays:
from std_msgs.msg import Float32
from std_msgs.msg import String


class voice_recognitor3():
    """ Class voice_recognitor

    This class allows to recognite the voice during an indicated
    time in seconds.
    """

    def __init__(self):
        """Class constructor

        It is the constructor of the class. It does:
        -Subscribe to recognize_voice topic
        -Publish the asr text
        """

        #Subscribe to ROS topics
        self.asr_sub = rospy.Subscriber("recognize_voice3", Float32, self.callback)

        #Define the ROS publishers
        self.asr_pub = rospy.Publisher("asr_text", String, queue_size=0)

        #Define object as msg type
        self.asr_msg = String()
        self.asr_msg.data = ""

        self.duration=3.0

        self.configuration()
        print("[INFO] Node started")

    def configuration(self):
        """Configuration void.

        In this void the token of the wit.ai client is defined.
        And it is set the sample_rate of the text recorded.
        """
        self.r = sr.Recognizer()
        self.mic = sr.Microphone(device_index=0)

    def recognize(self,duration):
        """Void to recognize voice

        First, the voice is recorded with the duration time set.
        After that, the text is recognized and published.
        """
        with self.mic as source:
            audio = self.r.listen(source, phrase_time_limit=int(duration))

        try:
            answ = self.r.recognize_google(audio, language="es-ES")
            text = unidecode(answ).lower()
        except:
            text=""

        print(text)
        self.asr_msg.data = text

        #Publish msg
        self.asr_pub.publish(self.asr_msg)

    def run_loop(self):
        """ Infinite loop.

        When ROS is closed, it exits.
        """
        while not rospy.is_shutdown():
            #functions to repeat until the node is closed
            rospy.spin()

    def stopping_node(self):
        """ROS closing node

        Is the function called when ROS node is closed."""
        print("\n\nBye bye! :)\n\n")

    def callback(self, data):
        """ROS callback

        This void is executed when a message is received.
        It simply calls the function to recognize giving the duration
        of the recording"""
        self.recognize(data.data)


if __name__=='__main__':
    """ Main void.

    Is the main void executed when started. It does:
    - Start the node
    - Create an object of the class
    - Run the node
    """
    try:
        rospy.init_node('asr_node3') # Init ROS node
        asr_object = voice_recognitor3()
        rospy.on_shutdown(asr_object.stopping_node) #When ROS is closed, this void is executed
        asr_object.run_loop()
    except rospy.ROSInterruptException:
        pass
[ "unidecode.unidecode", "rospy.Subscriber", "rospy.Publisher", "speech_recognition.Microphone", "rospy.on_shutdown", "rospy.is_shutdown", "rospy.init_node", "rospy.spin", "speech_recognition.Recognizer", "std_msgs.msg.String" ]
[((666, 726), 'rospy.Subscriber', 'rospy.Subscriber', (['"""recognize_voice3"""', 'Float32', 'self.callback'], {}), "('recognize_voice3', Float32, self.callback)\n", (682, 726), False, 'import rospy\n'), ((786, 835), 'rospy.Publisher', 'rospy.Publisher', (['"""asr_text"""', 'String'], {'queue_size': '(0)'}), "('asr_text', String, queue_size=0)\n", (801, 835), False, 'import rospy\n'), ((895, 903), 'std_msgs.msg.String', 'String', ([], {}), '()\n', (901, 903), False, 'from std_msgs.msg import String\n'), ((1245, 1260), 'speech_recognition.Recognizer', 'sr.Recognizer', ([], {}), '()\n', (1258, 1260), True, 'import speech_recognition as sr\n'), ((1280, 1309), 'speech_recognition.Microphone', 'sr.Microphone', ([], {'device_index': '(0)'}), '(device_index=0)\n', (1293, 1309), True, 'import speech_recognition as sr\n'), ((2745, 2773), 'rospy.init_node', 'rospy.init_node', (['"""asr_node3"""'], {}), "('asr_node3')\n", (2760, 2773), False, 'import rospy\n'), ((2846, 2889), 'rospy.on_shutdown', 'rospy.on_shutdown', (['asr_object.stopping_node'], {}), '(asr_object.stopping_node)\n', (2863, 2889), False, 'import rospy\n'), ((2045, 2064), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (2062, 2064), False, 'import rospy\n'), ((2136, 2148), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (2146, 2148), False, 'import rospy\n'), ((1738, 1753), 'unidecode.unidecode', 'unidecode', (['answ'], {}), '(answ)\n', (1747, 1753), False, 'from unidecode import unidecode\n')]
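A minimal sketch of how another node could trigger the recognizer above, using the standard rospy API; the client node name is illustrative, while the topic and duration come from the code above.

import rospy
from std_msgs.msg import Float32

rospy.init_node('asr_trigger_demo')  # hypothetical client node
pub = rospy.Publisher('recognize_voice3', Float32, queue_size=1)
rospy.sleep(1.0)           # give the subscriber time to connect
pub.publish(Float32(3.0))  # record and recognize for ~3 seconds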
# Copyright 2013 IBM Corp.
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#
"""
Volume driver for IBM Storwize family and SVC storage systems.

Notes:
1. If you specify both a password and a key file, this driver will use the
   key file only.
2. When using a key file for authentication, it is up to the user or system
   administrator to store the private key in a safe manner.
3. The defaults for creating volumes are "-rsize 2% -autoexpand
   -grainsize 256 -warning 0". These can be changed in the configuration
   file or by using volume types(recommended only for advanced users).

Limitations:
1. The driver expects CLI output in English, error messages may be in a
   localized format.
2. Clones and creating volumes from snapshots, where the source and target
   are of different sizes, is not supported.

"""

import math
import time

from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import units

from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import utils
from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers
from cinder.volume.drivers.ibm.storwize_svc import replication as storwize_rep
from cinder.volume.drivers.san import san
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils

LOG = logging.getLogger(__name__)

storwize_svc_opts = [
    cfg.StrOpt('storwize_svc_volpool_name',
               default='volpool',
               help='Storage system storage pool for volumes'),
    cfg.IntOpt('storwize_svc_vol_rsize',
               default=2,
               help='Storage system space-efficiency parameter for volumes '
                    '(percentage)'),
    cfg.IntOpt('storwize_svc_vol_warning',
               default=0,
               help='Storage system threshold for volume capacity warnings '
                    '(percentage)'),
    cfg.BoolOpt('storwize_svc_vol_autoexpand',
                default=True,
                help='Storage system autoexpand parameter for volumes '
                     '(True/False)'),
    cfg.IntOpt('storwize_svc_vol_grainsize',
               default=256,
               help='Storage system grain size parameter for volumes '
                    '(32/64/128/256)'),
    cfg.BoolOpt('storwize_svc_vol_compression',
                default=False,
                help='Storage system compression option for volumes'),
    cfg.BoolOpt('storwize_svc_vol_easytier',
                default=True,
                help='Enable Easy Tier for volumes'),
    cfg.IntOpt('storwize_svc_vol_iogrp',
               default=0,
               help='The I/O group in which to allocate volumes'),
    cfg.IntOpt('storwize_svc_flashcopy_timeout',
               default=120,
               help='Maximum number of seconds to wait for FlashCopy to be '
                    'prepared. Maximum value is 600 seconds (10 minutes)'),
    cfg.StrOpt('storwize_svc_connection_protocol',
               default='iSCSI',
               help='Connection protocol (iSCSI/FC)'),
    cfg.BoolOpt('storwize_svc_iscsi_chap_enabled',
                default=True,
                help='Configure CHAP authentication for iSCSI connections '
                     '(Default: Enabled)'),
    cfg.BoolOpt('storwize_svc_multipath_enabled',
                default=False,
                help='Connect with multipath (FC only; iSCSI multipath is '
                     'controlled by Nova)'),
    cfg.BoolOpt('storwize_svc_multihostmap_enabled',
                default=True,
                help='Allows vdisk to multi host mapping'),
    cfg.BoolOpt('storwize_svc_npiv_compatibility_mode',
                default=False,
                help='Indicate whether svc driver is compatible for NPIV '
                     'setup. If it is compatible, it will allow no wwpns '
                     'being returned on get_conn_fc_wwpns during '
                     'initialize_connection'),
    cfg.BoolOpt('storwize_svc_allow_tenant_qos',
                default=False,
                help='Allow tenants to specify QOS on create'),
    cfg.StrOpt('storwize_svc_stretched_cluster_partner',
               default=None,
               help='If operating in stretched cluster mode, specify the '
                    'name of the pool in which mirrored copies are stored.'
                    'Example: "pool2"'),
]

CONF = cfg.CONF
CONF.register_opts(storwize_svc_opts)


class StorwizeSVCDriver(san.SanDriver):
    """IBM Storwize V7000 and SVC iSCSI/FC volume driver.

    Version history:
    1.0 - Initial driver
    1.1 - FC support, create_cloned_volume, volume type support,
          get_volume_stats, minor bug fixes
    1.2.0 - Added retype
    1.2.1 - Code refactor, improved exception handling
    1.2.2 - Fix bug #1274123 (races in host-related functions)
    1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to
            lsfabric, clear unused data from connections, ensure matching
            WWPNs by comparing lower case
    1.2.4 - Fix bug #1278035 (async migration/retype)
    1.2.5 - Added support for manage_existing (unmanage is inherited)
    1.2.6 - Added QoS support in terms of I/O throttling rate
    1.3.1 - Added support for volume replication
    1.3.2 - Added support for consistency group
    """

    VERSION = "1.3.2"
    VDISKCOPYOPS_INTERVAL = 600

    def __init__(self, *args, **kwargs):
        super(StorwizeSVCDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(storwize_svc_opts)
        self._helpers = storwize_helpers.StorwizeHelpers(self._run_ssh)
        self._vdiskcopyops = {}
        self._vdiskcopyops_loop = None
        self.replication = None
        self._state = {'storage_nodes': {},
                       'enabled_protocols': set(),
                       'compression_enabled': False,
                       'available_iogrps': [],
                       'system_name': None,
                       'system_id': None,
                       'code_level': None,
                       }
        # Storwize has the limitation that can not burst more than 3 new ssh
        # connections within 1 second. So slow down the initialization.
        time.sleep(1)

    def do_setup(self, ctxt):
        """Check that we have all configuration details from the storage."""
        LOG.debug('enter: do_setup')

        # Get storage system name, id, and code level
        self._state.update(self._helpers.get_system_info())

        # Get the replication helpers
        self.replication = storwize_rep.StorwizeSVCReplication.factory(self)

        # Validate that the pool exists
        pool = self.configuration.storwize_svc_volpool_name
        try:
            self._helpers.get_pool_attrs(pool)
        except exception.VolumeBackendAPIException:
            msg = _('Failed getting details for pool %s') % pool
            raise exception.InvalidInput(reason=msg)

        # Check if compression is supported
        self._state['compression_enabled'] = \
            self._helpers.compression_enabled()

        # Get the available I/O groups
        self._state['available_iogrps'] = \
            self._helpers.get_available_io_groups()

        # Get the iSCSI and FC names of the Storwize/SVC nodes
        self._state['storage_nodes'] = self._helpers.get_node_info()

        # Add the iSCSI IP addresses and WWPNs to the storage node info
        self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes'])
        self._helpers.add_fc_wwpns(self._state['storage_nodes'])

        # For each node, check what connection modes it supports. Delete any
        # nodes that do not support any types (may be partially configured).
        to_delete = []
        for k, node in self._state['storage_nodes'].iteritems():
            if ((len(node['ipv4']) or len(node['ipv6'])) and
                    len(node['iscsi_name'])):
                node['enabled_protocols'].append('iSCSI')
                self._state['enabled_protocols'].add('iSCSI')
            if len(node['WWPN']):
                node['enabled_protocols'].append('FC')
                self._state['enabled_protocols'].add('FC')
            if not len(node['enabled_protocols']):
                to_delete.append(k)
        for delkey in to_delete:
            del self._state['storage_nodes'][delkey]

        # Make sure we have at least one node configured
        if not len(self._state['storage_nodes']):
            msg = _('do_setup: No configured nodes.')
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        # Build the list of in-progress vdisk copy operations
        if ctxt is None:
            admin_context = context.get_admin_context()
        else:
            admin_context = ctxt.elevated()
        volumes = self.db.volume_get_all_by_host(admin_context, self.host)

        for volume in volumes:
            metadata = self.db.volume_admin_metadata_get(admin_context,
                                                         volume['id'])
            curr_ops = metadata.get('vdiskcopyops', None)
            if curr_ops:
                ops = [tuple(x.split(':')) for x in curr_ops.split(';')]
                self._vdiskcopyops[volume['id']] = ops

        # if vdiskcopy exists in database, start the looping call
        if len(self._vdiskcopyops) >= 1:
            self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
                self._check_volume_copy_ops)
            self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)

        LOG.debug('leave: do_setup')

    def check_for_setup_error(self):
        """Ensure that the flags are set properly."""
        LOG.debug('enter: check_for_setup_error')

        # Check that we have the system ID information
        if self._state['system_name'] is None:
            exception_msg = (_('Unable to determine system name'))
            raise exception.VolumeBackendAPIException(data=exception_msg)
        if self._state['system_id'] is None:
            exception_msg = (_('Unable to determine system id'))
            raise exception.VolumeBackendAPIException(data=exception_msg)

        required_flags = ['san_ip', 'san_ssh_port', 'san_login',
                          'storwize_svc_volpool_name']
        for flag in required_flags:
            if not self.configuration.safe_get(flag):
                raise exception.InvalidInput(reason=_('%s is not set') % flag)

        # Ensure that either password or keyfile were set
        if not (self.configuration.san_password or
                self.configuration.san_private_key):
            raise exception.InvalidInput(
                reason=_('Password or SSH private key is required for '
                         'authentication: set either san_password or '
                         'san_private_key option'))

        # Check that flashcopy_timeout is not more than 10 minutes
        flashcopy_timeout = self.configuration.storwize_svc_flashcopy_timeout
        if not (flashcopy_timeout > 0 and flashcopy_timeout <= 600):
            raise exception.InvalidInput(
                reason=_('Illegal value %d specified for '
                         'storwize_svc_flashcopy_timeout: '
                         'valid values are between 0 and 600') % flashcopy_timeout)

        opts = self._helpers.build_default_opts(self.configuration)
        self._helpers.check_vdisk_opts(self._state, opts)

        LOG.debug('leave: check_for_setup_error')

    def ensure_export(self, ctxt, volume):
        """Check that the volume exists on the storage.

        The system does not "export" volumes as a Linux iSCSI target does,
        and therefore we just check that the volume exists on the storage.
        """
        volume_defined = self._helpers.is_vdisk_defined(volume['name'])
        if not volume_defined:
            LOG.error(_LE('ensure_export: Volume %s not found on storage')
                      % volume['name'])

    def create_export(self, ctxt, volume):
        model_update = None
        return model_update

    def remove_export(self, ctxt, volume):
        pass

    def validate_connector(self, connector):
        """Check connector for at least one enabled protocol (iSCSI/FC)."""
        valid = False
        if ('iSCSI' in self._state['enabled_protocols'] and
                'initiator' in connector):
            valid = True
        if 'FC' in self._state['enabled_protocols'] and 'wwpns' in connector:
            valid = True
        if not valid:
            msg = (_LE('The connector does not contain the required '
                       'information.'))
            LOG.error(msg)
            raise exception.InvalidConnectorException(
                missing='initiator or wwpns')

    def _get_vdisk_params(self, type_id, volume_type=None,
                          volume_metadata=None):
        return self._helpers.get_vdisk_params(self.configuration, self._state,
                                              type_id,
                                              volume_type=volume_type,
                                              volume_metadata=volume_metadata)

    @fczm_utils.AddFCZone
    @utils.synchronized('storwize-host', external=True)
    def initialize_connection(self, volume, connector):
        """Perform the necessary work so that an iSCSI/FC connection can
        be made.

        To be able to create an iSCSI/FC connection from a given host to a
        volume, we must:
        1. Translate the given iSCSI name or WWNN to a host name
        2. Create new host on the storage system if it does not yet exist
        3. Map the volume to the host if it is not already done
        4. Return the connection information for relevant nodes (in the
           proper I/O group)
        """
        LOG.debug('enter: initialize_connection: volume %(vol)s with '
                  'connector %(conn)s' % {'vol': volume, 'conn': connector})

        vol_opts = self._get_vdisk_params(volume['volume_type_id'])
        volume_name = volume['name']

        # Delete irrelevant connection information that later could result
        # in unwanted behaviour. For example, if FC is used yet the hosts
        # return iSCSI data, the driver will try to create the iSCSI connection
        # which can result in a nice error about reaching the per-host maximum
        # iSCSI initiator limit.
        # First make a copy so we don't mess with a caller's connector.
        connector = connector.copy()
        if vol_opts['protocol'] == 'FC':
            connector.pop('initiator', None)
        elif vol_opts['protocol'] == 'iSCSI':
            connector.pop('wwnns', None)
            connector.pop('wwpns', None)

        # Check if a host object is defined for this host name
        host_name = self._helpers.get_host_from_connector(connector)
        if host_name is None:
            # Host does not exist - add a new host to Storwize/SVC
            host_name = self._helpers.create_host(connector)

        if vol_opts['protocol'] == 'iSCSI':
            chap_secret = self._helpers.get_chap_secret_for_host(host_name)
            chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled
            if chap_enabled and chap_secret is None:
                chap_secret = self._helpers.add_chap_secret_to_host(host_name)
            elif not chap_enabled and chap_secret:
                LOG.warning(_LW('CHAP secret exists for host but CHAP is '
                                'disabled'))

        volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
        if volume_attributes is None:
            msg = (_('initialize_connection: Failed to get attributes'
                     ' for volume %s') % volume_name)
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        multihostmap = self.configuration.storwize_svc_multihostmap_enabled
        lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
                                               multihostmap)
        try:
            preferred_node = volume_attributes['preferred_node_id']
            IO_group = volume_attributes['IO_group_id']
        except KeyError as e:
            LOG.error(_LE('Did not find expected column name in '
                          'lsvdisk: %s') % e)
            msg = (_('initialize_connection: Missing volume '
                     'attribute for volume %s') % volume_name)
            raise exception.VolumeBackendAPIException(data=msg)

        try:
            # Get preferred node and other nodes in I/O group
            preferred_node_entry = None
            io_group_nodes = []
            for node in self._state['storage_nodes'].itervalues():
                if vol_opts['protocol'] not in node['enabled_protocols']:
                    continue
                if node['id'] == preferred_node:
                    preferred_node_entry = node
                if node['IO_group'] == IO_group:
                    io_group_nodes.append(node)

            if not len(io_group_nodes):
                msg = (_('initialize_connection: No node found in '
                         'I/O group %(gid)s for volume %(vol)s') %
                       {'gid': IO_group, 'vol': volume_name})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            if not preferred_node_entry and not vol_opts['multipath']:
                # Get 1st node in I/O group
                preferred_node_entry = io_group_nodes[0]
                LOG.warn(_LW('initialize_connection: Did not find a preferred '
                             'node for volume %s') % volume_name)

            properties = {}
            properties['target_discovered'] = False
            properties['target_lun'] = lun_id
            properties['volume_id'] = volume['id']
            if vol_opts['protocol'] == 'iSCSI':
                type_str = 'iscsi'
                if len(preferred_node_entry['ipv4']):
                    ipaddr = preferred_node_entry['ipv4'][0]
                else:
                    ipaddr = preferred_node_entry['ipv6'][0]
                properties['target_portal'] = '%s:%s' % (ipaddr, '3260')
                properties['target_iqn'] = preferred_node_entry['iscsi_name']
                if chap_secret:
                    properties['auth_method'] = 'CHAP'
                    properties['auth_username'] = connector['initiator']
                    properties['auth_password'] = chap_secret
                    properties['discovery_auth_method'] = 'CHAP'
                    properties['discovery_auth_username'] = (
                        connector['initiator'])
                    properties['discovery_auth_password'] = chap_secret
            else:
                type_str = 'fibre_channel'
                conn_wwpns = self._helpers.get_conn_fc_wwpns(host_name)

                # If conn_wwpns is empty, then that means that there were
                # no target ports with visibility to any of the initiators.
                # We will either fail the attach, or return all target
                # ports, depending on the value of the
                # storwize_svc_npiv_compatibity_mode flag.
if len(conn_wwpns) == 0: npiv_compat = self.configuration.\ storwize_svc_npiv_compatibility_mode if not npiv_compat: msg = (_('Could not get FC connection information for ' 'the host-volume connection. Is the host ' 'configured properly for FC connections?')) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: for node in self._state['storage_nodes'].itervalues(): conn_wwpns.extend(node['WWPN']) if not vol_opts['multipath']: # preferred_node_entry can have a list of WWPNs while only # one WWPN may be available on the storage host. Here we # walk through the nodes until we find one that works, # default to the first WWPN otherwise. for WWPN in preferred_node_entry['WWPN']: if WWPN in conn_wwpns: properties['target_wwn'] = WWPN break else: LOG.warning(_LW('Unable to find a preferred node match' ' for node %(node)s in the list of ' 'available WWPNs on %(host)s. ' 'Using first available.') % {'node': preferred_node, 'host': host_name}) properties['target_wwn'] = conn_wwpns[0] else: properties['target_wwn'] = conn_wwpns i_t_map = self._make_initiator_target_map(connector['wwpns'], conn_wwpns) properties['initiator_target_map'] = i_t_map # specific for z/VM, refer to cinder bug 1323993 if "zvm_fcp" in connector: properties['zvm_fcp'] = connector['zvm_fcp'] except Exception: with excutils.save_and_reraise_exception(): self.terminate_connection(volume, connector) LOG.error(_LE('initialize_connection: Failed ' 'to collect return ' 'properties for volume %(vol)s and connector ' '%(conn)s.\n') % {'vol': volume, 'conn': connector}) LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n ' 'connector %(conn)s\n properties: %(prop)s' % {'vol': volume, 'conn': connector, 'prop': properties}) return {'driver_volume_type': type_str, 'data': properties, } def _make_initiator_target_map(self, initiator_wwpns, target_wwpns): """Build a simplistic all-to-all mapping.""" i_t_map = {} for i_wwpn in initiator_wwpns: i_t_map[str(i_wwpn)] = [] for t_wwpn in target_wwpns: i_t_map[i_wwpn].append(t_wwpn) return i_t_map @fczm_utils.RemoveFCZone @utils.synchronized('storwize-host', external=True) def terminate_connection(self, volume, connector, **kwargs): """Cleanup after an iSCSI connection has been terminated. When we clean up a terminated connection between a given connector and volume, we: 1. Translate the given connector to a host name 2. Remove the volume-to-host mapping if it exists 3. 
Delete the host if it has no more mappings (hosts are created automatically by this driver when mappings are created) """ LOG.debug('enter: terminate_connection: volume %(vol)s with ' 'connector %(conn)s' % {'vol': volume, 'conn': connector}) vol_name = volume['name'] if 'host' in connector: # maybe two hosts on the storage, one is for FC and the other for # iSCSI, so get host according to protocol vol_opts = self._get_vdisk_params(volume['volume_type_id']) connector = connector.copy() if vol_opts['protocol'] == 'FC': connector.pop('initiator', None) elif vol_opts['protocol'] == 'iSCSI': connector.pop('wwnns', None) connector.pop('wwpns', None) host_name = self._helpers.get_host_from_connector(connector) if host_name is None: msg = (_('terminate_connection: Failed to get host name from' ' connector.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: # See bug #1244257 host_name = None info = {} if 'wwpns' in connector and host_name: target_wwpns = self._helpers.get_conn_fc_wwpns(host_name) init_targ_map = self._make_initiator_target_map(connector['wwpns'], target_wwpns) info = {'driver_volume_type': 'fibre_channel', 'data': {'initiator_target_map': init_targ_map}} self._helpers.unmap_vol_from_host(vol_name, host_name) LOG.debug('leave: terminate_connection: volume %(vol)s with ' 'connector %(conn)s' % {'vol': volume, 'conn': connector}) return info def create_volume(self, volume): opts = self._get_vdisk_params(volume['volume_type_id'], volume_metadata= volume.get('volume_metadata')) pool = self.configuration.storwize_svc_volpool_name self._helpers.create_vdisk(volume['name'], str(volume['size']), 'gb', pool, opts) if opts['qos']: self._helpers.add_vdisk_qos(volume['name'], opts['qos']) model_update = None if 'replication' in opts and opts['replication']: ctxt = context.get_admin_context() model_update = self.replication.create_replica(ctxt, volume) return model_update def delete_volume(self, volume): self._helpers.delete_vdisk(volume['name'], False) if volume['id'] in self._vdiskcopyops: del self._vdiskcopyops[volume['id']] if not len(self._vdiskcopyops): self._vdiskcopyops_loop.stop() self._vdiskcopyops_loop = None def create_snapshot(self, snapshot): ctxt = context.get_admin_context() try: source_vol = self.db.volume_get(ctxt, snapshot['volume_id']) except Exception: msg = (_('create_snapshot: get source volume failed.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) opts = self._get_vdisk_params(source_vol['volume_type_id']) self._helpers.create_copy(snapshot['volume_name'], snapshot['name'], snapshot['volume_id'], self.configuration, opts, False) def delete_snapshot(self, snapshot): self._helpers.delete_vdisk(snapshot['name'], False) def create_volume_from_snapshot(self, volume, snapshot): if volume['size'] != snapshot['volume_size']: msg = (_('create_volume_from_snapshot: Source and destination ' 'size differ.')) LOG.error(msg) raise exception.InvalidInput(message=msg) opts = self._get_vdisk_params(volume['volume_type_id'], volume_metadata= volume.get('volume_metadata')) self._helpers.create_copy(snapshot['name'], volume['name'], snapshot['id'], self.configuration, opts, True) if opts['qos']: self._helpers.add_vdisk_qos(volume['name'], opts['qos']) if 'replication' in opts and opts['replication']: ctxt = context.get_admin_context() replica_status = self.replication.create_replica(ctxt, volume) if replica_status: return replica_status def create_cloned_volume(self, tgt_volume, src_volume): if src_volume['size'] != tgt_volume['size']: msg = 
(_('create_cloned_volume: Source and destination ' 'size differ.')) LOG.error(msg) raise exception.InvalidInput(message=msg) opts = self._get_vdisk_params(tgt_volume['volume_type_id'], volume_metadata= tgt_volume.get('volume_metadata')) self._helpers.create_copy(src_volume['name'], tgt_volume['name'], src_volume['id'], self.configuration, opts, True) if opts['qos']: self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos']) if 'replication' in opts and opts['replication']: ctxt = context.get_admin_context() replica_status = self.replication.create_replica(ctxt, tgt_volume) if replica_status: return replica_status def extend_volume(self, volume, new_size): LOG.debug('enter: extend_volume: volume %s' % volume['id']) ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'], allow_snaps=False) if not ret: msg = (_('extend_volume: Extending a volume with snapshots is not ' 'supported.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) extend_amt = int(new_size) - volume['size'] self._helpers.extend_vdisk(volume['name'], extend_amt) LOG.debug('leave: extend_volume: volume %s' % volume['id']) def add_vdisk_copy(self, volume, dest_pool, vol_type): return self._helpers.add_vdisk_copy(volume, dest_pool, vol_type, self._state, self.configuration) def _add_vdisk_copy_op(self, ctxt, volume, new_op): metadata = self.db.volume_admin_metadata_get(ctxt.elevated(), volume['id']) curr_ops = metadata.get('vdiskcopyops', None) if curr_ops: curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')] new_ops_list = curr_ops_list.append(new_op) else: new_ops_list = [new_op] new_ops_str = ';'.join([':'.join(x) for x in new_ops_list]) self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'], {'vdiskcopyops': new_ops_str}, False) if volume['id'] in self._vdiskcopyops: self._vdiskcopyops[volume['id']].append(new_op) else: self._vdiskcopyops[volume['id']] = [new_op] # We added the first copy operation, so start the looping call if len(self._vdiskcopyops) == 1: self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall( self._check_volume_copy_ops) self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL) def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id): try: self._vdiskcopyops[volume['id']].remove((orig_copy_id, new_copy_id)) if not len(self._vdiskcopyops[volume['id']]): del self._vdiskcopyops[volume['id']] if not len(self._vdiskcopyops): self._vdiskcopyops_loop.stop() self._vdiskcopyops_loop = None except KeyError: msg = (_('_rm_vdisk_copy_op: Volume %s does not have any ' 'registered vdisk copy operations.') % volume['id']) LOG.error(msg) return except ValueError: msg = (_('_rm_vdisk_copy_op: Volume %(vol)s does not have the ' 'specified vdisk copy operation: orig=%(orig)s ' 'new=%(new)s.') % {'vol': volume['id'], 'orig': orig_copy_id, 'new': new_copy_id}) LOG.error(msg) return metadata = self.db.volume_admin_metadata_get(ctxt.elevated(), volume['id']) curr_ops = metadata.get('vdiskcopyops', None) if not curr_ops: msg = (_('_rm_vdisk_copy_op: Volume metadata %s does not have any ' 'registered vdisk copy operations.') % volume['id']) LOG.error(msg) return curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')] try: curr_ops_list.remove((orig_copy_id, new_copy_id)) except ValueError: msg = (_('_rm_vdisk_copy_op: Volume %(vol)s metadata does not ' 'have the specified vdisk copy operation: orig=%(orig)s ' 'new=%(new)s.') % {'vol': volume['id'], 'orig': orig_copy_id, 'new': new_copy_id}) LOG.error(msg) return if len(curr_ops_list): 
new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list]) self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'], {'vdiskcopyops': new_ops_str}, False) else: self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'], 'vdiskcopyops') def promote_replica(self, ctxt, volume): return self.replication.promote_replica(volume) def reenable_replication(self, ctxt, volume): return self.replication.reenable_replication(volume) def create_replica_test_volume(self, tgt_volume, src_volume): if src_volume['size'] != tgt_volume['size']: msg = (_('create_cloned_volume: Source and destination ' 'size differ.')) LOG.error(msg) raise exception.InvalidInput(message=msg) replica_status = self.replication.test_replica(tgt_volume, src_volume) return replica_status def get_replication_status(self, ctxt, volume): replica_status = None if self.replication: replica_status = self.replication.get_replication_status(volume) return replica_status def _check_volume_copy_ops(self): LOG.debug("enter: update volume copy status") ctxt = context.get_admin_context() copy_items = self._vdiskcopyops.items() for vol_id, copy_ops in copy_items: try: volume = self.db.volume_get(ctxt, vol_id) except Exception: LOG.warn(_LW('Volume %s does not exist.'), vol_id) del self._vdiskcopyops[vol_id] if not len(self._vdiskcopyops): self._vdiskcopyops_loop.stop() self._vdiskcopyops_loop = None continue for copy_op in copy_ops: try: synced = self._helpers.is_vdisk_copy_synced(volume['name'], copy_op[1]) except Exception: msg = (_('_check_volume_copy_ops: Volume %(vol)s does not ' 'have the specified vdisk copy operation: ' 'orig=%(orig)s new=%(new)s.') % {'vol': volume['id'], 'orig': copy_op[0], 'new': copy_op[1]}) LOG.info(msg) else: if synced: self._helpers.rm_vdisk_copy(volume['name'], copy_op[0]) self._rm_vdisk_copy_op(ctxt, volume, copy_op[0], copy_op[1]) LOG.debug("exit: update volume copy status") def migrate_volume(self, ctxt, volume, host): """Migrate directly if source and dest are managed by same storage. We create a new vdisk copy in the desired pool, and add the original vdisk copy to the admin_metadata of the volume to be deleted. The deletion will occur using a periodic task once the new copy is synced. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s' % {'id': volume['id'], 'host': host['host']}) false_ret = (False, None) dest_pool = self._helpers.can_migrate_to_host(host, self._state) if dest_pool is None: return false_ret ctxt = context.get_admin_context() if volume['volume_type_id'] is not None: volume_type_id = volume['volume_type_id'] vol_type = volume_types.get_volume_type(ctxt, volume_type_id) else: vol_type = None self._check_volume_copy_ops() new_op = self.add_vdisk_copy(volume['name'], dest_pool, vol_type) self._add_vdisk_copy_op(ctxt, volume, new_op) LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s' % {'id': volume['id'], 'host': host['host']}) return (True, None) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred. 
:param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ def retype_iogrp_property(volume, new, old): if new != old: self._helpers.change_vdisk_iogrp(volume['name'], self._state, (new, old)) LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,' 'diff=%(diff)s, host=%(host)s' % {'id': volume['id'], 'new_type': new_type, 'diff': diff, 'host': host}) ignore_keys = ['protocol', 'multipath'] no_copy_keys = ['warning', 'autoexpand', 'easytier'] copy_keys = ['rsize', 'grainsize', 'compression'] all_keys = ignore_keys + no_copy_keys + copy_keys old_opts = self._get_vdisk_params(volume['volume_type_id'], volume_metadata= volume.get('volume_matadata')) new_opts = self._get_vdisk_params(new_type['id'], volume_type=new_type) # Check if retype affects volume replication model_update = None old_type_replication = old_opts.get('replication', False) new_type_replication = new_opts.get('replication', False) # Delete replica if needed if old_type_replication and not new_type_replication: self.replication.delete_replica(volume) model_update = {'replication_status': 'disabled', 'replication_driver_data': None, 'replication_extended_status': None} vdisk_changes = [] need_copy = False for key in all_keys: if old_opts[key] != new_opts[key]: if key in copy_keys: need_copy = True break elif key in no_copy_keys: vdisk_changes.append(key) dest_location = host['capabilities'].get('location_info') if self._stats['location_info'] != dest_location: need_copy = True if need_copy: self._check_volume_copy_ops() dest_pool = self._helpers.can_migrate_to_host(host, self._state) if dest_pool is None: return False # If volume is replicated, can't copy if new_type_replication: msg = (_('Unable to retype: Current action needs volume-copy,' ' it is not allowed when new type is replication.' ' Volume = %s'), volume['id']) raise exception.VolumeDriverException(message=msg) retype_iogrp_property(volume, new_opts['iogrp'], old_opts['iogrp']) try: new_op = self.add_vdisk_copy(volume['name'], dest_pool, new_type) self._add_vdisk_copy_op(ctxt, volume, new_op) except exception.VolumeDriverException: # roll back changing iogrp property retype_iogrp_property(volume, old_opts['iogrp'], new_opts['iogrp']) msg = (_('Unable to retype: A copy of volume %s exists. ' 'Retyping would exceed the limit of 2 copies.'), volume['id']) raise exception.VolumeDriverException(message=msg) else: retype_iogrp_property(volume, new_opts['iogrp'], old_opts['iogrp']) self._helpers.change_vdisk_options(volume['name'], vdisk_changes, new_opts, self._state) if new_opts['qos']: # Add the new QoS setting to the volume. If the volume has an # old QoS setting, it will be overwritten. self._helpers.update_vdisk_qos(volume['name'], new_opts['qos']) elif old_opts['qos']: # If the old_opts contain QoS keys, disable them. 
self._helpers.disable_vdisk_qos(volume['name'], old_opts['qos']) # Add replica if needed if not old_type_replication and new_type_replication: model_update = self.replication.create_replica(ctxt, volume, new_type) LOG.debug('exit: retype: ild=%(id)s, new_type=%(new_type)s,' 'diff=%(diff)s, host=%(host)s' % {'id': volume['id'], 'new_type': new_type, 'diff': diff, 'host': host['host']}) return True, model_update def manage_existing(self, volume, ref): """Manages an existing vdisk. Renames the vdisk to match the expected name for the volume. Error checking done by manage_existing_get_size is not repeated - if we got here then we have a vdisk that isn't in use (or we don't care if it is in use. """ vdisk = self._helpers.vdisk_by_uid(ref['source-id']) if vdisk is None: reason = (_('No vdisk with the UID specified by source-id %s.') % ref['source-id']) raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) self._helpers.rename_vdisk(vdisk['name'], volume['name']) def manage_existing_get_size(self, volume, ref): """Return size of an existing Vdisk for manage_existing. existing_ref is a dictionary of the form: {'source-id': <uid of disk>} Optional elements are: 'manage_if_in_use': True/False (default is False) If set to True, a volume will be managed even if it is currently attached to a host system. """ # Check that the reference is valid if 'source-id' not in ref: reason = _('Reference must contain source-id element.') raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) # Check for existence of the vdisk vdisk = self._helpers.vdisk_by_uid(ref['source-id']) if vdisk is None: reason = (_('No vdisk with the UID specified by source-id %s.') % (ref['source-id'])) raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) # Check if the disk is in use, if we need to. manage_if_in_use = ref.get('manage_if_in_use', False) if (not manage_if_in_use and self._helpers.is_vdisk_in_use(vdisk['name'])): reason = _('The specified vdisk is mapped to a host.') raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) return int(math.ceil(float(vdisk['capacity']) / units.Gi)) def get_volume_stats(self, refresh=False): """Get volume stats. If we haven't gotten stats yet or 'refresh' is True, run update the stats first. """ if not self._stats or refresh: self._update_volume_stats() return self._stats def create_consistencygroup(self, context, group): """Create a consistency group. IBM Storwize will create CG until cg-snapshot creation, db will maintain the volumes and CG relationship. """ LOG.debug("Creating consistency group") model_update = {'status': 'available'} return model_update def delete_consistencygroup(self, context, group): """Deletes a consistency group. IBM Storwize will delete the volumes of the CG. """ LOG.debug("deleting consistency group") model_update = {} model_update['status'] = 'deleted' volumes = self.db.volume_get_all_by_group(context, group['id']) for volume in volumes: try: self._helpers.delete_vdisk(volume['name'], True) volume['status'] = 'deleted' except exception.VolumeBackendAPIException as err: volume['status'] = 'error_deleting' if model_update['status'] != 'error_deleting': model_update['status'] = 'error_deleting' LOG.error(_LE("Failed to delete the volume %(vol)s of CG. 
" "Exception: %(exception)s."), {'vol': volume['name'], 'exception': err}) return model_update, volumes def create_cgsnapshot(self, ctxt, cgsnapshot): """Creates a cgsnapshot.""" # Use cgsnapshot id as cg name cg_name = 'cg_snap-' + cgsnapshot['id'] # Create new cg as cg_snapshot self._helpers.create_fc_consistgrp(cg_name) snapshots = self.db.snapshot_get_all_for_cgsnapshot( ctxt, cgsnapshot['id']) timeout = self.configuration.storwize_svc_flashcopy_timeout model_update, snapshots_model = ( self._helpers.run_consistgrp_snapshots(cg_name, snapshots, self._state, self.configuration, timeout)) return model_update, snapshots_model def delete_cgsnapshot(self, context, cgsnapshot): """Deletes a cgsnapshot.""" cgsnapshot_id = cgsnapshot['id'] cg_name = 'cg_snap-' + cgsnapshot_id snapshots = self.db.snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id) model_update, snapshots_model = ( self._helpers.delete_consistgrp_snapshots(cg_name, snapshots)) return model_update, snapshots_model def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats") data = {} data['vendor_name'] = 'IBM' data['driver_version'] = self.VERSION data['storage_protocol'] = list(self._state['enabled_protocols']) data['total_capacity_gb'] = 0 # To be overwritten data['free_capacity_gb'] = 0 # To be overwritten data['reserved_percentage'] = self.configuration.reserved_percentage data['QoS_support'] = True data['consistencygroup_support'] = True pool = self.configuration.storwize_svc_volpool_name backend_name = self.configuration.safe_get('volume_backend_name') if not backend_name: backend_name = '%s_%s' % (self._state['system_name'], pool) data['volume_backend_name'] = backend_name attributes = self._helpers.get_pool_attrs(pool) if not attributes: LOG.error(_LE('Could not get pool data from the storage')) exception_message = (_('_update_volume_stats: ' 'Could not get storage pool data')) raise exception.VolumeBackendAPIException(data=exception_message) data['total_capacity_gb'] = (float(attributes['capacity']) / units.Gi) data['free_capacity_gb'] = (float(attributes['free_capacity']) / units.Gi) data['easytier_support'] = attributes['easy_tier'] in ['on', 'auto'] data['compression_support'] = self._state['compression_enabled'] data['location_info'] = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' % {'sys_id': self._state['system_id'], 'pool': pool}) if self.replication: data.update(self.replication.get_replication_info()) self._stats = data
[ "cinder.openstack.common.loopingcall.FixedIntervalLoopingCall", "cinder.i18n._LW", "cinder.i18n._LE", "cinder.volume.volume_types.get_volume_type", "cinder.exception.InvalidConnectorException", "cinder.exception.VolumeDriverException", "cinder.i18n._", "cinder.volume.drivers.ibm.storwize_svc.helpers.StorwizeHelpers", "oslo_config.cfg.StrOpt", "cinder.utils.synchronized", "time.sleep", "oslo_utils.excutils.save_and_reraise_exception", "cinder.volume.drivers.ibm.storwize_svc.replication.StorwizeSVCReplication.factory", "cinder.exception.VolumeBackendAPIException", "oslo_config.cfg.IntOpt", "cinder.openstack.common.log.getLogger", "oslo_config.cfg.BoolOpt", "cinder.context.get_admin_context", "cinder.exception.InvalidInput", "cinder.exception.ManageExistingInvalidReference" ]
[((2038, 2065), 'cinder.openstack.common.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2055, 2065), True, 'from cinder.openstack.common import log as logging\n'), ((2093, 2204), 'oslo_config.cfg.StrOpt', 'cfg.StrOpt', (['"""storwize_svc_volpool_name"""'], {'default': '"""volpool"""', 'help': '"""Storage system storage pool for volumes"""'}), "('storwize_svc_volpool_name', default='volpool', help=\n 'Storage system storage pool for volumes')\n", (2103, 2204), False, 'from oslo_config import cfg\n'), ((2235, 2362), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (['"""storwize_svc_vol_rsize"""'], {'default': '(2)', 'help': '"""Storage system space-efficiency parameter for volumes (percentage)"""'}), "('storwize_svc_vol_rsize', default=2, help=\n 'Storage system space-efficiency parameter for volumes (percentage)')\n", (2245, 2362), False, 'from oslo_config import cfg\n'), ((2416, 2545), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (['"""storwize_svc_vol_warning"""'], {'default': '(0)', 'help': '"""Storage system threshold for volume capacity warnings (percentage)"""'}), "('storwize_svc_vol_warning', default=0, help=\n 'Storage system threshold for volume capacity warnings (percentage)')\n", (2426, 2545), False, 'from oslo_config import cfg\n'), ((2599, 2729), 'oslo_config.cfg.BoolOpt', 'cfg.BoolOpt', (['"""storwize_svc_vol_autoexpand"""'], {'default': '(True)', 'help': '"""Storage system autoexpand parameter for volumes (True/False)"""'}), "('storwize_svc_vol_autoexpand', default=True, help=\n 'Storage system autoexpand parameter for volumes (True/False)')\n", (2610, 2729), False, 'from oslo_config import cfg\n'), ((2786, 2916), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (['"""storwize_svc_vol_grainsize"""'], {'default': '(256)', 'help': '"""Storage system grain size parameter for volumes (32/64/128/256)"""'}), "('storwize_svc_vol_grainsize', default=256, help=\n 'Storage system grain size parameter for volumes (32/64/128/256)')\n", (2796, 2916), False, 'from oslo_config import cfg\n'), ((2970, 3087), 'oslo_config.cfg.BoolOpt', 'cfg.BoolOpt', (['"""storwize_svc_vol_compression"""'], {'default': '(False)', 'help': '"""Storage system compression option for volumes"""'}), "('storwize_svc_vol_compression', default=False, help=\n 'Storage system compression option for volumes')\n", (2981, 3087), False, 'from oslo_config import cfg\n'), ((3120, 3216), 'oslo_config.cfg.BoolOpt', 'cfg.BoolOpt', (['"""storwize_svc_vol_easytier"""'], {'default': '(True)', 'help': '"""Enable Easy Tier for volumes"""'}), "('storwize_svc_vol_easytier', default=True, help=\n 'Enable Easy Tier for volumes')\n", (3131, 3216), False, 'from oslo_config import cfg\n'), ((3249, 3352), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (['"""storwize_svc_vol_iogrp"""'], {'default': '(0)', 'help': '"""The I/O group in which to allocate volumes"""'}), "('storwize_svc_vol_iogrp', default=0, help=\n 'The I/O group in which to allocate volumes')\n", (3259, 3352), False, 'from oslo_config import cfg\n'), ((3383, 3564), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (['"""storwize_svc_flashcopy_timeout"""'], {'default': '(120)', 'help': '"""Maximum number of seconds to wait for FlashCopy to be prepared. Maximum value is 600 seconds (10 minutes)"""'}), "('storwize_svc_flashcopy_timeout', default=120, help=\n 'Maximum number of seconds to wait for FlashCopy to be prepared. 
Maximum value is 600 seconds (10 minutes)'\n )\n", (3393, 3564), False, 'from oslo_config import cfg\n'), ((3613, 3720), 'oslo_config.cfg.StrOpt', 'cfg.StrOpt', (['"""storwize_svc_connection_protocol"""'], {'default': '"""iSCSI"""', 'help': '"""Connection protocol (iSCSI/FC)"""'}), "('storwize_svc_connection_protocol', default='iSCSI', help=\n 'Connection protocol (iSCSI/FC)')\n", (3623, 3720), False, 'from oslo_config import cfg\n'), ((3751, 3895), 'oslo_config.cfg.BoolOpt', 'cfg.BoolOpt', (['"""storwize_svc_iscsi_chap_enabled"""'], {'default': '(True)', 'help': '"""Configure CHAP authentication for iSCSI connections (Default: Enabled)"""'}), "('storwize_svc_iscsi_chap_enabled', default=True, help=\n 'Configure CHAP authentication for iSCSI connections (Default: Enabled)')\n", (3762, 3895), False, 'from oslo_config import cfg\n'), ((3952, 4097), 'oslo_config.cfg.BoolOpt', 'cfg.BoolOpt', (['"""storwize_svc_multipath_enabled"""'], {'default': '(False)', 'help': '"""Connect with multipath (FC only; iSCSI multipath is controlled by Nova)"""'}), "('storwize_svc_multipath_enabled', default=False, help=\n 'Connect with multipath (FC only; iSCSI multipath is controlled by Nova)')\n", (3963, 4097), False, 'from oslo_config import cfg\n'), ((4154, 4264), 'oslo_config.cfg.BoolOpt', 'cfg.BoolOpt', (['"""storwize_svc_multihostmap_enabled"""'], {'default': '(True)', 'help': '"""Allows vdisk to multi host mapping"""'}), "('storwize_svc_multihostmap_enabled', default=True, help=\n 'Allows vdisk to multi host mapping')\n", (4165, 4264), False, 'from oslo_config import cfg\n'), ((4297, 4548), 'oslo_config.cfg.BoolOpt', 'cfg.BoolOpt', (['"""storwize_svc_npiv_compatibility_mode"""'], {'default': '(False)', 'help': '"""Indicate whether svc driver is compatible for NPIV setup. If it is compatible, it will allow no wwpns being returned on get_conn_fc_wwpns during initialize_connection"""'}), "('storwize_svc_npiv_compatibility_mode', default=False, help=\n 'Indicate whether svc driver is compatible for NPIV setup. 
If it is compatible, it will allow no wwpns being returned on get_conn_fc_wwpns during initialize_connection'\n )\n", (4308, 4548), False, 'from oslo_config import cfg\n'), ((4648, 4759), 'oslo_config.cfg.BoolOpt', 'cfg.BoolOpt', (['"""storwize_svc_allow_tenant_qos"""'], {'default': '(False)', 'help': '"""Allow tenants to specify QOS on create"""'}), "('storwize_svc_allow_tenant_qos', default=False, help=\n 'Allow tenants to specify QOS on create')\n", (4659, 4759), False, 'from oslo_config import cfg\n'), ((4792, 4998), 'oslo_config.cfg.StrOpt', 'cfg.StrOpt', (['"""storwize_svc_stretched_cluster_partner"""'], {'default': 'None', 'help': '"""If operating in stretched cluster mode, specify the name of the pool in which mirrored copies are stored.Example: "pool2\\""""'}), '(\'storwize_svc_stretched_cluster_partner\', default=None, help=\n \'If operating in stretched cluster mode, specify the name of the pool in which mirrored copies are stored.Example: "pool2"\'\n )\n', (4802, 4998), False, 'from oslo_config import cfg\n'), ((13868, 13918), 'cinder.utils.synchronized', 'utils.synchronized', (['"""storwize-host"""'], {'external': '(True)'}), "('storwize-host', external=True)\n", (13886, 13918), False, 'from cinder import utils\n'), ((23257, 23307), 'cinder.utils.synchronized', 'utils.synchronized', (['"""storwize-host"""'], {'external': '(True)'}), "('storwize-host', external=True)\n", (23275, 23307), False, 'from cinder import utils\n'), ((6253, 6300), 'cinder.volume.drivers.ibm.storwize_svc.helpers.StorwizeHelpers', 'storwize_helpers.StorwizeHelpers', (['self._run_ssh'], {}), '(self._run_ssh)\n', (6285, 6300), True, 'from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers\n'), ((6910, 6923), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6920, 6923), False, 'import time\n'), ((7250, 7299), 'cinder.volume.drivers.ibm.storwize_svc.replication.StorwizeSVCReplication.factory', 'storwize_rep.StorwizeSVCReplication.factory', (['self'], {}), '(self)\n', (7293, 7299), True, 'from cinder.volume.drivers.ibm.storwize_svc import replication as storwize_rep\n'), ((26677, 26704), 'cinder.context.get_admin_context', 'context.get_admin_context', ([], {}), '()\n', (26702, 26704), False, 'from cinder import context\n'), ((35042, 35069), 'cinder.context.get_admin_context', 'context.get_admin_context', ([], {}), '()\n', (35067, 35069), False, 'from cinder import context\n'), ((37488, 37515), 'cinder.context.get_admin_context', 'context.get_admin_context', ([], {}), '()\n', (37513, 37515), False, 'from cinder import context\n'), ((9167, 9202), 'cinder.i18n._', '_', (['"""do_setup: No configured nodes."""'], {}), "('do_setup: No configured nodes.')\n", (9168, 9202), False, 'from cinder.i18n import _, _LE, _LW\n'), ((9248, 9292), 'cinder.exception.VolumeDriverException', 'exception.VolumeDriverException', ([], {'message': 'msg'}), '(message=msg)\n', (9279, 9292), False, 'from cinder import exception\n'), ((9409, 9436), 'cinder.context.get_admin_context', 'context.get_admin_context', ([], {}), '()\n', (9434, 9436), False, 'from cinder import context\n'), ((10102, 10167), 'cinder.openstack.common.loopingcall.FixedIntervalLoopingCall', 'loopingcall.FixedIntervalLoopingCall', (['self._check_volume_copy_ops'], {}), '(self._check_volume_copy_ops)\n', (10138, 10167), False, 'from cinder.openstack.common import loopingcall\n'), ((10576, 10612), 'cinder.i18n._', '_', (['"""Unable to determine system name"""'], {}), "('Unable to determine system name')\n", (10577, 10612), False, 'from 
cinder.i18n import _, _LE, _LW\n'), ((10632, 10687), 'cinder.exception.VolumeBackendAPIException', 'exception.VolumeBackendAPIException', ([], {'data': 'exception_msg'}), '(data=exception_msg)\n', (10667, 10687), False, 'from cinder import exception\n'), ((10762, 10796), 'cinder.i18n._', '_', (['"""Unable to determine system id"""'], {}), "('Unable to determine system id')\n", (10763, 10796), False, 'from cinder.i18n import _, _LE, _LW\n'), ((10816, 10871), 'cinder.exception.VolumeBackendAPIException', 'exception.VolumeBackendAPIException', ([], {'data': 'exception_msg'}), '(data=exception_msg)\n', (10851, 10871), False, 'from cinder import exception\n'), ((13270, 13333), 'cinder.i18n._LE', '_LE', (['"""The connector does not contain the required information."""'], {}), "('The connector does not contain the required information.')\n", (13273, 13333), False, 'from cinder.i18n import _, _LE, _LW\n'), ((13406, 13471), 'cinder.exception.InvalidConnectorException', 'exception.InvalidConnectorException', ([], {'missing': '"""initiator or wwpns"""'}), "(missing='initiator or wwpns')\n", (13441, 13471), False, 'from cinder import exception\n'), ((16481, 16525), 'cinder.exception.VolumeDriverException', 'exception.VolumeDriverException', ([], {'message': 'msg'}), '(message=msg)\n', (16512, 16525), False, 'from cinder import exception\n'), ((26159, 26186), 'cinder.context.get_admin_context', 'context.get_admin_context', ([], {}), '()\n', (26184, 26186), False, 'from cinder import context\n'), ((27481, 27550), 'cinder.i18n._', '_', (['"""create_volume_from_snapshot: Source and destination size differ."""'], {}), "('create_volume_from_snapshot: Source and destination size differ.')\n", (27482, 27550), False, 'from cinder.i18n import _, _LE, _LW\n'), ((27621, 27656), 'cinder.exception.InvalidInput', 'exception.InvalidInput', ([], {'message': 'msg'}), '(message=msg)\n', (27643, 27656), False, 'from cinder import exception\n'), ((28201, 28228), 'cinder.context.get_admin_context', 'context.get_admin_context', ([], {}), '()\n', (28226, 28228), False, 'from cinder import context\n'), ((28506, 28568), 'cinder.i18n._', '_', (['"""create_cloned_volume: Source and destination size differ."""'], {}), "('create_cloned_volume: Source and destination size differ.')\n", (28507, 28568), False, 'from cinder.i18n import _, _LE, _LW\n'), ((28639, 28674), 'cinder.exception.InvalidInput', 'exception.InvalidInput', ([], {'message': 'msg'}), '(message=msg)\n', (28661, 28674), False, 'from cinder import exception\n'), ((29239, 29266), 'cinder.context.get_admin_context', 'context.get_admin_context', ([], {}), '()\n', (29264, 29266), False, 'from cinder import context\n'), ((29717, 29788), 'cinder.i18n._', '_', (['"""extend_volume: Extending a volume with snapshots is not supported."""'], {}), "('extend_volume: Extending a volume with snapshots is not supported.')\n", (29718, 29788), False, 'from cinder.i18n import _, _LE, _LW\n'), ((29859, 29903), 'cinder.exception.VolumeDriverException', 'exception.VolumeDriverException', ([], {'message': 'msg'}), '(message=msg)\n', (29890, 29903), False, 'from cinder import exception\n'), ((31396, 31461), 'cinder.openstack.common.loopingcall.FixedIntervalLoopingCall', 'loopingcall.FixedIntervalLoopingCall', (['self._check_volume_copy_ops'], {}), '(self._check_volume_copy_ops)\n', (31432, 31461), False, 'from cinder.openstack.common import loopingcall\n'), ((34382, 34444), 'cinder.i18n._', '_', (['"""create_cloned_volume: Source and destination size differ."""'], {}), 
"('create_cloned_volume: Source and destination size differ.')\n", (34383, 34444), False, 'from cinder.i18n import _, _LE, _LW\n'), ((34515, 34550), 'cinder.exception.InvalidInput', 'exception.InvalidInput', ([], {'message': 'msg'}), '(message=msg)\n', (34537, 34550), False, 'from cinder import exception\n'), ((37642, 37692), 'cinder.volume.volume_types.get_volume_type', 'volume_types.get_volume_type', (['ctxt', 'volume_type_id'], {}), '(ctxt, volume_type_id)\n', (37670, 37692), False, 'from cinder.volume import volume_types\n'), ((44152, 44225), 'cinder.exception.ManageExistingInvalidReference', 'exception.ManageExistingInvalidReference', ([], {'existing_ref': 'ref', 'reason': 'reason'}), '(existing_ref=ref, reason=reason)\n', (44192, 44225), False, 'from cinder import exception\n'), ((44880, 44926), 'cinder.i18n._', '_', (['"""Reference must contain source-id element."""'], {}), "('Reference must contain source-id element.')\n", (44881, 44926), False, 'from cinder.i18n import _, _LE, _LW\n'), ((44945, 45018), 'cinder.exception.ManageExistingInvalidReference', 'exception.ManageExistingInvalidReference', ([], {'existing_ref': 'ref', 'reason': 'reason'}), '(existing_ref=ref, reason=reason)\n', (44985, 45018), False, 'from cinder import exception\n'), ((45347, 45420), 'cinder.exception.ManageExistingInvalidReference', 'exception.ManageExistingInvalidReference', ([], {'existing_ref': 'ref', 'reason': 'reason'}), '(existing_ref=ref, reason=reason)\n', (45387, 45420), False, 'from cinder import exception\n'), ((45718, 45763), 'cinder.i18n._', '_', (['"""The specified vdisk is mapped to a host."""'], {}), "('The specified vdisk is mapped to a host.')\n", (45719, 45763), False, 'from cinder.i18n import _, _LE, _LW\n'), ((45782, 45855), 'cinder.exception.ManageExistingInvalidReference', 'exception.ManageExistingInvalidReference', ([], {'existing_ref': 'ref', 'reason': 'reason'}), '(existing_ref=ref, reason=reason)\n', (45822, 45855), False, 'from cinder import exception\n'), ((50067, 50125), 'cinder.i18n._', '_', (['"""_update_volume_stats: Could not get storage pool data"""'], {}), "('_update_volume_stats: Could not get storage pool data')\n", (50068, 50125), False, 'from cinder.i18n import _, _LE, _LW\n'), ((50183, 50242), 'cinder.exception.VolumeBackendAPIException', 'exception.VolumeBackendAPIException', ([], {'data': 'exception_message'}), '(data=exception_message)\n', (50218, 50242), False, 'from cinder import exception\n'), ((7596, 7630), 'cinder.exception.InvalidInput', 'exception.InvalidInput', ([], {'reason': 'msg'}), '(reason=msg)\n', (7618, 7630), False, 'from cinder import exception\n'), ((16330, 16396), 'cinder.i18n._', '_', (['"""initialize_connection: Failed to get attributes for volume %s"""'], {}), "('initialize_connection: Failed to get attributes for volume %s')\n", (16331, 16396), False, 'from cinder.i18n import _, _LE, _LW\n'), ((17157, 17202), 'cinder.exception.VolumeBackendAPIException', 'exception.VolumeBackendAPIException', ([], {'data': 'msg'}), '(data=msg)\n', (17192, 17202), False, 'from cinder import exception\n'), ((18006, 18051), 'cinder.exception.VolumeBackendAPIException', 'exception.VolumeBackendAPIException', ([], {'data': 'msg'}), '(data=msg)\n', (18041, 18051), False, 'from cinder import exception\n'), ((24630, 24696), 'cinder.i18n._', '_', (['"""terminate_connection: Failed to get host name from connector."""'], {}), "('terminate_connection: Failed to get host name from connector.')\n", (24631, 24696), False, 'from cinder.i18n import _, _LE, _LW\n'), ((24779, 
24823), 'cinder.exception.VolumeDriverException', 'exception.VolumeDriverException', ([], {'message': 'msg'}), '(message=msg)\n', (24810, 24823), False, 'from cinder import exception\n'), ((26836, 26883), 'cinder.i18n._', '_', (['"""create_snapshot: get source volume failed."""'], {}), "('create_snapshot: get source volume failed.')\n", (26837, 26883), False, 'from cinder.i18n import _, _LE, _LW\n'), ((26930, 26974), 'cinder.exception.VolumeDriverException', 'exception.VolumeDriverException', ([], {'message': 'msg'}), '(message=msg)\n', (26961, 26974), False, 'from cinder import exception\n'), ((32845, 32944), 'cinder.i18n._', '_', (['"""_rm_vdisk_copy_op: Volume metadata %s does not have any registered vdisk copy operations."""'], {}), "('_rm_vdisk_copy_op: Volume metadata %s does not have any registered vdisk copy operations.'\n )\n", (32846, 32944), False, 'from cinder.i18n import _, _LE, _LW\n'), ((41387, 41431), 'cinder.exception.VolumeDriverException', 'exception.VolumeDriverException', ([], {'message': 'msg'}), '(message=msg)\n', (41418, 41431), False, 'from cinder import exception\n'), ((44038, 44091), 'cinder.i18n._', '_', (['"""No vdisk with the UID specified by source-id %s."""'], {}), "('No vdisk with the UID specified by source-id %s.')\n", (44039, 44091), False, 'from cinder.i18n import _, _LE, _LW\n'), ((45231, 45284), 'cinder.i18n._', '_', (['"""No vdisk with the UID specified by source-id %s."""'], {}), "('No vdisk with the UID specified by source-id %s.')\n", (45232, 45284), False, 'from cinder.i18n import _, _LE, _LW\n'), ((49985, 50032), 'cinder.i18n._LE', '_LE', (['"""Could not get pool data from the storage"""'], {}), "('Could not get pool data from the storage')\n", (49988, 50032), False, 'from cinder.i18n import _, _LE, _LW\n'), ((7531, 7570), 'cinder.i18n._', '_', (['"""Failed getting details for pool %s"""'], {}), "('Failed getting details for pool %s')\n", (7532, 7570), False, 'from cinder.i18n import _, _LE, _LW\n'), ((11390, 11509), 'cinder.i18n._', '_', (['"""Password or SSH private key is required for authentication: set either san_password or san_private_key option"""'], {}), "('Password or SSH private key is required for authentication: set either san_password or san_private_key option'\n )\n", (11391, 11509), False, 'from cinder.i18n import _, _LE, _LW\n'), ((12604, 12656), 'cinder.i18n._LE', '_LE', (['"""ensure_export: Volume %s not found on storage"""'], {}), "('ensure_export: Volume %s not found on storage')\n", (12607, 12656), False, 'from cinder.i18n import _, _LE, _LW\n'), ((17033, 17099), 'cinder.i18n._', '_', (['"""initialize_connection: Missing volume attribute for volume %s"""'], {}), "('initialize_connection: Missing volume attribute for volume %s')\n", (17034, 17099), False, 'from cinder.i18n import _, _LE, _LW\n'), ((17779, 17865), 'cinder.i18n._', '_', (['"""initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s"""'], {}), "('initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s'\n )\n", (17780, 17865), False, 'from cinder.i18n import _, _LE, _LW\n'), ((22183, 22220), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (22218, 22220), False, 'from oslo_utils import excutils\n'), ((32073, 32163), 'cinder.i18n._', '_', (['"""_rm_vdisk_copy_op: Volume %s does not have any registered vdisk copy operations."""'], {}), "('_rm_vdisk_copy_op: Volume %s does not have any registered vdisk copy operations.'\n )\n", (32074, 32163), False, 'from cinder.i18n 
import _, _LE, _LW\n'), ((32291, 32411), 'cinder.i18n._', '_', (['"""_rm_vdisk_copy_op: Volume %(vol)s does not have the specified vdisk copy operation: orig=%(orig)s new=%(new)s."""'], {}), "('_rm_vdisk_copy_op: Volume %(vol)s does not have the specified vdisk copy operation: orig=%(orig)s new=%(new)s.'\n )\n", (32292, 32411), False, 'from cinder.i18n import _, _LE, _LW\n'), ((33222, 33351), 'cinder.i18n._', '_', (['"""_rm_vdisk_copy_op: Volume %(vol)s metadata does not have the specified vdisk copy operation: orig=%(orig)s new=%(new)s."""'], {}), "('_rm_vdisk_copy_op: Volume %(vol)s metadata does not have the specified vdisk copy operation: orig=%(orig)s new=%(new)s.'\n )\n", (33223, 33351), False, 'from cinder.i18n import _, _LE, _LW\n'), ((41177, 41298), 'cinder.i18n._', '_', (['"""Unable to retype: Current action needs volume-copy, it is not allowed when new type is replication. Volume = %s"""'], {}), "('Unable to retype: Current action needs volume-copy, it is not allowed when new type is replication. Volume = %s'\n )\n", (41178, 41298), False, 'from cinder.i18n import _, _LE, _LW\n'), ((42266, 42310), 'cinder.exception.VolumeDriverException', 'exception.VolumeDriverException', ([], {'message': 'msg'}), '(message=msg)\n', (42297, 42310), False, 'from cinder import exception\n'), ((11842, 11949), 'cinder.i18n._', '_', (['"""Illegal value %d specified for storwize_svc_flashcopy_timeout: valid values are between 0 and 600"""'], {}), "('Illegal value %d specified for storwize_svc_flashcopy_timeout: valid values are between 0 and 600'\n )\n", (11843, 11949), False, 'from cinder.i18n import _, _LE, _LW\n'), ((16104, 16159), 'cinder.i18n._LW', '_LW', (['"""CHAP secret exists for host but CHAP is disabled"""'], {}), "('CHAP secret exists for host but CHAP is disabled')\n", (16107, 16159), False, 'from cinder.i18n import _, _LE, _LW\n'), ((16924, 16979), 'cinder.i18n._LE', '_LE', (['"""Did not find expected column name in lsvdisk: %s"""'], {}), "('Did not find expected column name in lsvdisk: %s')\n", (16927, 16979), False, 'from cinder.i18n import _, _LE, _LW\n'), ((18250, 18323), 'cinder.i18n._LW', '_LW', (['"""initialize_connection: Did not find a preferred node for volume %s"""'], {}), "('initialize_connection: Did not find a preferred node for volume %s')\n", (18253, 18323), False, 'from cinder.i18n import _, _LE, _LW\n'), ((20147, 20280), 'cinder.i18n._', '_', (['"""Could not get FC connection information for the host-volume connection. Is the host configured properly for FC connections?"""'], {}), "('Could not get FC connection information for the host-volume connection. Is the host configured properly for FC connections?'\n )\n", (20148, 20280), False, 'from cinder.i18n import _, _LE, _LW\n'), ((20418, 20463), 'cinder.exception.VolumeBackendAPIException', 'exception.VolumeBackendAPIException', ([], {'data': 'msg'}), '(data=msg)\n', (20453, 20463), False, 'from cinder import exception\n'), ((35292, 35324), 'cinder.i18n._LW', '_LW', (['"""Volume %s does not exist."""'], {}), "('Volume %s does not exist.')\n", (35295, 35324), False, 'from cinder.i18n import _, _LE, _LW\n'), ((42081, 42182), 'cinder.i18n._', '_', (['"""Unable to retype: A copy of volume %s exists. Retyping would exceed the limit of 2 copies."""'], {}), "('Unable to retype: A copy of volume %s exists. Retyping would exceed the limit of 2 copies.'\n )\n", (42082, 42182), False, 'from cinder.i18n import _, _LE, _LW\n'), ((47409, 47484), 'cinder.i18n._LE', '_LE', (['"""Failed to delete the volume %(vol)s of CG. 
Exception: %(exception)s."""'], {}), "('Failed to delete the volume %(vol)s of CG. Exception: %(exception)s.')\n", (47412, 47484), False, 'from cinder.i18n import _, _LE, _LW\n'), ((11135, 11153), 'cinder.i18n._', '_', (['"""%s is not set"""'], {}), "('%s is not set')\n", (11136, 11153), False, 'from cinder.i18n import _, _LE, _LW\n'), ((22309, 22427), 'cinder.i18n._LE', '_LE', (['"""initialize_connection: Failed to collect return properties for volume %(vol)s and connector %(conn)s.\n"""'], {}), '("""initialize_connection: Failed to collect return properties for volume %(vol)s and connector %(conn)s.\n"""\n )\n', (22312, 22427), False, 'from cinder.i18n import _, _LE, _LW\n'), ((35832, 35957), 'cinder.i18n._', '_', (['"""_check_volume_copy_ops: Volume %(vol)s does not have the specified vdisk copy operation: orig=%(orig)s new=%(new)s."""'], {}), "('_check_volume_copy_ops: Volume %(vol)s does not have the specified vdisk copy operation: orig=%(orig)s new=%(new)s.'\n )\n", (35833, 35957), False, 'from cinder.i18n import _, _LE, _LW\n'), ((21232, 21366), 'cinder.i18n._LW', '_LW', (['"""Unable to find a preferred node match for node %(node)s in the list of available WWPNs on %(host)s. Using first available."""'], {}), "('Unable to find a preferred node match for node %(node)s in the list of available WWPNs on %(host)s. Using first available.'\n )\n", (21235, 21366), False, 'from cinder.i18n import _, _LE, _LW\n')]
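The driver sample above persists in-flight vdisk copy operations in volume admin metadata as a flat string of 'orig:new' pairs joined by ';' (see _add_vdisk_copy_op and the do_setup rebuild loop). A minimal, standalone sketch of that round-trip; the helper names and IDs are illustrative, only the ':'/';' framing comes from the driver:

# Sketch of the 'vdiskcopyops' metadata encoding used by the driver above.
def encode_copy_ops(ops):
    # [('0', '1'), ('2', '3')] -> '0:1;2:3'
    return ';'.join(':'.join(op) for op in ops)

def decode_copy_ops(raw):
    # '0:1;2:3' -> [('0', '1'), ('2', '3')]
    return [tuple(x.split(':')) for x in raw.split(';')]

ops = [('0', '1'), ('2', '3')]
assert decode_copy_ops(encode_copy_ops(ops)) == ops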
import collections
import fileinput

f = fileinput.input()


def build_count(vals):
    """DP over diagonal sizes (ascending): number of ways to place k
    mutually non-attacking pieces with at most one piece per diagonal.

    Recurrence: r[i][k] = r[i-1][k] + r[i-1][k-1] * (vals[i-1] - (k - 1)),
    i.e. either skip diagonal i, or put a piece on one of its cells that
    is not attacked by the k-1 pieces already placed.
    """
    n = len(vals)
    r = [[0 for i in range(n + 1)] for j in range(n + 1)]
    r[1][1] = vals[0]
    r[1][0] = 1
    for i in range(2, n + 1):
        r[i][0] = 1
        for k in range(1, i + 1):
            r[i][k] = r[i - 1][k] + r[i - 1][k - 1] * (vals[i - 1] - k + 1)
    return r[-1]


def build(n):
    """For an n x n board, split the diagonals (indexed by i + j) into the
    two independent color classes by parity, count each class with
    build_count, then convolve the two count vectors."""
    if n == 1:
        return [1, 1]
    # Diagonal sizes sorted ascending (most_common() returns descending).
    diagonals = collections.Counter(i + j for i in range(n) for j in range(n)).most_common()
    diagonals.reverse()
    black = [v for k, v in diagonals if k % 2 == 0]
    white = [v for k, v in diagonals if k % 2 == 1]
    black_count = build_count(black)
    white_count = build_count(white)
    result = []
    for k in range(2 * n):
        count = 0
        for i in range(k + 1):
            b = black_count[i] if i < len(black_count) else 0
            j = k - i
            w = white_count[j] if j < len(white_count) else 0
            count += w * b
        result.append(count)
    return result


cache = [build(i) for i in range(1, 31)]


def solve(n, k):
    if k >= 2 * n:
        return 0
    return cache[n - 1][k]


assert solve(2, 1) == 4
assert solve(1, 0) == 1
assert solve(1, 1) == 1

for l in f:
    n, k = map(int, l.split())
    if n == 0 and k == 0:
        break
    print(solve(n, k))
[ "fileinput.input" ]
[((41, 58), 'fileinput.input', 'fileinput.input', ([], {}), '()\n', (56, 58), False, 'import fileinput\n')]
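The solver above evidently counts placements of k mutually non-attacking bishops on an n x n board; its own asserts (solve(2, 1) == 4, etc.) are consistent with that reading. A brute-force cross-check for small boards, written under that assumption; the helper name is made up:

from itertools import combinations

def brute_force_bishops(n, k):
    # Count k-subsets of cells such that no two cells share a diagonal,
    # i.e. |r1 - r2| != |c1 - c2| for every pair. Exponential; small n only.
    cells = [(r, c) for r in range(n) for c in range(n)]
    def non_attacking(group):
        return all(abs(a[0] - b[0]) != abs(a[1] - b[1])
                   for a, b in combinations(group, 2))
    return sum(1 for group in combinations(cells, k) if non_attacking(group))

assert brute_force_bishops(2, 1) == 4   # agrees with solve(2, 1)
assert brute_force_bishops(3, 2) == 26  # agrees with solve(3, 2) from the DP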
import os
from zipfile import ZipFile, ZIP_DEFLATED
from shutil import rmtree

from consolemenu import *
from consolemenu.items import *

files_abs_path = []
filenames = []
extracted = []


class Zipper:
    @staticmethod
    def get_zip_files():
        """Collect every *.zip file in the current directory."""
        print("FILES:\n")
        os.chdir('./')
        for item in os.listdir('./'):
            if item.endswith('.zip'):
                global files_abs_path
                files_abs_path.append(os.path.abspath(item))
                global filenames
                filenames.append(item)

    def main_menu(self, file_name_list):
        menu = ConsoleMenu("FILES")
        for item in file_name_list:
            menu_item = FunctionItem(item, self.unzip, [file_name_list.index(item)])
            menu.append_item(menu_item)
        menu.show()

    # Note: unzip and create_zip_file must be instance methods, not
    # @staticmethod, so that the bound `self.unzip` handed to FunctionItem
    # actually receives `self` (the previous static versions raised a
    # TypeError when the menu invoked them with a single argument).
    def unzip(self, index):
        print("Unzipping...")
        zip_ref = ZipFile(filenames[index])
        global extracted
        extracted = zip_ref.namelist()
        zip_ref.extractall('./')
        zip_ref.close()
        self.create_zip_file(index)

    def create_zip_file(self, index):
        """Re-compress the extracted members with DEFLATE."""
        print("Creating symbols.zip...")
        zipped_files = ZipFile(filenames[index], 'w', ZIP_DEFLATED)
        for item in extracted:
            zipped_files.write(item)
        zipped_files.close()
        self.clean()

    @staticmethod
    def clean():
        global files_abs_path, filenames, extracted
        print("Cleaning...")
        for item in extracted:
            # rmtree() only works on directories; plain files need os.remove().
            if os.path.isdir(item):
                rmtree(item)
            elif os.path.isfile(item):
                os.remove(item)
        files_abs_path = []
        filenames = []
        extracted = []
        print("Finished successfully!")


def main():
    zipper = Zipper()
    zipper.get_zip_files()
    zipper.main_menu(filenames)


if __name__ == "__main__":
    main()
[ "os.path.abspath", "zipfile.ZipFile", "os.path.isdir", "os.path.isfile", "os.chdir", "shutil.rmtree", "os.listdir" ]
[((282, 296), 'os.chdir', 'os.chdir', (['"""./"""'], {}), "('./')\n", (290, 296), False, 'import os\n'), ((317, 333), 'os.listdir', 'os.listdir', (['"""./"""'], {}), "('./')\n", (327, 333), False, 'import os\n'), ((900, 925), 'zipfile.ZipFile', 'ZipFile', (['filenames[index]'], {}), '(filenames[index])\n', (907, 925), False, 'from zipfile import ZipFile, ZIP_DEFLATED\n'), ((1206, 1250), 'zipfile.ZipFile', 'ZipFile', (['filenames[index]', '"""w"""', 'ZIP_DEFLATED'], {}), "(filenames[index], 'w', ZIP_DEFLATED)\n", (1213, 1250), False, 'from zipfile import ZipFile, ZIP_DEFLATED\n'), ((1578, 1597), 'os.path.isdir', 'os.path.isdir', (['item'], {}), '(item)\n', (1591, 1597), False, 'import os\n'), ((1601, 1621), 'os.path.isfile', 'os.path.isfile', (['item'], {}), '(item)\n', (1615, 1621), False, 'import os\n'), ((1639, 1651), 'shutil.rmtree', 'rmtree', (['item'], {}), '(item)\n', (1645, 1651), False, 'from shutil import rmtree\n'), ((449, 470), 'os.path.abspath', 'os.path.abspath', (['item'], {}), '(item)\n', (464, 470), False, 'import os\n')]
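The Zipper class above is a list/extract/re-zip cycle over ZipFile. A self-contained sketch of the same round-trip run in a temporary directory; the file names here are hypothetical:

import os
import tempfile
from zipfile import ZipFile, ZIP_DEFLATED

with tempfile.TemporaryDirectory() as tmp:
    src = os.path.join(tmp, 'member.txt')
    with open(src, 'w') as fh:
        fh.write('payload')

    archive = os.path.join(tmp, 'demo.zip')
    with ZipFile(archive, 'w', ZIP_DEFLATED) as zf:  # compress, as create_zip_file() does
        zf.write(src, arcname='member.txt')

    with ZipFile(archive) as zf:  # list and extract, as unzip() does
        assert zf.namelist() == ['member.txt']
        zf.extractall(tmp)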
import sys

import cx_Oracle
from werkzeug._compat import text_type, to_bytes, to_unicode

DATABASE = 'ORACLE'
NEED_DATABASE_NAME = True
NEED_LOGIN = True
NEED_PASSWORD = True
NEED_ENCODING = False
NEED_HOST = False
NEED_PORT = False
CAN_CHANGE_TYPE = False
CAN_CHANGE_SIZE = False
DDL_ROLLBACK = False
NEED_GENERATOR = True

FROM = '"%s" %s '
LEFT_OUTER_JOIN = 'LEFT OUTER JOIN "%s" %s'
FIELD_AS = 'AS'
LIKE = 'LIKE'
DESC = 'DESC NULLS LAST'

JAM_TYPES = TEXT, INTEGER, FLOAT, CURRENCY, DATE, DATETIME, BOOLEAN, LONGTEXT, KEYS, FILE, IMAGE = range(1, 12)
FIELD_TYPES = {
    INTEGER: 'NUMBER',
    TEXT: 'VARCHAR2',
    FLOAT: 'DOUBLE PRECISION',
    CURRENCY: 'DOUBLE PRECISION',
    DATE: 'DATE',
    DATETIME: 'TIMESTAMP',
    BOOLEAN: 'NUMBER',
    LONGTEXT: 'CLOB',
    KEYS: 'CLOB',
    FILE: 'CLOB',
    IMAGE: 'CLOB'
}

def connect(database, user, password, host, port, encoding, server):
    if database and user and password:
        return cx_Oracle.connect(user=user, password=password, dsn=database)
    elif database:
        return cx_Oracle.connect(dsn=database)

get_lastrowid = None

def get_fields(query, fields, alias):
    sql = ''
    for field in fields:
        if field.master_field:
            pass
        elif field.calculated:
            sql += 'NULL AS "%s", ' % field.db_field_name
        else:
            sql += '%s."%s", ' % (alias, field.db_field_name)
    if query['__expanded']:
        for field in fields:
            if field.lookup_item:
                sql += '%s_LOOKUP, ' % field.db_field_name
    sql = sql[:-2]
    return sql

def get_select(query, fields_clause, from_clause, where_clause, group_clause, order_clause, fields):
    start = fields_clause
    end = ''.join([from_clause, where_clause, group_clause, order_clause])
    offset = query['__offset']
    limit = query['__limit']
    result = 'SELECT %s FROM %s' % (start, end)
    if limit:
        flds = get_fields(query, fields, 'b')
        rnum = offset + 1
        rownum = offset + limit
        if offset == 0:
            rnum = 0
        result = "SELECT %s FROM (SELECT a.*, rownum rnum FROM (%s) a WHERE rownum <= %s) b WHERE rnum >= %s" % \
            (flds, result, rownum, rnum)
    return result

def process_sql_params(params, cursor):
    result = []
    for i, p in enumerate(params):
        if type(p) == tuple:
            value, data_type = p
        else:
            value = p
        result.append(value)
    return result

def process_sql_result(rows):
    result = []
    for row in rows:
        fields = []
        for field in row:
            if isinstance(field, cx_Oracle.LOB):
                field = field.read()
                field = to_unicode(field, 'utf-8')
            fields.append(field)
        result.append(fields)
    return result

def cast_date(date_str):
    return "TO_DATE('" + date_str + "', 'YYYY-MM-DD')"

def cast_datetime(datetime_str):
    # Fixed NameError: the parameter is datetime_str, not date_str.
    return "TO_DATE('" + datetime_str + "', 'YYYY-MM-DD HH24:MI')"

def value_literal(index):
    return ':f%d' % index

def convert_like(field_name, val, data_type):
    if data_type in [INTEGER, FLOAT, CURRENCY]:
        return 'TO_CHAR(%s, 99999999999990.999999999999)' % field_name, val
    else:
        return field_name, val

def create_table_sql(table_name, fields, gen_name=None, foreign_fields=None):
    result = []
    primary_key = ''
    sql = 'CREATE TABLE "%s"\n(\n' % table_name
    lines = []
    for field in fields:
        line = '"%s" %s' % (field['field_name'], FIELD_TYPES[field['data_type']])
        if field['size'] != 0 and field['data_type'] == TEXT:
            line += '(%d)' % field['size']
        if field['default_value'] and not field['primary_key']:
            if field['data_type'] == TEXT:
                line += " DEFAULT '%s'" % field['default_value']
            else:
                line += ' DEFAULT %s' % field['default_value']
        if field['primary_key']:
            primary_key = field['field_name']
        lines.append(line)
    if primary_key:
        lines.append('CONSTRAINT %s_PR_INDEX PRIMARY KEY ("%s")\n' % \
            (table_name, primary_key))
    sql += ',\n'.join(lines)
    sql += ')\n'
    result.append(sql)
    if primary_key:
result.append('CREATE SEQUENCE "%s"' % gen_name) return result def delete_table_sql(table_name, gen_name): result = [] result.append('DROP TABLE "%s"' % table_name) if gen_name: result.append('DROP SEQUENCE "%s"' % gen_name) return result def create_index_sql(index_name, table_name, unique, fields, desc): return 'CREATE %s INDEX "%s" ON "%s" (%s)' % \ (unique, index_name, table_name, fields) def create_foreign_index_sql(table_name, index_name, key, ref, primary_key): return 'ALTER TABLE "%s" ADD CONSTRAINT "%s" FOREIGN KEY ("%s") REFERENCES "%s"("%s")' % \ (table_name, index_name, key, ref, primary_key) def delete_index(table_name, index_name): return 'DROP INDEX "%s"' % index_name def delete_foreign_index(table_name, index_name): return 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (table_name, index_name) def add_field_sql(table_name, field): result = 'ALTER TABLE "%s" ADD "%s" %s' result = result % (table_name, field['field_name'], FIELD_TYPES[field['data_type']]) if field['size']: result += '(%d)' % field['size'] if field['default_value']: if field['data_type'] == TEXT: result += " DEFAULT '%s'" % field['default_value'] else: result += ' DEFAULT %s' % field['default_value'] return result def del_field_sql(table_name, field): return 'ALTER TABLE "%s" DROP COLUMN "%s"' % (table_name, field['field_name']) def change_field_sql(table_name, old_field, new_field): result = [] if FIELD_TYPES[old_field['data_type']] != FIELD_TYPES[new_field['data_type']] \ or old_field['size'] != new_field['size']: raise Exception(u"Don't know how to change field's size or type of %s" % old_field['field_name']) if old_field['field_name'] != new_field['field_name']: sql = 'ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % \ (table_name, old_field['field_name'], new_field['field_name']) result.append(sql) if old_field['default_value'] != new_field['default_value']: if new_field['default_value']: if new_field['data_type'] == TEXT: sql = 'ALTER TABLE "%s" MODIFY "%s" DEFAULT' % \ (table_name, new_field['field_name']) sql += " '%s'" % new_field['default_value'] else: sql = 'ALTER TABLE "%s" MODIFY "%s" DEFAULT %s' % \ (table_name, new_field['field_name'], new_field['default_value']) else: sql = 'ALTER TABLE "%s" MODIFY "%s" DEFAULT %s' % \ (table_name, new_field['field_name'], 'NULL') result.append(sql) return result def param_literal(): return '?' def next_sequence_value_sql(gen_name): return 'SELECT "%s".NEXTVAL FROM DUAL' % gen_name def restart_sequence_sql(gen_name, value): result = [] result.append('DROP SEQUENCE "%s"' % gen_name) result.append('CREATE SEQUENCE "%s" START WITH %s' % (gen_name, value)) return result def identifier_case(name): return name.upper() def get_table_names(connection): cursor = connection.cursor() cursor.execute('SELECT table_name FROM user_tables') result = cursor.fetchall() return [r[0] for r in result] def get_table_info(connection, table_name, db_name): cursor = connection.cursor() sql = "SELECT COLUMN_NAME, DATA_TYPE, CHAR_LENGTH, DATA_DEFAULT FROM USER_TAB_COLUMNS WHERE TABLE_NAME='%s'" % table_name cursor.execute(sql) result = cursor.fetchall() fields = [] for (field_name, data_type, size, default_value) in result: fields.append({ 'field_name': field_name, 'data_type': data_type, 'size': size, 'default_value': default_value, 'pk': False }) return {'fields': fields, 'field_types': FIELD_TYPES}
[ "werkzeug._compat.to_unicode", "cx_Oracle.connect" ]
[((951, 1012), 'cx_Oracle.connect', 'cx_Oracle.connect', ([], {'user': 'user', 'password': 'password', 'dsn': 'database'}), '(user=user, password=password, dsn=database)\n', (968, 1012), False, 'import cx_Oracle\n'), ((1047, 1078), 'cx_Oracle.connect', 'cx_Oracle.connect', ([], {'dsn': 'database'}), '(dsn=database)\n', (1064, 1078), False, 'import cx_Oracle\n'), ((2683, 2709), 'werkzeug._compat.to_unicode', 'to_unicode', (['field', '"""utf-8"""'], {}), "(field, 'utf-8')\n", (2693, 2709), False, 'from werkzeug._compat import text_type, to_bytes, to_unicode\n')]
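A hedged usage sketch for the adapter above, run in the same module against a reachable Oracle instance; the DSN and credentials are placeholders, not values from the source:

# Placeholders only -- substitute a real DSN and account.
conn = connect(database='localhost/XEPDB1', user='scott', password='tiger',
                host=None, port=None, encoding=None, server=None)
print(get_table_names(conn))  # lists tables via USER_TABLES, per the code above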
import os import random import re import logging # PYTHON 2 - py2 - update to ABC direct use rather than __metaclass__ once we drop py2 support from abc import ABCMeta from six import string_types from great_expectations.data_context.store.store_backend import StoreBackend from great_expectations.data_context.util import safe_mmkdir from great_expectations.exceptions import StoreBackendError logger = logging.getLogger(__name__) class TupleStoreBackend(StoreBackend): __metaclass__ = ABCMeta """ If filepath_template is provided, the key to this StoreBackend abstract class must be a tuple with fixed length equal to the number of unique components matching the regex r"{\d+}" For example, in the following template path: expectations/{0}/{1}/{2}/prefix-{2}.json, keys must have three components. """ def __init__(self, filepath_template=None, filepath_prefix=None, filepath_suffix=None, forbidden_substrings=None, platform_specific_separator=True, fixed_length_key=False): super().__init__(fixed_length_key=fixed_length_key) if forbidden_substrings is None: forbidden_substrings = ["/", "\\"] self.forbidden_substrings = forbidden_substrings self.platform_specific_separator = platform_specific_separator if filepath_template is not None and filepath_suffix is not None: raise ValueError("filepath_suffix may only be used when filepath_template is None") self.filepath_template = filepath_template if filepath_prefix and len(filepath_prefix) > 0: # Validate that the filepath prefix does not end with a forbidden substring if filepath_prefix[-1] in self.forbidden_substrings: raise StoreBackendError("Unable to initialize TupleStoreBackend: filepath_prefix may not end with a " "forbidden substring. Current forbidden substrings are " + str(forbidden_substrings)) self.filepath_prefix = filepath_prefix self.filepath_suffix = filepath_suffix if filepath_template is not None: # key length is the number of unique values to be substituted in the filepath_template self.key_length = len( set( re.findall(r"{\d+}", filepath_template) ) ) self.verify_that_key_to_filepath_operation_is_reversible() self._fixed_length_key = True def _validate_key(self, key): super()._validate_key(key) for key_element in key: for substring in self.forbidden_substrings: if substring in key_element: raise ValueError("Keys in {0} must not contain substrings in {1} : {2}".format( self.__class__.__name__, self.forbidden_substrings, key, )) def _validate_value(self, value): if not isinstance(value, string_types) and not isinstance(value, bytes): raise TypeError("Values in {0} must be instances of {1} or {2}, not {3}".format( self.__class__.__name__, string_types, bytes, type(value), )) def _convert_key_to_filepath(self, key): # NOTE: This method uses a hard-coded forward slash as a separator, # and then replaces that with a platform-specific separator if requested (the default) self._validate_key(key) if self.filepath_template: converted_string = self.filepath_template.format(*list(key)) else: converted_string = '/'.join(key) if self.filepath_prefix: converted_string = self.filepath_prefix + "/" + converted_string if self.filepath_suffix: converted_string += self.filepath_suffix if self.platform_specific_separator: converted_string = os.path.normpath(converted_string) return converted_string def _convert_filepath_to_key(self, filepath): if self.platform_specific_separator: filepath = os.path.normpath(filepath) if self.filepath_prefix: if not filepath.startswith(self.filepath_prefix) and len(filepath) >= len(self.filepath_prefix) + 1: # If 
filepath_prefix is set, we expect that it is the first component of a valid filepath. raise ValueError("filepath must start with the filepath_prefix when one is set by the store_backend") else: # Remove the prefix before processing # Also remove the separator that was added, which may have been platform-dependent filepath = filepath[len(self.filepath_prefix) + 1:] if self.filepath_suffix: if not filepath.endswith(self.filepath_suffix): # If filepath_suffix is set, we expect that it is the last component of a valid filepath. raise ValueError("filepath must end with the filepath_suffix when one is set by the store_backend") else: # Remove the suffix before processing filepath = filepath[:-len(self.filepath_suffix)] if self.filepath_template: # filepath_template is always specified with forward slashes, but it is then # used to (1) dynamically construct and evaluate a regex, and (2) split the provided (observed) filepath if self.platform_specific_separator: filepath_template = os.path.join(*self.filepath_template.split('/')) filepath_template = filepath_template.replace('\\', '\\\\') else: filepath_template = self.filepath_template # Convert the template to a regex indexed_string_substitutions = re.findall(r"{\d+}", filepath_template) tuple_index_list = ["(?P<tuple_index_{0}>.*)".format(i, ) for i in range(len(indexed_string_substitutions))] intermediate_filepath_regex = re.sub( r"{\d+}", lambda m, r=iter(tuple_index_list): next(r), filepath_template ) filepath_regex = intermediate_filepath_regex.format(*tuple_index_list) # Apply the regex to the filepath matches = re.compile(filepath_regex).match(filepath) if matches is None: return None # Map key elements into the appropriate parts of the tuple new_key = [None] * self.key_length for i in range(len(tuple_index_list)): tuple_index = int(re.search(r'\d+', indexed_string_substitutions[i]).group(0)) key_element = matches.group('tuple_index_' + str(i)) new_key[tuple_index] = key_element new_key = tuple(new_key) else: filepath = os.path.normpath(filepath) new_key = tuple(filepath.split(os.sep)) return new_key def verify_that_key_to_filepath_operation_is_reversible(self): def get_random_hex(size=4): return "".join([random.choice(list("<KEY>")) for _ in range(size)]) key = tuple([get_random_hex() for _ in range(self.key_length)]) filepath = self._convert_key_to_filepath(key) new_key = self._convert_filepath_to_key(filepath) if key != new_key: raise ValueError( "filepath template {0} for class {1} is not reversible for a tuple of length {2}. " "Have you included all elements in the key tuple?".format( self.filepath_template, self.__class__.__name__, self.key_length, )) class TupleFilesystemStoreBackend(TupleStoreBackend): """Uses a local filepath as a store. The key to this StoreBackend must be a tuple with fixed length based on the filepath_template, or a variable-length tuple may be used and returned with an optional filepath_suffix (to be) added. The filepath_template is a string template used to convert the key to a filepath. 
""" def __init__(self, base_directory, filepath_template=None, filepath_prefix=None, filepath_suffix=None, forbidden_substrings=None, platform_specific_separator=True, root_directory=None, fixed_length_key=False): super().__init__( filepath_template=filepath_template, filepath_prefix=filepath_prefix, filepath_suffix=filepath_suffix, forbidden_substrings=forbidden_substrings, platform_specific_separator=platform_specific_separator, fixed_length_key=fixed_length_key ) if os.path.isabs(base_directory): self.full_base_directory = base_directory else: if root_directory is None: raise ValueError("base_directory must be an absolute path if root_directory is not provided") elif not os.path.isabs(root_directory): raise ValueError("root_directory must be an absolute path. Got {0} instead.".format(root_directory)) else: self.full_base_directory = os.path.join(root_directory, base_directory) safe_mmkdir(str(os.path.dirname(self.full_base_directory))) def _get(self, key): filepath = os.path.join( self.full_base_directory, self._convert_key_to_filepath(key) ) with open(filepath, 'r') as infile: return infile.read() def _set(self, key, value, **kwargs): if not isinstance(key, tuple): key = key.to_tuple() filepath = os.path.join( self.full_base_directory, self._convert_key_to_filepath(key) ) path, filename = os.path.split(filepath) safe_mmkdir(str(path)) with open(filepath, "wb") as outfile: if isinstance(value, string_types): # Following try/except is to support py2, since both str and bytes objects pass above condition try: outfile.write(value.encode("utf-8")) except UnicodeDecodeError: outfile.write(value) else: outfile.write(value) return filepath def list_keys(self, prefix=()): key_list = [] for root, dirs, files in os.walk(os.path.join(self.full_base_directory, *prefix)): for file_ in files: full_path, file_name = os.path.split(os.path.join(root, file_)) relative_path = os.path.relpath( full_path, self.full_base_directory, ) if relative_path == ".": filepath = file_name else: filepath = os.path.join( relative_path, file_name ) if self.filepath_prefix and not filepath.startswith(self.filepath_prefix): continue elif self.filepath_suffix and not filepath.endswith(self.filepath_suffix): continue key = self._convert_filepath_to_key(filepath) if key and not self.is_ignored_key(key): key_list.append(key) return key_list def get_url_for_key(self, key, protocol=None): path = self._convert_key_to_filepath(key) full_path = os.path.join(self.full_base_directory, path) if protocol is None: protocol = "file:" url = protocol + "//" + full_path return url def _has_key(self, key): return os.path.isfile(os.path.join(self.full_base_directory, self._convert_key_to_filepath(key))) class TupleS3StoreBackend(TupleStoreBackend): """ Uses an S3 bucket as a store. The key to this StoreBackend must be a tuple with fixed length based on the filepath_template, or a variable-length tuple may be used and returned with an optional filepath_suffix (to be) added. The filepath_template is a string template used to convert the key to a filepath. 
""" def __init__( self, bucket, prefix="", filepath_template=None, filepath_prefix=None, filepath_suffix=None, forbidden_substrings=None, platform_specific_separator=False, fixed_length_key=False ): super().__init__( filepath_template=filepath_template, filepath_prefix=filepath_prefix, filepath_suffix=filepath_suffix, forbidden_substrings=forbidden_substrings, platform_specific_separator=platform_specific_separator, fixed_length_key=fixed_length_key ) self.bucket = bucket self.prefix = prefix def _get(self, key): s3_object_key = os.path.join( self.prefix, self._convert_key_to_filepath(key) ) import boto3 s3 = boto3.client('s3') s3_response_object = s3.get_object(Bucket=self.bucket, Key=s3_object_key) return s3_response_object['Body'].read().decode(s3_response_object.get("ContentEncoding", 'utf-8')) def _set(self, key, value, content_encoding='utf-8', content_type='application/json'): s3_object_key = os.path.join( self.prefix, self._convert_key_to_filepath(key) ) import boto3 s3 = boto3.resource('s3') result_s3 = s3.Object(self.bucket, s3_object_key) if isinstance(value, string_types): # Following try/except is to support py2, since both str and bytes objects pass above condition try: result_s3.put(Body=value.encode(content_encoding), ContentEncoding=content_encoding, ContentType=content_type) except TypeError: result_s3.put(Body=value, ContentType=content_type) else: result_s3.put(Body=value, ContentType=content_type) return s3_object_key def list_keys(self): key_list = [] import boto3 s3 = boto3.client('s3') s3_objects = s3.list_objects(Bucket=self.bucket, Prefix=self.prefix) if "Contents" in s3_objects: objects = s3_objects["Contents"] elif "CommonPrefixes" in s3_objects: logger.warning("TupleS3StoreBackend returned CommonPrefixes, but delimiter should not have been set.") objects = [] else: # No objects found in store objects = [] for s3_object_info in objects: s3_object_key = s3_object_info['Key'] s3_object_key = os.path.relpath( s3_object_key, self.prefix, ) if self.filepath_prefix and not s3_object_key.startswith(self.filepath_prefix): continue elif self.filepath_suffix and not s3_object_key.endswith(self.filepath_suffix): continue key = self._convert_filepath_to_key(s3_object_key) if key: key_list.append(key) return key_list def get_url_for_key(self, key, protocol=None): import boto3 location = boto3.client('s3').get_bucket_location(Bucket=self.bucket)['LocationConstraint'] if location is None: location = "s3" else: location = "s3-" + location s3_key = self._convert_key_to_filepath(key) return "https://%s.amazonaws.com/%s/%s%s" % (location, self.bucket, self.prefix, s3_key) def _has_key(self, key): all_keys = self.list_keys() return key in all_keys class TupleGCSStoreBackend(TupleStoreBackend): """ Uses a GCS bucket as a store. The key to this StoreBackend must be a tuple with fixed length based on the filepath_template, or a variable-length tuple may be used and returned with an optional filepath_suffix (to be) added. The filepath_template is a string template used to convert the key to a filepath. 
""" def __init__( self, bucket, prefix, project, filepath_template=None, filepath_prefix=None, filepath_suffix=None, forbidden_substrings=None, platform_specific_separator=False, fixed_length_key=False ): super().__init__( filepath_template=filepath_template, filepath_prefix=filepath_prefix, filepath_suffix=filepath_suffix, forbidden_substrings=forbidden_substrings, platform_specific_separator=platform_specific_separator, fixed_length_key=fixed_length_key ) self.bucket = bucket self.prefix = prefix self.project = project def _get(self, key): gcs_object_key = os.path.join( self.prefix, self._convert_key_to_filepath(key) ) from google.cloud import storage gcs = storage.Client(project=self.project) bucket = gcs.get_bucket(self.bucket) gcs_response_object = bucket.get_blob(gcs_object_key) return gcs_response_object.download_as_string().decode("utf-8") def _set(self, key, value, content_encoding='utf-8', content_type='application/json'): gcs_object_key = os.path.join( self.prefix, self._convert_key_to_filepath(key) ) from google.cloud import storage gcs = storage.Client(project=self.project) bucket = gcs.get_bucket(self.bucket) blob = bucket.blob(gcs_object_key) if isinstance(value, string_types): # Following try/except is to support py2, since both str and bytes objects pass above condition try: blob.upload_from_string(value.encode(content_encoding), content_encoding=content_encoding, content_type=content_type) except TypeError: blob.upload_from_string(value, content_type=content_type) else: blob.upload_from_string(value, content_type=content_type) return gcs_object_key def list_keys(self): key_list = [] from google.cloud import storage gcs = storage.Client(self.project) for blob in gcs.list_blobs(self.bucket, prefix=self.prefix): gcs_object_name = blob.name gcs_object_key = os.path.relpath( gcs_object_name, self.prefix, ) if self.filepath_prefix and not gcs_object_key.startswith(self.filepath_prefix): continue elif self.filepath_suffix and not gcs_object_key.endswith(self.filepath_suffix): continue key = self._convert_filepath_to_key(gcs_object_key) if key: key_list.append(key) return key_list def get_url_for_key(self, key, protocol=None): path = self._convert_key_to_filepath(key) return "https://storage.googleapis.com/" + self.bucket + "/" + path def _has_key(self, key): all_keys = self.list_keys() return key in all_keys
[ "os.path.isabs", "boto3.client", "os.path.dirname", "boto3.resource", "google.cloud.storage.Client", "os.path.normpath", "re.findall", "os.path.relpath", "re.search", "os.path.split", "os.path.join", "logging.getLogger", "re.compile" ]
[((407, 434), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (424, 434), False, 'import logging\n'), ((8930, 8959), 'os.path.isabs', 'os.path.isabs', (['base_directory'], {}), '(base_directory)\n', (8943, 8959), False, 'import os\n'), ((10021, 10044), 'os.path.split', 'os.path.split', (['filepath'], {}), '(filepath)\n', (10034, 10044), False, 'import os\n'), ((11722, 11766), 'os.path.join', 'os.path.join', (['self.full_base_directory', 'path'], {}), '(self.full_base_directory, path)\n', (11734, 11766), False, 'import os\n'), ((13309, 13327), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (13321, 13327), False, 'import boto3\n'), ((13765, 13785), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (13779, 13785), False, 'import boto3\n'), ((14458, 14476), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (14470, 14476), False, 'import boto3\n'), ((17377, 17413), 'google.cloud.storage.Client', 'storage.Client', ([], {'project': 'self.project'}), '(project=self.project)\n', (17391, 17413), False, 'from google.cloud import storage\n'), ((17862, 17898), 'google.cloud.storage.Client', 'storage.Client', ([], {'project': 'self.project'}), '(project=self.project)\n', (17876, 17898), False, 'from google.cloud import storage\n'), ((18652, 18680), 'google.cloud.storage.Client', 'storage.Client', (['self.project'], {}), '(self.project)\n', (18666, 18680), False, 'from google.cloud import storage\n'), ((4021, 4055), 'os.path.normpath', 'os.path.normpath', (['converted_string'], {}), '(converted_string)\n', (4037, 4055), False, 'import os\n'), ((4208, 4234), 'os.path.normpath', 'os.path.normpath', (['filepath'], {}), '(filepath)\n', (4224, 4234), False, 'import os\n'), ((5918, 5957), 're.findall', 're.findall', (['"""{\\\\d+}"""', 'filepath_template'], {}), "('{\\\\d+}', filepath_template)\n", (5928, 5957), False, 'import re\n'), ((6979, 7005), 'os.path.normpath', 'os.path.normpath', (['filepath'], {}), '(filepath)\n', (6995, 7005), False, 'import os\n'), ((10624, 10671), 'os.path.join', 'os.path.join', (['self.full_base_directory', '*prefix'], {}), '(self.full_base_directory, *prefix)\n', (10636, 10671), False, 'import os\n'), ((15019, 15062), 'os.path.relpath', 'os.path.relpath', (['s3_object_key', 'self.prefix'], {}), '(s3_object_key, self.prefix)\n', (15034, 15062), False, 'import os\n'), ((18820, 18865), 'os.path.relpath', 'os.path.relpath', (['gcs_object_name', 'self.prefix'], {}), '(gcs_object_name, self.prefix)\n', (18835, 18865), False, 'import os\n'), ((9478, 9519), 'os.path.dirname', 'os.path.dirname', (['self.full_base_directory'], {}), '(self.full_base_directory)\n', (9493, 9519), False, 'import os\n'), ((10818, 10870), 'os.path.relpath', 'os.path.relpath', (['full_path', 'self.full_base_directory'], {}), '(full_path, self.full_base_directory)\n', (10833, 10870), False, 'import os\n'), ((2339, 2378), 're.findall', 're.findall', (['"""{\\\\d+}"""', 'filepath_template'], {}), "('{\\\\d+}', filepath_template)\n", (2349, 2378), False, 'import re\n'), ((6416, 6442), 're.compile', 're.compile', (['filepath_regex'], {}), '(filepath_regex)\n', (6426, 6442), False, 'import re\n'), ((9199, 9228), 'os.path.isabs', 'os.path.isabs', (['root_directory'], {}), '(root_directory)\n', (9212, 9228), False, 'import os\n'), ((9408, 9452), 'os.path.join', 'os.path.join', (['root_directory', 'base_directory'], {}), '(root_directory, base_directory)\n', (9420, 9452), False, 'import os\n'), ((10759, 10784), 'os.path.join', 'os.path.join', 
(['root', 'file_'], {}), '(root, file_)\n', (10771, 10784), False, 'import os\n'), ((11065, 11103), 'os.path.join', 'os.path.join', (['relative_path', 'file_name'], {}), '(relative_path, file_name)\n', (11077, 11103), False, 'import os\n'), ((15582, 15600), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (15594, 15600), False, 'import boto3\n'), ((6723, 6773), 're.search', 're.search', (['"""\\\\d+"""', 'indexed_string_substitutions[i]'], {}), "('\\\\d+', indexed_string_substitutions[i])\n", (6732, 6773), False, 'import re\n')]
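The key/filepath round trip above is easiest to see concretely. A sketch in the same module, using the filesystem backend since TupleStoreBackend is abstract; the directory and template are hypothetical and POSIX paths are assumed:

# Hypothetical base directory and template; key_length (2) is inferred
# from the {0}/{1} placeholders, and the constructor self-verifies the
# round trip.
backend = TupleFilesystemStoreBackend(
    base_directory='/tmp/ge_store',
    filepath_template='expectations/{0}/{1}.json',
)
path = backend._convert_key_to_filepath(('suite_a', 'warning'))
# On POSIX: 'expectations/suite_a/warning.json'
assert backend._convert_filepath_to_key(path) == ('suite_a', 'warning')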
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of NVIDIA CORPORATION nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import sys sys.path.append("../common") import math import unittest import numpy as np from tensorrtserver.api import * import test_util as tu class LargePayLoadTest(unittest.TestCase): def setUp(self): self.data_type_ = np.float32 # n GB divided by element size self.input_size_ = math.trunc(6 * (1024 * 1024 * 1024) / np.dtype(self.data_type_).itemsize) self.protocols_ = ((ProtocolType.HTTP, 'localhost:8000'), (ProtocolType.GRPC, 'localhost:8001')) def _test_helper(self, ctx, tensor_shape, small_tensor_shape, input_name='INPUT0', output_name='OUTPUT0'): try: in0 = np.random.random(tensor_shape).astype(self.data_type_) results = ctx.run({ input_name : (in0,)}, { output_name : InferContext.ResultFormat.RAW}, 1) # if the inference is completed, examine results to ensure that # the framework and protocol do support large payload self.assertTrue(np.array_equal(in0, results[output_name][0]), "output is different from input") except InferenceServerException as ex: # if the inference failed, inference server should return error # gracefully. 
In addition to this, send a small payload to # verify if the server is still functional sin0 = np.random.random(small_tensor_shape).astype(self.data_type_) results = ctx.run({ input_name : (sin0,)}, { output_name : InferContext.ResultFormat.RAW}, 1) self.assertTrue(np.array_equal(sin0, results[output_name][0]), "output is different from input") def test_graphdef(self): tensor_shape = (self.input_size_,) small_tensor_shape = (1,) # graphdef_nobatch_zero_1_float32 is identity model with input shape [-1] for protocol, url in self.protocols_: model_name = tu.get_zero_model_name("graphdef_nobatch", 1, self.data_type_) ctx = InferContext(url, protocol, model_name, None, True) self._test_helper(ctx, tensor_shape, small_tensor_shape) def test_savedmodel(self): tensor_shape = (self.input_size_,) small_tensor_shape = (1,) # savedmodel_nobatch_zero_1_float32 is identity model with input shape [-1] for protocol, url in self.protocols_: model_name = tu.get_zero_model_name("savedmodel_nobatch", 1, self.data_type_) ctx = InferContext(url, protocol, model_name, None, True) self._test_helper(ctx, tensor_shape, small_tensor_shape) def test_netdef(self): tensor_shape = (self.input_size_,) small_tensor_shape = (1,) # netdef_nobatch_zero_1_float32 is identity model with input shape [-1] for protocol, url in self.protocols_: model_name = tu.get_zero_model_name("netdef_nobatch", 1, self.data_type_) ctx = InferContext(url, protocol, model_name, None, True) self._test_helper(ctx, tensor_shape, small_tensor_shape) def test_onnx(self): tensor_shape = (self.input_size_,) small_tensor_shape = (1,) # onnx_nobatch_zero_1_float32 is identity model with input shape [-1] for protocol, url in self.protocols_: model_name = tu.get_zero_model_name("onnx_nobatch", 1, self.data_type_) ctx = InferContext(url, protocol, model_name, None, True) self._test_helper(ctx, tensor_shape, small_tensor_shape) def test_plan(self): tensor_shape = (self.input_size_,) small_tensor_shape = (1,) # plan_nobatch_zero_1_float32 is identity model with input shape [-1] for protocol, url in self.protocols_: model_name = tu.get_zero_model_name("plan_nobatch", 1, self.data_type_) ctx = InferContext(url, protocol, model_name, None, True) self._test_helper(ctx, tensor_shape, small_tensor_shape) def test_libtorch(self): tensor_shape = (self.input_size_,) small_tensor_shape = (1,) # libtorch_nobatch_zero_1_float32 is identity model with input shape [-1] for protocol, url in self.protocols_: model_name = tu.get_zero_model_name("libtorch_nobatch", 1, self.data_type_) ctx = InferContext(url, protocol, model_name, None, True) self._test_helper(ctx, tensor_shape, small_tensor_shape, 'INPUT__0', 'OUTPUT__0') def test_custom(self): tensor_shape = (self.input_size_,) small_tensor_shape = (1,) # custom_zero_1_float32 is identity model with input shape [-1] for protocol, url in self.protocols_: model_name = tu.get_zero_model_name("custom", 1, self.data_type_) ctx = InferContext(url, protocol, model_name, None, True) self._test_helper(ctx, tensor_shape, small_tensor_shape) if __name__ == '__main__': unittest.main()
[ "sys.path.append", "unittest.main", "numpy.dtype", "test_util.get_zero_model_name", "numpy.random.random", "numpy.array_equal" ]
[((1545, 1573), 'sys.path.append', 'sys.path.append', (['"""../common"""'], {}), "('../common')\n", (1560, 1573), False, 'import sys\n'), ((6563, 6578), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6576, 6578), False, 'import unittest\n'), ((3541, 3603), 'test_util.get_zero_model_name', 'tu.get_zero_model_name', (['"""graphdef_nobatch"""', '(1)', 'self.data_type_'], {}), "('graphdef_nobatch', 1, self.data_type_)\n", (3563, 3603), True, 'import test_util as tu\n'), ((4008, 4072), 'test_util.get_zero_model_name', 'tu.get_zero_model_name', (['"""savedmodel_nobatch"""', '(1)', 'self.data_type_'], {}), "('savedmodel_nobatch', 1, self.data_type_)\n", (4030, 4072), True, 'import test_util as tu\n'), ((4469, 4529), 'test_util.get_zero_model_name', 'tu.get_zero_model_name', (['"""netdef_nobatch"""', '(1)', 'self.data_type_'], {}), "('netdef_nobatch', 1, self.data_type_)\n", (4491, 4529), True, 'import test_util as tu\n'), ((4922, 4980), 'test_util.get_zero_model_name', 'tu.get_zero_model_name', (['"""onnx_nobatch"""', '(1)', 'self.data_type_'], {}), "('onnx_nobatch', 1, self.data_type_)\n", (4944, 4980), True, 'import test_util as tu\n'), ((5373, 5431), 'test_util.get_zero_model_name', 'tu.get_zero_model_name', (['"""plan_nobatch"""', '(1)', 'self.data_type_'], {}), "('plan_nobatch', 1, self.data_type_)\n", (5395, 5431), True, 'import test_util as tu\n'), ((5832, 5894), 'test_util.get_zero_model_name', 'tu.get_zero_model_name', (['"""libtorch_nobatch"""', '(1)', 'self.data_type_'], {}), "('libtorch_nobatch', 1, self.data_type_)\n", (5854, 5894), True, 'import test_util as tu\n'), ((6338, 6390), 'test_util.get_zero_model_name', 'tu.get_zero_model_name', (['"""custom"""', '(1)', 'self.data_type_'], {}), "('custom', 1, self.data_type_)\n", (6360, 6390), True, 'import test_util as tu\n'), ((2599, 2643), 'numpy.array_equal', 'np.array_equal', (['in0', 'results[output_name][0]'], {}), '(in0, results[output_name][0])\n', (2613, 2643), True, 'import numpy as np\n'), ((1884, 1909), 'numpy.dtype', 'np.dtype', (['self.data_type_'], {}), '(self.data_type_)\n', (1892, 1909), True, 'import numpy as np\n'), ((2213, 2243), 'numpy.random.random', 'np.random.random', (['tensor_shape'], {}), '(tensor_shape)\n', (2229, 2243), True, 'import numpy as np\n'), ((3199, 3244), 'numpy.array_equal', 'np.array_equal', (['sin0', 'results[output_name][0]'], {}), '(sin0, results[output_name][0])\n', (3213, 3244), True, 'import numpy as np\n'), ((2948, 2984), 'numpy.random.random', 'np.random.random', (['small_tensor_shape'], {}), '(small_tensor_shape)\n', (2964, 2984), True, 'import numpy as np\n')]
# coding=utf-8 from __future__ import unicode_literals import os import platform import mock from decimal import Decimal import pytest try: import setproctitle except ImportError: setproctitle = None from pgcli.main import ( obfuscate_process_password, format_output, PGCli, OutputSettings ) from utils import dbtest, run @pytest.mark.skipif(platform.system() == 'Windows', reason='Not applicable in windows') @pytest.mark.skipif(not setproctitle, reason='setproctitle not available') def test_obfuscate_process_password(): original_title = setproctitle.getproctitle() setproctitle.setproctitle("pgcli user=root password=secret host=localhost") obfuscate_process_password() title = setproctitle.getproctitle() expected = "pgcli user=root password=xxxx host=localhost" assert title == expected setproctitle.setproctitle("pgcli user=root password=top secret host=localhost") obfuscate_process_password() title = setproctitle.getproctitle() expected = "pgcli user=root password=xxxx host=localhost" assert title == expected setproctitle.setproctitle("pgcli user=root password=top secret") obfuscate_process_password() title = setproctitle.getproctitle() expected = "pgcli user=root password=xxxx" assert title == expected setproctitle.setproctitle("pgcli postgres://root:secret@localhost/db") obfuscate_process_password() title = setproctitle.getproctitle() expected = "pgcli postgres://root:xxxx@localhost/db" assert title == expected setproctitle.setproctitle(original_title) def test_format_output(): settings = OutputSettings(table_format='psql', dcmlfmt='d', floatfmt='g') results = format_output('Title', [('abc', 'def')], ['head1', 'head2'], 'test status', settings) expected = [ 'Title', '+---------+---------+', '| head1 | head2 |', '|---------+---------|', '| abc | def |', '+---------+---------+', 'test status' ] assert list(results) == expected @dbtest def test_format_array_output(executor): statement = u""" SELECT array[1, 2, 3]::bigint[] as bigint_array, '{{1,2},{3,4}}'::numeric[] as nested_numeric_array, '{å,魚,текст}'::text[] as 配列 UNION ALL SELECT '{}', NULL, array[NULL] """ results = run(executor, statement) expected = [ '+----------------+------------------------+--------------+', '| bigint_array | nested_numeric_array | 配列 |', '|----------------+------------------------+--------------|', '| {1,2,3} | {{1,2},{3,4}} | {å,魚,текст} |', '| {} | <null> | {<null>} |', '+----------------+------------------------+--------------+', 'SELECT 2' ] assert list(results) == expected @dbtest def test_format_array_output_expanded(executor): statement = u""" SELECT array[1, 2, 3]::bigint[] as bigint_array, '{{1,2},{3,4}}'::numeric[] as nested_numeric_array, '{å,魚,текст}'::text[] as 配列 UNION ALL SELECT '{}', NULL, array[NULL] """ results = run(executor, statement, expanded=True) expected = [ '-[ RECORD 1 ]-------------------------', 'bigint_array | {1,2,3}', 'nested_numeric_array | {{1,2},{3,4}}', '配列 | {å,魚,текст}', '-[ RECORD 2 ]-------------------------', 'bigint_array | {}', 'nested_numeric_array | <null>', '配列 | {<null>}', 'SELECT 2' ] assert '\n'.join(results) == '\n'.join(expected) def test_format_output_auto_expand(): settings = OutputSettings( table_format='psql', dcmlfmt='d', floatfmt='g', max_width=100) table_results = format_output('Title', [('abc', 'def')], ['head1', 'head2'], 'test status', settings) table = [ 'Title', '+---------+---------+', '| head1 | head2 |', '|---------+---------|', '| abc | def |', '+---------+---------+', 'test status' ] assert list(table_results) == table expanded_results = 
format_output( 'Title', [('abc', 'def')], ['head1', 'head2'], 'test status', settings._replace(max_width=1) ) expanded = [ 'Title', '-[ RECORD 1 ]-------------------------', 'head1 | abc', 'head2 | def', 'test status' ] assert '\n'.join(expanded_results) == '\n'.join(expanded) @dbtest def test_i_works(tmpdir, executor): sqlfile = tmpdir.join("test.sql") sqlfile.write("SELECT NOW()") rcfile = str(tmpdir.join("rcfile")) cli = PGCli( pgexecute=executor, pgclirc_file=rcfile, ) statement = r"\i {0}".format(sqlfile) run(executor, statement, pgspecial=cli.pgspecial) def test_missing_rc_dir(tmpdir): rcfile = str(tmpdir.join("subdir").join("rcfile")) PGCli(pgclirc_file=rcfile) assert os.path.exists(rcfile) def test_quoted_db_uri(tmpdir): with mock.patch.object(PGCli, 'connect') as mock_connect: cli = PGCli(pgclirc_file=str(tmpdir.join("rcfile"))) cli.connect_uri('postgres://bar%5E:%5Dfoo@baz.com/testdb%5B') mock_connect.assert_called_with(database='testdb[', port=None, host='baz.com', user='bar^', passwd=']<PASSWORD>') def test_ssl_db_uri(tmpdir): with mock.patch.object(PGCli, 'connect') as mock_connect: cli = PGCli(pgclirc_file=str(tmpdir.join("rcfile"))) cli.connect_uri( 'postgres://bar%5E:%5D<PASSWORD>@<EMAIL>/testdb%5B?' 'sslmode=verify-full&sslcert=m%79.pem&sslkey=my-key.pem&sslrootcert=c%61.pem') mock_connect.assert_called_with(database='testdb[', host='baz.com', port=None, user='bar^', passwd=']<PASSWORD>', sslmode='verify-full', sslcert='my.pem', sslkey='my-key.pem', sslrootcert='ca.pem') def test_port_db_uri(tmpdir): with mock.patch.object(PGCli, 'connect') as mock_connect: cli = PGCli(pgclirc_file=str(tmpdir.join("rcfile"))) cli.connect_uri('postgres://bar:<EMAIL>:2543/testdb') mock_connect.assert_called_with(database='testdb', host='baz.com', user='bar', passwd='<PASSWORD>', port='2543')
[ "mock.patch.object", "pgcli.main.OutputSettings", "utils.run", "os.path.exists", "setproctitle.setproctitle", "pgcli.main.obfuscate_process_password", "pytest.mark.skipif", "pgcli.main.PGCli", "setproctitle.getproctitle", "platform.system", "pgcli.main.format_output" ]
[((447, 520), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not setproctitle)'], {'reason': '"""setproctitle not available"""'}), "(not setproctitle, reason='setproctitle not available')\n", (465, 520), False, 'import pytest\n'), ((601, 628), 'setproctitle.getproctitle', 'setproctitle.getproctitle', ([], {}), '()\n', (626, 628), False, 'import setproctitle\n'), ((634, 709), 'setproctitle.setproctitle', 'setproctitle.setproctitle', (['"""pgcli user=root password=secret host=localhost"""'], {}), "('pgcli user=root password=secret host=localhost')\n", (659, 709), False, 'import setproctitle\n'), ((714, 742), 'pgcli.main.obfuscate_process_password', 'obfuscate_process_password', ([], {}), '()\n', (740, 742), False, 'from pgcli.main import obfuscate_process_password, format_output, PGCli, OutputSettings\n'), ((755, 782), 'setproctitle.getproctitle', 'setproctitle.getproctitle', ([], {}), '()\n', (780, 782), False, 'import setproctitle\n'), ((879, 958), 'setproctitle.setproctitle', 'setproctitle.setproctitle', (['"""pgcli user=root password=top secret host=localhost"""'], {}), "('pgcli user=root password=top secret host=localhost')\n", (904, 958), False, 'import setproctitle\n'), ((963, 991), 'pgcli.main.obfuscate_process_password', 'obfuscate_process_password', ([], {}), '()\n', (989, 991), False, 'from pgcli.main import obfuscate_process_password, format_output, PGCli, OutputSettings\n'), ((1004, 1031), 'setproctitle.getproctitle', 'setproctitle.getproctitle', ([], {}), '()\n', (1029, 1031), False, 'import setproctitle\n'), ((1128, 1192), 'setproctitle.setproctitle', 'setproctitle.setproctitle', (['"""pgcli user=root password=top secret"""'], {}), "('pgcli user=root password=top secret')\n", (1153, 1192), False, 'import setproctitle\n'), ((1197, 1225), 'pgcli.main.obfuscate_process_password', 'obfuscate_process_password', ([], {}), '()\n', (1223, 1225), False, 'from pgcli.main import obfuscate_process_password, format_output, PGCli, OutputSettings\n'), ((1238, 1265), 'setproctitle.getproctitle', 'setproctitle.getproctitle', ([], {}), '()\n', (1263, 1265), False, 'import setproctitle\n'), ((1347, 1417), 'setproctitle.setproctitle', 'setproctitle.setproctitle', (['"""pgcli postgres://root:secret@localhost/db"""'], {}), "('pgcli postgres://root:secret@localhost/db')\n", (1372, 1417), False, 'import setproctitle\n'), ((1422, 1450), 'pgcli.main.obfuscate_process_password', 'obfuscate_process_password', ([], {}), '()\n', (1448, 1450), False, 'from pgcli.main import obfuscate_process_password, format_output, PGCli, OutputSettings\n'), ((1463, 1490), 'setproctitle.getproctitle', 'setproctitle.getproctitle', ([], {}), '()\n', (1488, 1490), False, 'import setproctitle\n'), ((1582, 1623), 'setproctitle.setproctitle', 'setproctitle.setproctitle', (['original_title'], {}), '(original_title)\n', (1607, 1623), False, 'import setproctitle\n'), ((1667, 1729), 'pgcli.main.OutputSettings', 'OutputSettings', ([], {'table_format': '"""psql"""', 'dcmlfmt': '"""d"""', 'floatfmt': '"""g"""'}), "(table_format='psql', dcmlfmt='d', floatfmt='g')\n", (1681, 1729), False, 'from pgcli.main import obfuscate_process_password, format_output, PGCli, OutputSettings\n'), ((1744, 1833), 'pgcli.main.format_output', 'format_output', (['"""Title"""', "[('abc', 'def')]", "['head1', 'head2']", '"""test status"""', 'settings'], {}), "('Title', [('abc', 'def')], ['head1', 'head2'], 'test status',\n settings)\n", (1757, 1833), False, 'from pgcli.main import obfuscate_process_password, format_output, PGCli, OutputSettings\n'), ((2421, 
2445), 'utils.run', 'run', (['executor', 'statement'], {}), '(executor, statement)\n', (2424, 2445), False, 'from utils import dbtest, run\n'), ((3250, 3289), 'utils.run', 'run', (['executor', 'statement'], {'expanded': '(True)'}), '(executor, statement, expanded=True)\n', (3253, 3289), False, 'from utils import dbtest, run\n'), ((3797, 3874), 'pgcli.main.OutputSettings', 'OutputSettings', ([], {'table_format': '"""psql"""', 'dcmlfmt': '"""d"""', 'floatfmt': '"""g"""', 'max_width': '(100)'}), "(table_format='psql', dcmlfmt='d', floatfmt='g', max_width=100)\n", (3811, 3874), False, 'from pgcli.main import obfuscate_process_password, format_output, PGCli, OutputSettings\n'), ((3904, 3993), 'pgcli.main.format_output', 'format_output', (['"""Title"""', "[('abc', 'def')]", "['head1', 'head2']", '"""test status"""', 'settings'], {}), "('Title', [('abc', 'def')], ['head1', 'head2'], 'test status',\n settings)\n", (3917, 3993), False, 'from pgcli.main import obfuscate_process_password, format_output, PGCli, OutputSettings\n'), ((4853, 4899), 'pgcli.main.PGCli', 'PGCli', ([], {'pgexecute': 'executor', 'pgclirc_file': 'rcfile'}), '(pgexecute=executor, pgclirc_file=rcfile)\n', (4858, 4899), False, 'from pgcli.main import obfuscate_process_password, format_output, PGCli, OutputSettings\n'), ((4969, 5018), 'utils.run', 'run', (['executor', 'statement'], {'pgspecial': 'cli.pgspecial'}), '(executor, statement, pgspecial=cli.pgspecial)\n', (4972, 5018), False, 'from utils import dbtest, run\n'), ((5114, 5140), 'pgcli.main.PGCli', 'PGCli', ([], {'pgclirc_file': 'rcfile'}), '(pgclirc_file=rcfile)\n', (5119, 5140), False, 'from pgcli.main import obfuscate_process_password, format_output, PGCli, OutputSettings\n'), ((5152, 5174), 'os.path.exists', 'os.path.exists', (['rcfile'], {}), '(rcfile)\n', (5166, 5174), False, 'import os\n'), ((358, 375), 'platform.system', 'platform.system', ([], {}), '()\n', (373, 375), False, 'import platform\n'), ((5218, 5253), 'mock.patch.object', 'mock.patch.object', (['PGCli', '"""connect"""'], {}), "(PGCli, 'connect')\n", (5235, 5253), False, 'import mock\n'), ((5704, 5739), 'mock.patch.object', 'mock.patch.object', (['PGCli', '"""connect"""'], {}), "(PGCli, 'connect')\n", (5721, 5739), False, 'import mock\n'), ((6530, 6565), 'mock.patch.object', 'mock.patch.object', (['PGCli', '"""connect"""'], {}), "(PGCli, 'connect')\n", (6547, 6565), False, 'import mock\n')]
# Copyright 2021 UW-IT, University of Washington # SPDX-License-Identifier: Apache-2.0 from unittest import TestCase, skipUnless from commonconf import override_settings from restclients_core.dao import DAO, MockDAO from restclients_core.cache import NoCache from restclients_core.models import MockHTTP, CacheHTTP from restclients_core.exceptions import ImproperlyConfigured class TDAO(DAO): def service_name(self): return "backend_test" def get_default_service_setting(self, key): if "DAO_CLASS" == key: return ('restclients_core.tests.dao_implementation.' 'test_backend.Backend') class E1DAO(TDAO): def get_default_service_setting(self, key): if "DAO_CLASS" == key: return ('restclients_core.tests.dao_implementation.' 'test_backendX.Backend') class E2DAO(TDAO): def get_default_service_setting(self, key): if "DAO_CLASS" == key: return ('restclients_core.tests.dao_implementation.' 'test_backend.BackendX') class TCache(): def getCache(self, service, url, headers): if url == '/ok': response = CacheHTTP() response.cache_class = self.__class__ response.status = 200 response.data = 'ok - GET' return {'response': response} def processResponse(self, service, url, response): response.status = 404 response.cache_class = self.__class__ return {'response': response} class TestBackend(TestCase): def setUp(self): DAO._cache_instance = None def test_get(self): response = TDAO().getURL('/ok') self.assertEquals(response.data, 'ok - GET') def test_post(self): response = TDAO().postURL('/ok') self.assertEquals(response.data, 'ok - POST') def test_put(self): response = TDAO().putURL('/ok', {}, '') self.assertEquals(response.data, 'ok - PUT') def test_delete(self): response = TDAO().deleteURL('/ok') self.assertEquals(response.data, 'ok - DELETE') def test_patch(self): response = TDAO().patchURL('/ok', {}, '') self.assertEquals(response.data, 'ok - PATCH') def test_error_level1(self): self.assertRaises(ImproperlyConfigured, E1DAO().getURL, '/ok') def test_error_level2(self): self.assertRaises(ImproperlyConfigured, E2DAO().getURL, '/ok') @override_settings(RESTCLIENTS_BACKEND_TEST_FOO=True, RESTCLIENTS_FOO=False, RESTCLIENTS_BAR=True) def test_service_settings(self): dao = TDAO() self.assertEquals(dao.get_setting('FOO'), False) self.assertEquals(dao.get_setting('BAR'), True) self.assertEquals(dao.get_service_setting('FOO'), True) self.assertEquals(dao.get_service_setting('BAR'), True) @skipUnless(hasattr(TestCase, 'assertLogs'), 'Python < 3.4') @override_settings(RESTCLIENTS_TIMING_LOG_ENABLED=True, RESTCLIENTS_TIMING_LOG_RATE=1.0) def test_log(self): with self.assertLogs('restclients_core.dao', level='INFO') as cm: response = TDAO().getURL('/ok') self.assertEquals(len(cm.output), 1) (msg, time) = cm.output[0].split(' time:') self.assertEquals(msg, 'INFO:restclients_core.dao:service:backend_test ' 'method:GET url:/ok status:200 from_cache:no' ' cache_class:None') self.assertGreater(float(time), 0) with self.assertLogs('restclients_core.dao', level='INFO') as cm: response = TDAO().putURL('/api', {}, '') self.assertEquals(len(cm.output), 1) (msg, time) = cm.output[0].split(' time:') self.assertEquals(msg, 'INFO:restclients_core.dao:service:backend_test ' 'method:PUT url:/api status:200 from_cache:no' ' cache_class:None') self.assertGreater(float(time), 0) @skipUnless(hasattr(TestCase, 'assertLogs'), 'Python < 3.4') @override_settings(RESTCLIENTS_TIMING_LOG_ENABLED=True, RESTCLIENTS_TIMING_LOG_RATE=1.0, RESTCLIENTS_DAO_CACHE_CLASS=( 'restclients_core.tests.dao_implementation.' 
'test_backend.TCache')) def test_log_cache(self): # Cached response with self.assertLogs('restclients_core.dao', level='INFO') as cm: response = TDAO().getURL('/ok') self.assertEquals(len(cm.output), 1) (msg, time) = cm.output[0].split(' time:') self.assertEquals(msg, 'INFO:restclients_core.dao:service:backend_test ' 'method:GET url:/ok status:200 from_cache:yes' ' cache_class:TCache') self.assertGreater(float(time), 0) # Cached post response with self.assertLogs('restclients_core.dao', level='INFO') as cm: response = TDAO().getURL('/ok2') self.assertEquals(len(cm.output), 1) (msg, time) = cm.output[0].split(' time:') self.assertEquals(msg, 'INFO:restclients_core.dao:service:backend_test ' 'method:GET url:/ok2 status:404 from_cache:yes' ' cache_class:TCache') self.assertGreater(float(time), 0) @override_settings(RESTCLIENTS_DAO_CACHE_CLASS=( 'restclients_core.tests.dao_implementation.' 'test_backend.TCache')) def test_cache_backend(self): cache1 = TDAO().get_cache() cache2 = TDAO().get_cache() self.assertTrue(cache1 == cache2, 'Cache objects are same instance') cache3 = E1DAO().get_cache() self.assertTrue(cache1 == cache3, 'Cache objects are same instance') cache4 = E2DAO().get_cache() self.assertTrue(cache1 == cache4, 'Cache objects are same instance') class Backend(MockDAO): def load(self, method, url, headers, body): response = MockHTTP() response.status = 200 response.data = "ok - {}".format(method) return response
[ "commonconf.override_settings", "restclients_core.models.MockHTTP", "restclients_core.models.CacheHTTP" ]
[((2450, 2551), 'commonconf.override_settings', 'override_settings', ([], {'RESTCLIENTS_BACKEND_TEST_FOO': '(True)', 'RESTCLIENTS_FOO': '(False)', 'RESTCLIENTS_BAR': '(True)'}), '(RESTCLIENTS_BACKEND_TEST_FOO=True, RESTCLIENTS_FOO=False,\n RESTCLIENTS_BAR=True)\n', (2467, 2551), False, 'from commonconf import override_settings\n'), ((2965, 3056), 'commonconf.override_settings', 'override_settings', ([], {'RESTCLIENTS_TIMING_LOG_ENABLED': '(True)', 'RESTCLIENTS_TIMING_LOG_RATE': '(1.0)'}), '(RESTCLIENTS_TIMING_LOG_ENABLED=True,\n RESTCLIENTS_TIMING_LOG_RATE=1.0)\n', (2982, 3056), False, 'from commonconf import override_settings\n'), ((4204, 4393), 'commonconf.override_settings', 'override_settings', ([], {'RESTCLIENTS_TIMING_LOG_ENABLED': '(True)', 'RESTCLIENTS_TIMING_LOG_RATE': '(1.0)', 'RESTCLIENTS_DAO_CACHE_CLASS': '"""restclients_core.tests.dao_implementation.test_backend.TCache"""'}), "(RESTCLIENTS_TIMING_LOG_ENABLED=True,\n RESTCLIENTS_TIMING_LOG_RATE=1.0, RESTCLIENTS_DAO_CACHE_CLASS=\n 'restclients_core.tests.dao_implementation.test_backend.TCache')\n", (4221, 4393), False, 'from commonconf import override_settings\n'), ((5615, 5730), 'commonconf.override_settings', 'override_settings', ([], {'RESTCLIENTS_DAO_CACHE_CLASS': '"""restclients_core.tests.dao_implementation.test_backend.TCache"""'}), "(RESTCLIENTS_DAO_CACHE_CLASS=\n 'restclients_core.tests.dao_implementation.test_backend.TCache')\n", (5632, 5730), False, 'from commonconf import override_settings\n'), ((6292, 6302), 'restclients_core.models.MockHTTP', 'MockHTTP', ([], {}), '()\n', (6300, 6302), False, 'from restclients_core.models import MockHTTP, CacheHTTP\n'), ((1176, 1187), 'restclients_core.models.CacheHTTP', 'CacheHTTP', ([], {}), '()\n', (1185, 1187), False, 'from restclients_core.models import MockHTTP, CacheHTTP\n')]
# This code is part of Qiskit. # # (C) Copyright IBM 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ PrimitiveOp Class """ from typing import Dict, List, Optional, Set, Union, cast import numpy as np import scipy.linalg from scipy.sparse import spmatrix from qiskit import QuantumCircuit from qiskit.circuit import Instruction, ParameterExpression from qiskit.opflow.operator_base import OperatorBase from qiskit.quantum_info import Operator, Pauli, SparsePauliOp, Statevector class PrimitiveOp(OperatorBase): r""" A class for representing basic Operators, backed by Operator primitives from Terra. This class (and inheritors) primarily serves to allow the underlying primitives to "flow" - i.e. interoperability and adherence to the Operator formalism - while the core computational logic mostly remains in the underlying primitives. For example, we would not produce an interface in Terra in which ``QuantumCircuit1 + QuantumCircuit2`` equaled the Operator sum of the circuit unitaries, rather than simply appending the circuits. However, within the Operator flow summing the unitaries is the expected behavior. Note that all mathematical methods are not in-place, meaning that they return a new object, but the underlying primitives are not copied. """ def __init_subclass__(cls): cls.__new__ = lambda cls, *args, **kwargs: super().__new__(cls) @staticmethod # pylint: disable=unused-argument def __new__(cls, primitive: Union[Instruction, QuantumCircuit, List, np.ndarray, spmatrix, Operator, Pauli, SparsePauliOp], coeff: Union[complex, ParameterExpression] = 1.0) -> 'PrimitiveOp': """ A factory method to produce the correct type of PrimitiveOp subclass based on the primitive passed in. Primitive and coeff arguments are passed into subclass's init() as-is automatically by new(). Args: primitive: The operator primitive being wrapped. coeff: A coefficient multiplying the primitive. Returns: The appropriate PrimitiveOp subclass for ``primitive``. Raises: TypeError: Unsupported primitive type passed. """ # pylint: disable=cyclic-import if isinstance(primitive, (Instruction, QuantumCircuit)): from .circuit_op import CircuitOp return super().__new__(CircuitOp) if isinstance(primitive, (list, np.ndarray, spmatrix, Operator)): from .matrix_op import MatrixOp return super().__new__(MatrixOp) if isinstance(primitive, Pauli): from .pauli_op import PauliOp return super().__new__(PauliOp) if isinstance(primitive, SparsePauliOp): from .pauli_sum_op import PauliSumOp return super().__new__(PauliSumOp) raise TypeError('Unsupported primitive type {} passed into PrimitiveOp ' 'factory constructor'.format(type(primitive))) def __init__(self, primitive: Union[QuantumCircuit, Operator, Pauli, SparsePauliOp, OperatorBase], coeff: Union[complex, ParameterExpression] = 1.0) -> None: """ Args: primitive: The operator primitive being wrapped. coeff: A coefficient multiplying the primitive. """ super().__init__() self._primitive = primitive self._coeff = coeff @property def primitive(self) -> Union[QuantumCircuit, Operator, Pauli, SparsePauliOp, OperatorBase]: """ The primitive defining the underlying function of the Operator. Returns: The primitive object. 
""" return self._primitive @property def coeff(self) -> Union[complex, ParameterExpression]: """ The scalar coefficient multiplying the Operator. Returns: The coefficient. """ return self._coeff @property def num_qubits(self) -> int: raise NotImplementedError def primitive_strings(self) -> Set[str]: raise NotImplementedError def add(self, other: OperatorBase) -> OperatorBase: raise NotImplementedError def adjoint(self) -> OperatorBase: raise NotImplementedError def equals(self, other: OperatorBase) -> bool: raise NotImplementedError def mul(self, scalar: Union[complex, ParameterExpression]) -> OperatorBase: if not isinstance(scalar, (int, float, complex, ParameterExpression)): raise ValueError('Operators can only be scalar multiplied by float or complex, not ' '{} of type {}.'.format(scalar, type(scalar))) # Need to return self.__class__ in case the object is one of the inherited OpPrimitives return self.__class__(self.primitive, coeff=self.coeff * scalar) def tensor(self, other: OperatorBase) -> OperatorBase: raise NotImplementedError def tensorpower(self, other: int) -> Union[OperatorBase, int]: # Hack to make Z^(I^0) work as intended. if other == 0: return 1 if not isinstance(other, int) or other < 0: raise TypeError('Tensorpower can only take positive int arguments') temp = PrimitiveOp(self.primitive, coeff=self.coeff) # type: OperatorBase for _ in range(other - 1): temp = temp.tensor(self) return temp def compose(self, other: OperatorBase, permutation: Optional[List[int]] = None, front: bool = False) -> \ OperatorBase: # pylint: disable=cyclic-import from ..list_ops.composed_op import ComposedOp new_self, other = self._expand_shorter_operator_and_permute(other, permutation) if isinstance(other, ComposedOp): comp_with_first = new_self.compose(other.oplist[0]) if not isinstance(comp_with_first, ComposedOp): new_oplist = [comp_with_first] + other.oplist[1:] return ComposedOp(new_oplist, coeff=other.coeff) return ComposedOp([new_self] + other.oplist, coeff=other.coeff) return ComposedOp([new_self, other]) def power(self, exponent: int) -> OperatorBase: if not isinstance(exponent, int) or exponent <= 0: raise TypeError('power can only take positive int arguments') temp = PrimitiveOp(self.primitive, coeff=self.coeff) # type: OperatorBase for _ in range(exponent - 1): temp = temp.compose(self) return temp def _expand_dim(self, num_qubits: int) -> OperatorBase: raise NotImplementedError def permute(self, permutation: List[int]) -> OperatorBase: raise NotImplementedError def exp_i(self) -> OperatorBase: """ Return Operator exponentiation, equaling e^(-i * op)""" # pylint: disable=cyclic-import from ..evolutions.evolved_op import EvolvedOp return EvolvedOp(self) def log_i(self, massive: bool = False) -> OperatorBase: """Return a ``MatrixOp`` equivalent to log(H)/-i for this operator H. 
This function is the effective inverse of exp_i, equivalent to finding the Hermitian Operator which produces self when exponentiated.""" # pylint: disable=cyclic-import from ..operator_globals import EVAL_SIG_DIGITS from .matrix_op import MatrixOp return MatrixOp(np.around(scipy.linalg.logm(self.to_matrix(massive=massive)) / -1j, decimals=EVAL_SIG_DIGITS)) def __str__(self) -> str: raise NotImplementedError def __repr__(self) -> str: return "{}({}, coeff={})".format(type(self).__name__, repr(self.primitive), self.coeff) def eval( self, front: Optional[ Union[str, Dict[str, complex], np.ndarray, OperatorBase, Statevector] ] = None, ) -> Union[OperatorBase, complex]: raise NotImplementedError @property def parameters(self): params = set() if isinstance(self.primitive, (OperatorBase, QuantumCircuit)): params.update(self.primitive.parameters) if isinstance(self.coeff, ParameterExpression): params.update(self.coeff.parameters) return params def assign_parameters(self, param_dict: dict) -> OperatorBase: param_value = self.coeff if isinstance(self.coeff, ParameterExpression): unrolled_dict = self._unroll_param_dict(param_dict) if isinstance(unrolled_dict, list): # pylint: disable=cyclic-import from ..list_ops.list_op import ListOp return ListOp([self.assign_parameters(param_dict) for param_dict in unrolled_dict]) if self.coeff.parameters <= set(unrolled_dict.keys()): binds = {param: unrolled_dict[param] for param in self.coeff.parameters} param_value = float(self.coeff.bind(binds)) return self.__class__(self.primitive, coeff=param_value) # Nothing to collapse here. def reduce(self) -> OperatorBase: return self def to_matrix(self, massive: bool = False) -> np.ndarray: raise NotImplementedError def to_matrix_op(self, massive: bool = False) -> OperatorBase: """ Returns a ``MatrixOp`` equivalent to this Operator. """ coeff = self.coeff op = self.copy() op._coeff = 1 prim_mat = op.to_matrix(massive=massive) from .matrix_op import MatrixOp return MatrixOp(prim_mat, coeff=coeff) def to_instruction(self) -> Instruction: """ Returns an ``Instruction`` equivalent to this Operator. """ raise NotImplementedError def to_circuit(self) -> QuantumCircuit: """ Returns a ``QuantumCircuit`` equivalent to this Operator. """ qc = QuantumCircuit(self.num_qubits) qc.append(self.to_instruction(), qargs=range(self.primitive.num_qubits)) return qc.decompose() def to_circuit_op(self) -> OperatorBase: """ Returns a ``CircuitOp`` equivalent to this Operator. """ from .circuit_op import CircuitOp if self.coeff == 0: return CircuitOp(QuantumCircuit(self.num_qubits), coeff=0) return CircuitOp(self.to_circuit(), coeff=self.coeff) def to_pauli_op(self, massive: bool = False) -> OperatorBase: """ Returns a sum of ``PauliOp`` s equivalent to this Operator. """ # pylint: disable=cyclic-import from .matrix_op import MatrixOp mat_op = cast(MatrixOp, self.to_matrix_op(massive=massive)) sparse_pauli = SparsePauliOp.from_operator(mat_op.primitive) if not sparse_pauli.to_list(): from ..operator_globals import I return (I ^ self.num_qubits) * 0.0 from .pauli_op import PauliOp if len(sparse_pauli) == 1: label, coeff = sparse_pauli.to_list()[0] coeff = coeff.real if np.isreal(coeff) else coeff return PauliOp(Pauli(label), coeff * self.coeff) from ..list_ops.summed_op import SummedOp return SummedOp( [ PrimitiveOp( Pauli(label), coeff.real if coeff == coeff.real else coeff, ) for (label, coeff) in sparse_pauli.to_list() ], self.coeff, )
[ "numpy.isreal", "qiskit.quantum_info.Pauli", "qiskit.quantum_info.SparsePauliOp.from_operator", "qiskit.QuantumCircuit" ]
[((10316, 10347), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['self.num_qubits'], {}), '(self.num_qubits)\n', (10330, 10347), False, 'from qiskit import QuantumCircuit\n'), ((11091, 11136), 'qiskit.quantum_info.SparsePauliOp.from_operator', 'SparsePauliOp.from_operator', (['mat_op.primitive'], {}), '(mat_op.primitive)\n', (11118, 11136), False, 'from qiskit.quantum_info import Operator, Pauli, SparsePauliOp, Statevector\n'), ((10673, 10704), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['self.num_qubits'], {}), '(self.num_qubits)\n', (10687, 10704), False, 'from qiskit import QuantumCircuit\n'), ((11428, 11444), 'numpy.isreal', 'np.isreal', (['coeff'], {}), '(coeff)\n', (11437, 11444), True, 'import numpy as np\n'), ((11483, 11495), 'qiskit.quantum_info.Pauli', 'Pauli', (['label'], {}), '(label)\n', (11488, 11495), False, 'from qiskit.quantum_info import Operator, Pauli, SparsePauliOp, Statevector\n'), ((11656, 11668), 'qiskit.quantum_info.Pauli', 'Pauli', (['label'], {}), '(label)\n', (11661, 11668), False, 'from qiskit.quantum_info import Operator, Pauli, SparsePauliOp, Statevector\n')]
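The entry above builds its operators through the `PrimitiveOp.__new__` factory, which dispatches on the type of the wrapped primitive. A minimal usage sketch of that dispatch, assuming a qiskit-terra release that still ships the `qiskit.opflow` module (it is deprecated in newer versions):

from qiskit import QuantumCircuit
from qiskit.quantum_info import Pauli
from qiskit.opflow import PrimitiveOp

qc = QuantumCircuit(1)
qc.x(0)

circuit_op = PrimitiveOp(qc)           # dispatched to CircuitOp by __new__
pauli_op = PrimitiveOp(Pauli("X"))   # dispatched to PauliOp by __new__
print(type(circuit_op).__name__, type(pauli_op).__name__)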
# Generated by Django 2.2.1 on 2020-06-30 06:44 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('webapi', '0030_auto_20200630_1526'), ] operations = [ migrations.RemoveField( model_name='datum', name='process', ), migrations.AddField( model_name='datum', name='figure', field=models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, to='webapi.Figure'), preserve_default=False, ), ]
[ "django.db.migrations.RemoveField", "django.db.models.ForeignKey" ]
[((267, 325), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""datum"""', 'name': '"""process"""'}), "(model_name='datum', name='process')\n", (289, 325), False, 'from django.db import migrations, models\n'), ((468, 566), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': '""""""', 'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""webapi.Figure"""'}), "(default='', on_delete=django.db.models.deletion.PROTECT,\n to='webapi.Figure')\n", (485, 566), False, 'from django.db import migrations, models\n')]
from fastapi.testclient import TestClient

from app.main import app

client = TestClient(app)


def test_read_main():
    response = client.get("/plants")
    assert response.status_code == 200
    assert response.json() == [{"name": "flower"}, {"name": "grass"}]
[ "fastapi.testclient.TestClient" ]
[((133, 148), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (143, 148), False, 'from fastapi.testclient import TestClient\n')]
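The application under test (`app/main.py`) is not part of this entry. A minimal sketch of an app that would satisfy the assertions above might look like the following; the handler name is an assumption:

from fastapi import FastAPI

app = FastAPI()

@app.get("/plants")
def read_plants():
    return [{"name": "flower"}, {"name": "grass"}]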
import argparse
import time

import cv2
import torch
import torch.nn as nn
import torchvision.models as models
from torch.autograd import Variable
from torchvision import transforms

result = ["Surprise","Fear","Disgust","Happiness","Sadness","Anger","Neutral"]

class Res18Feature(nn.Module):
    def __init__(self, pretrained, num_classes = 7):
        super(Res18Feature, self).__init__()
        resnet = models.resnet18(pretrained)
        # all ResNet-18 layers except the final fully connected one
        self.features = nn.Sequential(*list(resnet.children())[:-1])
        fc_in_dim = list(resnet.children())[-1].in_features
        self.fc = nn.Linear(fc_in_dim, num_classes)
        # scalar attention weight per sample, squashed into (0, 1)
        self.alpha = nn.Sequential(nn.Linear(fc_in_dim, 1), nn.Sigmoid())

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        attention_weights = self.alpha(x)
        out = attention_weights * self.fc(x)
        return attention_weights, out

model_save_path = "./checkpoint/wiki2020.pth"  # model path

def main(args):
    preprocess_transform = transforms.Compose([transforms.ToPILImage(),
                                               transforms.Resize((224, 224)),
                                               transforms.ToTensor(),
                                               transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

    res18 = Res18Feature(pretrained = False)
    checkpoint = torch.load(model_save_path)
    res18.load_state_dict(checkpoint['model_state_dict'])
    res18.cuda()
    res18.eval()

    for i in [0]:
        time1 = time.time()
        image = cv2.imread(args.img)
        image = image[:, :, ::-1]  # BGR (OpenCV) -> RGB

        image_tensor = preprocess_transform(image)
        tensor = Variable(torch.unsqueeze(image_tensor, dim=0).float(), requires_grad=False)
        tensor = tensor.cuda()
        time2 = time.time()
        _, outputs = res18(tensor)
        _, predicts = torch.max(outputs, 1)
        print(result[int(predicts.cpu().data)])

def parse_args():
    parser = argparse.ArgumentParser(description='Testing')
    parser.add_argument('--img', default="./img/suripse.jpg", type=str)
    args = parser.parse_args()
    return args

if __name__ == "__main__":
    args = parse_args()
    main(args)
[ "torchvision.models.resnet18", "argparse.ArgumentParser", "torch.load", "torchvision.transforms.Normalize", "torchvision.transforms.ToPILImage", "time.time", "torchvision.transforms.ToTensor", "cv2.imread", "torch.max", "torch.nn.Linear", "torch.unsqueeze", "torchvision.transforms.Resize", "torch.nn.Sigmoid" ]
[((1384, 1411), 'torch.load', 'torch.load', (['model_save_path'], {}), '(model_save_path)\n', (1394, 1411), False, 'import os, torch\n'), ((1933, 1979), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Testing"""'}), "(description='Testing')\n", (1956, 1979), False, 'import argparse\n'), ((577, 604), 'torchvision.models.resnet18', 'models.resnet18', (['pretrained'], {}), '(pretrained)\n', (592, 604), True, 'import torchvision.models as models\n'), ((742, 775), 'torch.nn.Linear', 'nn.Linear', (['fc_in_dim', 'num_classes'], {}), '(fc_in_dim, num_classes)\n', (751, 775), True, 'import torch.nn as nn\n'), ((1525, 1536), 'time.time', 'time.time', ([], {}), '()\n', (1534, 1536), False, 'import time\n'), ((1554, 1574), 'cv2.imread', 'cv2.imread', (['args.img'], {}), '(args.img)\n', (1564, 1574), False, 'import cv2\n'), ((1776, 1787), 'time.time', 'time.time', ([], {}), '()\n', (1785, 1787), False, 'import time\n'), ((1837, 1858), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (1846, 1858), False, 'import os, torch\n'), ((808, 831), 'torch.nn.Linear', 'nn.Linear', (['fc_in_dim', '(1)'], {}), '(fc_in_dim, 1)\n', (817, 831), True, 'import torch.nn as nn\n'), ((832, 844), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (842, 844), True, 'import torch.nn as nn\n'), ((1159, 1182), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (1180, 1182), False, 'from torchvision import transforms\n'), ((1183, 1212), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (1200, 1212), False, 'from torchvision import transforms\n'), ((1213, 1234), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1232, 1234), False, 'from torchvision import transforms\n'), ((1240, 1315), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1260, 1315), False, 'from torchvision import transforms\n'), ((1674, 1710), 'torch.unsqueeze', 'torch.unsqueeze', (['image_tensor'], {'dim': '(0)'}), '(image_tensor, dim=0)\n', (1689, 1710), False, 'import os, torch\n')]
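`Res18Feature.forward` above returns an `(attention_weights, logits)` pair, and `main` takes the argmax of the logits. A small standalone sketch (random stand-in logits, same 7-label list) of turning those logits into per-class probabilities:

import torch
import torch.nn.functional as F

result = ["Surprise", "Fear", "Disgust", "Happiness", "Sadness", "Anger", "Neutral"]

logits = torch.randn(1, 7)            # stand-in for `outputs` from res18(tensor)
probs = F.softmax(logits, dim=1)[0]  # normalize the 7 class scores
for label, p in zip(result, probs.tolist()):
    print(f"{label}: {p:.3f}")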
'''
Defines a data pipeline that can be constructed from string.

author: <NAME>
'''

from argonaut import datasets as ds_mod
from argonaut.utils import Summary


class Pipeline():
    '''Loads a data pipeline based on string operations and uses the data for the results.

    In general these pipelines have 3 types of operations:
    1. loading - load data from file and generate a dataset
    2. modify - Modify the datasource (e.g. augmentation or permutation)
    3. modify and copy - Same as modify, but the operations are applied to a copy of the dataset

    Output data types can be of 2 categories:
    - loaded - data stored directly in RAM
    - generated - data stored in generator that is loaded at runtime (for larger datasets)

    Args:
        data_args (list): List of dicts that contain the relevant arguments for the datasets to load
        train_args (list): List of dicts that contain information about the training process (i.e. which datasets are relevant to keep)
        clean (bool): defines if unused datasets should be discarded
    '''
    def __init__(self, data_args, train_args, clean=True):
        # store the arguments
        self.args = data_args
        self.clean = clean

        # iterate to find relevant datasets
        used_ds = set()
        for task in train_args:
            used_ds.add(task["dataset"])
        self.datasets = used_ds

    def transform(self, summary=None):
        '''Executes the data loading and transformation.'''
        if summary is None:
            summary = Summary()
        datasets = {}

        # iterate through all sets
        for ds_args in self.args:
            # load the relevant dataset
            ds = None
            if "input" in ds_args:
                if ds_args["input"] not in datasets:
                    raise LookupError("Could not find the input ({}) for dataset ({}). Please check execution order! Note that the system might append `_X` to the name in case the function returns multiple results.".format(ds_args["input"], ds_args["id"]))
                ds = datasets[ds_args["input"]]
            else:
                # load dataset
                ds_class = ds_mod
                for name in ds_args["dataset"].split("."):
                    ds_class = getattr(ds_class, name)

                # init dataset
                params = {}
                if "params" in ds_args:
                    params = ds_args["params"]
                ds, summary = ds_class(**params, summary=summary)

            # FEAT: avoid loading of dataset filtered by curriculum (rebuild pipeline with better curriculum filtering from base functions on)

            # check operations
            if "operations" in ds_args:
                for op in ds_args["operations"]:
                    # NOTE: might change the utils to a more general module?
                    tf_cls = getattr(ds_mod.transformer, op["name"])
                    params = {}
                    if "params" in op:
                        params = op["params"]
                    tf = tf_cls(**params)

                    # handle array outputs for chained results
                    if isinstance(ds, list):
                        ds_arr = []
                        for ds_i in ds:
                            ds_out, summary = tf.transform(ds_i, summary=summary)
                            ds_arr.append(ds_out)
                        ds = ds_arr
                    else:
                        ds, summary = tf.transform(ds, summary=summary)

            # add completed data
            if isinstance(ds, list):
                for i, ids in enumerate(ds):
                    datasets["{}_{}".format(ds_args["id"], i)] = ids
            else:
                datasets[ds_args["id"]] = ds

        if self.clean is False:
            return datasets, summary

        # check which datasets should be kept
        res = {}
        for key in datasets:
            if key in self.datasets:
                res[key] = datasets[key]
        del datasets

        return res, summary
[ "argonaut.utils.Summary" ]
[((1426, 1435), 'argonaut.utils.Summary', 'Summary', ([], {}), '()\n', (1433, 1435), False, 'from argonaut.utils import Summary\n')]
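The argument shapes `Pipeline` expects are not documented in the entry; the sketch below is inferred only from the attribute reads in `__init__` and `transform` (`ds_args["id"]`, `["dataset"]`, `["params"]`, `["operations"]`, `["input"]`, `task["dataset"]`). Dataset and transformer names are placeholders:

data_args = [
    {
        "id": "raw",                    # name other stages / tasks refer to
        "dataset": "SomeDataset",       # resolved on argonaut.datasets via getattr
        "params": {"path": "/data"},    # forwarded to the dataset constructor
        "operations": [                 # each resolved on datasets.transformer
            {"name": "SomeTransformer", "params": {"p": 0.5}},
        ],
    },
    {
        "id": "augmented",
        "input": "raw",                 # reuse the output of an earlier stage
    },
]
train_args = [{"dataset": "augmented"}]  # only these ids survive when clean=True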
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 <NAME> <<EMAIL>>
# NOTICE: authors should document their contributions concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
Some utilities to help with dataset creation and annotation
"""
from wavestate.bunch import Bunch

import numpy as np

from .. import annotate
from .. import TFmath
from .. import representations
from .. import fitters_ZPK
from . import plots


def make_description(
    generator, sets=1, instances=1, description=None, annotation=None, **kwargs
):
    doc = generator.__doc__
    if description is None:
        if doc is not None:
            description = annotate.padding_remove(doc)
        else:
            description = "<no description>"
    else:
        description = annotate.padding_remove(description)

    if annotation is None and doc is not None:
        annotation = annotate.padding_remove(doc)

    return Bunch(
        generator=generator,
        instances=instances,
        description=description,
        annotation=annotation,
        **kwargs
    )


def generator_autofill(
    F_Hz,
    SNR,
    F_nyquist_Hz,
    data=None,
    bestfit_ZPK_z=None,
    bestfit_ZPK_s=None,
    delay_s=0,
    residuals_log_best=None,
    sN=0,
    iN=0,
    **kwargs
):
    if bestfit_ZPK_s:
        # replace with the ZPKs filled out fully
        bestfit_ZPK_s = TFmath.ZPK_fill(bestfit_ZPK_s)
    if bestfit_ZPK_z:
        # replace with the ZPKs filled out fully
        bestfit_ZPK_z = TFmath.ZPK_fill(bestfit_ZPK_z)

    if not bestfit_ZPK_z and F_nyquist_Hz is not None:
        if bestfit_ZPK_s:
            bestfit_ZPK_z = TFmath.StoZ(
                bestfit_ZPK_s,
                F_nyquist_Hz=F_nyquist_Hz,
            )
    if not bestfit_ZPK_s and F_nyquist_Hz is not None:
        if bestfit_ZPK_z:
            bestfit_ZPK_s = TFmath.ZtoS(
                bestfit_ZPK_z,
                F_nyquist_Hz=F_nyquist_Hz,
            )

    if data is None:
        if SNR is not None:
            rand = np.random.RandomState()
            rand.seed(iN)
            N = len(F_Hz)
            rel_noise = rand.normal(1, 1 / SNR, N) + 1j * rand.normal(0, 1 / SNR, N)
        else:
            rel_noise = 1
        if bestfit_ZPK_s:
            data = rel_noise * TFmath.TF_ZPK(
                F_Hz,
                ZPK=bestfit_ZPK_s,
            )
        elif bestfit_ZPK_z:
            # Z-domain branch: pass the Z-domain ZPK (the original passed
            # bestfit_ZPK_s here, which is empty in this branch).
            data = rel_noise * TFmath.TF_ZPK(
                F_Hz,
                ZPK=bestfit_ZPK_z,
                F_nyquist_Hz=F_nyquist_Hz,
            )
        if delay_s is not None:
            data = data * np.exp(-2j * np.pi * delay_s * F_Hz)

    if bestfit_ZPK_z is not None:
        rep_z = representations.ZPKwData(
            data=data,
            F_Hz=F_Hz,
            W=SNR,
            F_nyquist_Hz=F_nyquist_Hz,
            ZPK=bestfit_ZPK_z,
            delay_s=delay_s,
        )
    else:
        rep_z = None
    if bestfit_ZPK_s is not None:
        rep_s = representations.ZPKwData(
            data=data,
            F_Hz=F_Hz,
            W=SNR,
            F_nyquist_Hz=None,
            ZPK=bestfit_ZPK_s,
            delay_s=delay_s,
        )
    else:
        rep_s = None

    return Bunch(
        F_Hz=F_Hz,
        data=data,
        SNR=SNR,
        F_nyquist_Hz=F_nyquist_Hz,
        residuals_log_best=residuals_log_best,
        bestfit_ZPK_z=bestfit_ZPK_z,
        bestfit_ZPK_s=bestfit_ZPK_s,
        rep_z=rep_z,
        rep_s=rep_s,
        iN=iN,
        sN=sN,
        **kwargs
    )


def assert_almost_equal(arr1, arr2, decimals):
    # "decimals" is interpreted as significant digits of relative tolerance;
    # the original passed it positionally as rtol, i.e. a tolerance of e.g. 4.
    np.testing.assert_allclose(arr1, arr2, rtol=10 ** -decimals)
    # np.testing.assert_allclose(arr1.real, arr2.real, decimals)
    # np.testing.assert_allclose(arr1.imag, arr2.imag, decimals)


def sign_validate(aid, fitter):
    """
    To be added as a hint to data2filter
    """
    rep = fitter.ZPKrep
    xfer = rep.xfer_fit
    data = rep.data
    rat = data / xfer
    rat_ang = np.exp(1j * np.angle(rat))
    ang_avg_rep = np.sum(rat_ang * rep.W ** 2) / np.sum(rep.W ** 2)

    xfer = fitter.xfer_fit
    data = fitter.data
    rat = data / xfer
    rat_ang = np.exp(1j * np.angle(rat))
    ang_avg_fit = np.sum(rat_ang * fitter.W ** 2) / np.sum(fitter.W ** 2)
    # print("SGN: ", ang_avg_rep, ang_avg_fit)
    # axB = plots.plots.plot_fit(
    #    fitter,
    #    fname = 'test1.png',
    # )
    # axB =
plots.plots.plot_fitter_flag( # fitter, # fname = 'test3.png', # ) # axB = plots.plots.plot_fit( # fitter.ZPKrep, # fname = 'test2.png', # ) if isinstance(fitter, fitters_ZPK.MultiReprFilterBase): for coding in list(fitter.num_codings) + list(fitter.den_codings): rB = representations.RootBunch( u=coding.roots(), constraint=representations.root_constraints.no_constraint, ) h1 = coding.transfer() h, lnG = rB.val_lnG(fitter.Xex_grid) h = h * np.exp(lnG) assert_almost_equal(h / h1, 1, 4) assert ang_avg_fit.real > 0 and ang_avg_rep.real > 0 sign_validate_hint = { "fitter_update_validate": sign_validate, "fitter_check_validate": sign_validate, } def rational_fitter_validate(rat_fitter, fitter): """ To be added as a hint to data2filter """ rep1 = rat_fitter.ZPKrep.xfer_fit rep2 = fitter.ZPKrep.xfer_fit rat = rat_fitter.xfer_fit mrf = fitter.xfer_fit assert_almost_equal(rep1 / rep2, 1, 5) assert_almost_equal(rat / rep2, 1, 5) assert_almost_equal(mrf / rep2, 1, 5) print("Checking Rational Fitter") def sign_validate_and_plot_hint(pyfile, request): def sign_validate_plot(aid, fitter): with plots.plot_on_assert(pyfile, request, fitter, plot_anyway=False): try: sign_validate(aid, fitter) except AssertionError: print(fitter) print(fitter.F_nyquist_Hz) print(fitter.zeros) print(fitter.zeros_overlay) print(fitter.ZPKrep) # assert(False) raise hint = { "fitter_update_validate": sign_validate_plot, "fitter_check_validate": sign_validate_plot, "rational_fitter_validate": rational_fitter_validate, } return hint def stability_validate_and_plot_hint(pyfile, request): def sign_validate_plot(aid, fitter): with plots.plot_on_assert(pyfile, request, fitter, plot_anyway=False): assert np.all(fitter.ZPKrep.poles.c.real <= 0) assert np.all(fitter.ZPKrep.poles.r.real <= 0) try: sign_validate(aid, fitter) except AssertionError: print(fitter) print(fitter.F_nyquist_Hz) print(fitter.zeros) print(fitter.zeros_overlay) print(fitter.ZPKrep) # assert(False) raise hint = { "fitter_update_validate": sign_validate_plot, "fitter_check_validate": sign_validate_plot, "rational_fitter_validate": rational_fitter_validate, } return hint def sign_validate_and_digest_hint(pyfile, request): def sign_validate_plot(aid, fitter): with plots.digest_on_assert(pyfile, request, aid, plot_anyway=False): try: sign_validate(aid, fitter) except AssertionError: print(fitter) print(fitter.F_nyquist_Hz) print(fitter.zeros) print(fitter.zeros_overlay) print(fitter.ZPKrep) # assert(False) raise hint = { "fitter_update_validate": sign_validate_plot, "fitter_check_validate": sign_validate_plot, "rational_fitter_validate": rational_fitter_validate, } return hint
[ "numpy.sum", "numpy.angle", "numpy.random.RandomState", "numpy.exp", "wavestate.bunch.Bunch", "numpy.testing.assert_allclose", "numpy.all" ]
[((1086, 1195), 'wavestate.bunch.Bunch', 'Bunch', ([], {'generator': 'generator', 'instances': 'instances', 'description': 'description', 'annotation': 'annotation'}), '(generator=generator, instances=instances, description=description,\n annotation=annotation, **kwargs)\n', (1091, 1195), False, 'from wavestate.bunch import Bunch\n'), ((3385, 3608), 'wavestate.bunch.Bunch', 'Bunch', ([], {'F_Hz': 'F_Hz', 'data': 'data', 'SNR': 'SNR', 'F_nyquist_Hz': 'F_nyquist_Hz', 'residuals_log_best': 'residuals_log_best', 'bestfit_ZPK_z': 'bestfit_ZPK_z', 'bestfit_ZPK_s': 'bestfit_ZPK_s', 'rep_z': 'rep_z', 'rep_s': 'rep_s', 'iN': 'iN', 'sN': 'sN'}), '(F_Hz=F_Hz, data=data, SNR=SNR, F_nyquist_Hz=F_nyquist_Hz,\n residuals_log_best=residuals_log_best, bestfit_ZPK_z=bestfit_ZPK_z,\n bestfit_ZPK_s=bestfit_ZPK_s, rep_z=rep_z, rep_s=rep_s, iN=iN, sN=sN, **\n kwargs)\n', (3390, 3608), False, 'from wavestate.bunch import Bunch\n'), ((3751, 3799), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['arr1', 'arr2', 'decimals'], {}), '(arr1, arr2, decimals)\n', (3777, 3799), True, 'import numpy as np\n'), ((4170, 4198), 'numpy.sum', 'np.sum', (['(rat_ang * rep.W ** 2)'], {}), '(rat_ang * rep.W ** 2)\n', (4176, 4198), True, 'import numpy as np\n'), ((4201, 4219), 'numpy.sum', 'np.sum', (['(rep.W ** 2)'], {}), '(rep.W ** 2)\n', (4207, 4219), True, 'import numpy as np\n'), ((4352, 4383), 'numpy.sum', 'np.sum', (['(rat_ang * fitter.W ** 2)'], {}), '(rat_ang * fitter.W ** 2)\n', (4358, 4383), True, 'import numpy as np\n'), ((4386, 4407), 'numpy.sum', 'np.sum', (['(fitter.W ** 2)'], {}), '(fitter.W ** 2)\n', (4392, 4407), True, 'import numpy as np\n'), ((2188, 2211), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (2209, 2211), True, 'import numpy as np\n'), ((4137, 4150), 'numpy.angle', 'np.angle', (['rat'], {}), '(rat)\n', (4145, 4150), True, 'import numpy as np\n'), ((4319, 4332), 'numpy.angle', 'np.angle', (['rat'], {}), '(rat)\n', (4327, 4332), True, 'import numpy as np\n'), ((6691, 6730), 'numpy.all', 'np.all', (['(fitter.ZPKrep.poles.c.real <= 0)'], {}), '(fitter.ZPKrep.poles.c.real <= 0)\n', (6697, 6730), True, 'import numpy as np\n'), ((6750, 6789), 'numpy.all', 'np.all', (['(fitter.ZPKrep.poles.r.real <= 0)'], {}), '(fitter.ZPKrep.poles.r.real <= 0)\n', (6756, 6789), True, 'import numpy as np\n'), ((2780, 2818), 'numpy.exp', 'np.exp', (['(-2.0j * np.pi * delay_s * F_Hz)'], {}), '(-2.0j * np.pi * delay_s * F_Hz)\n', (2786, 2818), True, 'import numpy as np\n'), ((5144, 5155), 'numpy.exp', 'np.exp', (['lnG'], {}), '(lnG)\n', (5150, 5155), True, 'import numpy as np\n')]
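The core check in `sign_validate` above is a W²-weighted average of the unit phasors `data / xfer`: if the fit has the right overall sign, that average has positive real part. A standalone sketch of the same computation with small made-up arrays:

import numpy as np

data = np.array([1 + 0.1j, 2 - 0.2j, 0.5 + 0.05j])  # measured transfer function
xfer = np.array([1.0, 2.0, 0.5])                    # fitted transfer function
W = np.array([1.0, 2.0, 1.0])                       # per-point weights

rat_ang = np.exp(1j * np.angle(data / xfer))         # unit phasors of the ratio
ang_avg = np.sum(rat_ang * W**2) / np.sum(W**2)     # weighted phase average
assert ang_avg.real > 0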
import torch import torch.nn as nn import torch.nn.functional as F import dgcn log = dgcn.utils.get_logger() class Classifier(nn.Module): def __init__(self, input_dim, hidden_size, tag_size, args): super(Classifier, self).__init__() self.emotion_att = MaskedEmotionAtt(input_dim) self.lin1 = nn.Linear(input_dim, hidden_size) self.drop = nn.Dropout(args.drop_rate) self.lin2 = nn.Linear(hidden_size, tag_size) if args.class_weight: self.loss_weights = torch.tensor([1 / 0.086747, 1 / 0.144406, 1 / 0.227883, 1 / 0.160585, 1 / 0.127711, 1 / 0.252668]).to(args.device) self.nll_loss = nn.NLLLoss(self.loss_weights) else: self.nll_loss = nn.NLLLoss() def get_prob(self, h, text_len_tensor): # h_hat = self.emotion_att(h, text_len_tensor) # hidden = self.drop(F.relu(self.lin1(h_hat))) hidden = self.drop(F.relu(self.lin1(h))) scores = self.lin2(hidden) log_prob = F.log_softmax(scores, dim=-1) return log_prob def forward(self, h, text_len_tensor): log_prob = self.get_prob(h, text_len_tensor) y_hat = torch.argmax(log_prob, dim=-1) return y_hat def get_loss(self, h, label_tensor, text_len_tensor): log_prob = self.get_prob(h, text_len_tensor) loss = self.nll_loss(log_prob, label_tensor) return loss class MaskedEmotionAtt(nn.Module): def __init__(self, input_dim): super(MaskedEmotionAtt, self).__init__() self.lin = nn.Linear(input_dim, input_dim) def forward(self, h, text_len_tensor): batch_size = text_len_tensor.size(0) x = self.lin(h) # [node_num, H] ret = torch.zeros_like(h) s = 0 for bi in range(batch_size): cur_len = text_len_tensor[bi].item() y = x[s: s + cur_len] z = h[s: s + cur_len] scores = torch.mm(z, y.t()) # [L, L] probs = F.softmax(scores, dim=1) out = z.unsqueeze(0) * probs.unsqueeze(-1) # [1, L, H] x [L, L, 1] --> [L, L, H] out = torch.sum(out, dim=1) # [L, H] ret[s: s + cur_len, :] = out s += cur_len return ret
[ "torch.nn.Dropout", "torch.zeros_like", "torch.argmax", "dgcn.utils.get_logger", "torch.nn.functional.softmax", "torch.nn.NLLLoss", "torch.nn.functional.log_softmax", "torch.nn.Linear", "torch.sum", "torch.tensor" ]
[((93, 116), 'dgcn.utils.get_logger', 'dgcn.utils.get_logger', ([], {}), '()\n', (114, 116), False, 'import dgcn\n'), ((337, 370), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'hidden_size'], {}), '(input_dim, hidden_size)\n', (346, 370), True, 'import torch.nn as nn\n'), ((392, 418), 'torch.nn.Dropout', 'nn.Dropout', (['args.drop_rate'], {}), '(args.drop_rate)\n', (402, 418), True, 'import torch.nn as nn\n'), ((440, 472), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'tag_size'], {}), '(hidden_size, tag_size)\n', (449, 472), True, 'import torch.nn as nn\n'), ((1080, 1109), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['scores'], {'dim': '(-1)'}), '(scores, dim=-1)\n', (1093, 1109), True, 'import torch.nn.functional as F\n'), ((1254, 1284), 'torch.argmax', 'torch.argmax', (['log_prob'], {'dim': '(-1)'}), '(log_prob, dim=-1)\n', (1266, 1284), False, 'import torch\n'), ((1649, 1680), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'input_dim'], {}), '(input_dim, input_dim)\n', (1658, 1680), True, 'import torch.nn as nn\n'), ((1830, 1849), 'torch.zeros_like', 'torch.zeros_like', (['h'], {}), '(h)\n', (1846, 1849), False, 'import torch\n'), ((728, 757), 'torch.nn.NLLLoss', 'nn.NLLLoss', (['self.loss_weights'], {}), '(self.loss_weights)\n', (738, 757), True, 'import torch.nn as nn\n'), ((802, 814), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (812, 814), True, 'import torch.nn as nn\n'), ((2095, 2119), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (2104, 2119), True, 'import torch.nn.functional as F\n'), ((2234, 2255), 'torch.sum', 'torch.sum', (['out'], {'dim': '(1)'}), '(out, dim=1)\n', (2243, 2255), False, 'import torch\n'), ((537, 640), 'torch.tensor', 'torch.tensor', (['[1 / 0.086747, 1 / 0.144406, 1 / 0.227883, 1 / 0.160585, 1 / 0.127711, 1 / \n 0.252668]'], {}), '([1 / 0.086747, 1 / 0.144406, 1 / 0.227883, 1 / 0.160585, 1 / \n 0.127711, 1 / 0.252668])\n', (549, 640), False, 'import torch\n')]
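`MaskedEmotionAtt` above applies, per conversation, the attention `scores = z @ y.T`, a row-wise softmax, and a probability-weighted sum over the sequence dimension. A toy-sized sketch of that math with random inputs:

import torch
import torch.nn.functional as F

L, H = 4, 8            # one conversation of 4 utterances, hidden size 8
h = torch.randn(L, H)  # node features for this conversation
y = torch.randn(L, H)  # stands in for lin(h) in the module above

scores = torch.mm(h, y.t())    # [L, L] pairwise scores
probs = F.softmax(scores, dim=1)  # attention over the conversation
out = torch.sum(h.unsqueeze(0) * probs.unsqueeze(-1), dim=1)  # [L, H]
print(out.shape)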
# # Composite electrolyte potential employing stefan-maxwell # import pybamm from .base_electrolyte_conductivity import BaseElectrolyteConductivity class Composite(BaseElectrolyteConductivity): """Base class for conservation of charge in the electrolyte employing the Stefan-Maxwell constitutive equations. Parameters ---------- param : parameter class The parameters to use for this submodel domain : str, optional The domain in which the model holds options : dict, optional A dictionary of options to be passed to the model. higher_order_terms : str What kind of higher-order terms to use ('composite' or 'first-order') **Extends:** :class:`pybamm.electrolyte_conductivity.BaseElectrolyteConductivity` """ def __init__( self, param, domain=None, options=None, higher_order_terms="composite" ): super().__init__(param, domain, options=options) self.higher_order_terms = higher_order_terms def _higher_order_macinnes_function(self, x): "Function to differentiate between composite and first-order models" if self.higher_order_terms == "composite": return pybamm.log(x) elif self.higher_order_terms == "first-order": return x def get_coupled_variables(self, variables): if self.higher_order_terms == "composite": c_e_av = variables["X-averaged electrolyte concentration"] elif self.higher_order_terms == "first-order": c_e_av = variables["Leading-order x-averaged electrolyte concentration"] i_boundary_cc_0 = variables["Leading-order current collector current density"] if not self.half_cell: c_e_n = variables["Negative electrolyte concentration"] delta_phi_n_av = variables[ "X-averaged negative electrode surface potential difference" ] phi_s_n_av = variables["X-averaged negative electrode potential"] tor_n_av = variables[ "Leading-order x-averaged negative electrolyte transport efficiency" ] c_e_s = variables["Separator electrolyte concentration"] c_e_p = variables["Positive electrolyte concentration"] tor_s_av = variables["Leading-order x-averaged separator transport efficiency"] tor_p_av = variables[ "Leading-order x-averaged positive electrolyte transport efficiency" ] T_av = variables["X-averaged cell temperature"] T_av_s = pybamm.PrimaryBroadcast(T_av, "separator") T_av_p = pybamm.PrimaryBroadcast(T_av, "positive electrode") param = self.param l_n = param.l_n l_p = param.l_p x_s = pybamm.standard_spatial_vars.x_s x_p = pybamm.standard_spatial_vars.x_p # bulk conductivities kappa_s_av = param.kappa_e(c_e_av, T_av) * tor_s_av kappa_p_av = param.kappa_e(c_e_av, T_av) * tor_p_av chi_av = param.chi(c_e_av, T_av) chi_av_s = pybamm.PrimaryBroadcast(chi_av, "separator") chi_av_p = pybamm.PrimaryBroadcast(chi_av, "positive electrode") # electrolyte current if self.half_cell: i_e_n = None else: x_n = pybamm.standard_spatial_vars.x_n chi_av_n = pybamm.PrimaryBroadcast(chi_av, "negative electrode") T_av_n = pybamm.PrimaryBroadcast(T_av, "negative electrode") kappa_n_av = param.kappa_e(c_e_av, T_av) * tor_n_av i_e_n = i_boundary_cc_0 * x_n / l_n i_e_s = pybamm.PrimaryBroadcast(i_boundary_cc_0, "separator") i_e_p = i_boundary_cc_0 * (1 - x_p) / l_p i_e = pybamm.concatenation(i_e_n, i_e_s, i_e_p) # electrolyte potential if self.half_cell: phi_e_li = variables["Lithium metal interface electrolyte potential"] c_e_n = pybamm.boundary_value(c_e_s, "left") phi_e_const = ( phi_e_li - chi_av * (1 + param.Theta * T_av) * self._higher_order_macinnes_function(c_e_n / c_e_av) + (i_boundary_cc_0 * param.C_e / param.gamma_e / kappa_s_av) * l_n ) phi_e_n = None else: phi_e_const = ( -delta_phi_n_av + phi_s_n_av - ( chi_av * (1 + 
param.Theta * T_av) * pybamm.x_average( self._higher_order_macinnes_function(c_e_n / c_e_av) ) ) - ( (i_boundary_cc_0 * param.C_e * l_n / param.gamma_e) * (1 / (3 * kappa_n_av) - 1 / kappa_s_av) ) ) phi_e_n = ( phi_e_const + ( chi_av_n * (1 + param.Theta * T_av_n) * self._higher_order_macinnes_function(c_e_n / c_e_av) ) - (i_boundary_cc_0 * (param.C_e / param.gamma_e) / kappa_n_av) * (x_n ** 2 - l_n ** 2) / (2 * l_n) - i_boundary_cc_0 * l_n * (param.C_e / param.gamma_e) / kappa_s_av ) phi_e_s = ( phi_e_const + ( chi_av_s * (1 + param.Theta * T_av_s) * self._higher_order_macinnes_function(c_e_s / c_e_av) ) - (i_boundary_cc_0 * param.C_e / param.gamma_e / kappa_s_av) * x_s ) phi_e_p = ( phi_e_const + ( chi_av_p * (1 + param.Theta * T_av_p) * self._higher_order_macinnes_function(c_e_p / c_e_av) ) - (i_boundary_cc_0 * (param.C_e / param.gamma_e) / kappa_p_av) * (x_p * (2 - x_p) + l_p ** 2 - 1) / (2 * l_p) - i_boundary_cc_0 * (1 - l_p) * (param.C_e / param.gamma_e) / kappa_s_av ) # concentration overpotential macinnes_c_e_p = pybamm.x_average( self._higher_order_macinnes_function(c_e_p / c_e_av) ) if self.half_cell: macinnes_c_e_n = 0 ohmic_n = 0 else: macinnes_c_e_n = pybamm.x_average( self._higher_order_macinnes_function(c_e_n / c_e_av) ) ohmic_n = param.l_n / (3 * kappa_n_av) eta_c_av = chi_av * (1 + param.Theta * T_av) * (macinnes_c_e_p - macinnes_c_e_n) # average electrolyte ohmic losses delta_phi_e_av = -(param.C_e * i_boundary_cc_0 / param.gamma_e) * ( ohmic_n + param.l_s / (kappa_s_av) + param.l_p / (3 * kappa_p_av) ) variables.update( self._get_standard_potential_variables(phi_e_n, phi_e_s, phi_e_p) ) variables.update(self._get_standard_current_variables(i_e)) variables.update(self._get_split_overpotential(eta_c_av, delta_phi_e_av)) # Override print_name i_e.print_name = "i_e" return variables
[ "pybamm.log", "pybamm.PrimaryBroadcast", "pybamm.boundary_value", "pybamm.concatenation" ]
[((2542, 2584), 'pybamm.PrimaryBroadcast', 'pybamm.PrimaryBroadcast', (['T_av', '"""separator"""'], {}), "(T_av, 'separator')\n", (2565, 2584), False, 'import pybamm\n'), ((2602, 2653), 'pybamm.PrimaryBroadcast', 'pybamm.PrimaryBroadcast', (['T_av', '"""positive electrode"""'], {}), "(T_av, 'positive electrode')\n", (2625, 2653), False, 'import pybamm\n'), ((3036, 3080), 'pybamm.PrimaryBroadcast', 'pybamm.PrimaryBroadcast', (['chi_av', '"""separator"""'], {}), "(chi_av, 'separator')\n", (3059, 3080), False, 'import pybamm\n'), ((3100, 3153), 'pybamm.PrimaryBroadcast', 'pybamm.PrimaryBroadcast', (['chi_av', '"""positive electrode"""'], {}), "(chi_av, 'positive electrode')\n", (3123, 3153), False, 'import pybamm\n'), ((3580, 3633), 'pybamm.PrimaryBroadcast', 'pybamm.PrimaryBroadcast', (['i_boundary_cc_0', '"""separator"""'], {}), "(i_boundary_cc_0, 'separator')\n", (3603, 3633), False, 'import pybamm\n'), ((3698, 3739), 'pybamm.concatenation', 'pybamm.concatenation', (['i_e_n', 'i_e_s', 'i_e_p'], {}), '(i_e_n, i_e_s, i_e_p)\n', (3718, 3739), False, 'import pybamm\n'), ((1198, 1211), 'pybamm.log', 'pybamm.log', (['x'], {}), '(x)\n', (1208, 1211), False, 'import pybamm\n'), ((3325, 3378), 'pybamm.PrimaryBroadcast', 'pybamm.PrimaryBroadcast', (['chi_av', '"""negative electrode"""'], {}), "(chi_av, 'negative electrode')\n", (3348, 3378), False, 'import pybamm\n'), ((3400, 3451), 'pybamm.PrimaryBroadcast', 'pybamm.PrimaryBroadcast', (['T_av', '"""negative electrode"""'], {}), "(T_av, 'negative electrode')\n", (3423, 3451), False, 'import pybamm\n'), ((3902, 3938), 'pybamm.boundary_value', 'pybamm.boundary_value', (['c_e_s', '"""left"""'], {}), "(c_e_s, 'left')\n", (3923, 3938), False, 'import pybamm\n')]
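The leading-order electrolyte current assembled above is piecewise in the through-cell coordinate: a linear ramp over the negative electrode, a constant across the separator, and a linear decay over the positive electrode. A plain-numpy sketch of that profile (illustrative dimensionless values, not pybamm objects):

import numpy as np

l_n, l_p, i0 = 0.3, 0.3, 1.0  # electrode thicknesses and collector current

def i_e(x):
    x = np.asarray(x, dtype=float)
    return np.where(
        x < l_n, i0 * x / l_n,            # negative electrode: ramp up
        np.where(x < 1 - l_p, i0,          # separator: constant
                 i0 * (1 - x) / l_p),      # positive electrode: ramp down
    )

print(i_e([0.0, 0.15, 0.5, 0.85, 1.0]))    # -> 0, 0.5, 1, 0.5, 0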
""" The :mod:`surprise.prediction_algorithms.algo_base` module defines the base class :class:`AlgoBase` from which every single prediction algorithm has to inherit. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import warnings from six import get_unbound_function as guf import numpy as np from .. import similarities as sims from .predictions import PredictionImpossible from .predictions import Prediction from .optimize_baselines import baseline_als from .optimize_baselines import baseline_sgd class AlgoBase(object): """Abstract class where is defined the basic behavior of a prediction algorithm. Keyword Args: baseline_options(dict, optional): If the algorithm needs to compute a baseline estimate, the ``baseline_options`` parameter is used to configure how they are computed. See :ref:`baseline_estimates_configuration` for usage. """ def __init__(self, **kwargs): self.bsl_options = kwargs.get('bsl_options', {}) self.sim_options = kwargs.get('sim_options', {}) if 'user_based' not in self.sim_options: self.sim_options['user_based'] = True self.skip_train = False if (guf(self.__class__.fit) is guf(AlgoBase.fit) and guf(self.__class__.train) is not guf(AlgoBase.train)): warnings.warn('It looks like this algorithm (' + str(self.__class__) + ') implements train() ' 'instead of fit(): train() is deprecated, ' 'please use fit() instead.', UserWarning) def train(self, trainset): '''Deprecated method: use :meth:`fit() <AlgoBase.fit>` instead.''' warnings.warn('train() is deprecated. Use fit() instead', UserWarning) self.skip_train = True self.fit(trainset) return self def fit(self, trainset): """Train an algorithm on a given training set. This method is called by every derived class as the first basic step for training an algorithm. It basically just initializes some internal structures and set the self.trainset attribute. Args: trainset(:obj:`Trainset <surprise.Trainset>`) : A training set, as returned by the :meth:`folds <surprise.dataset.Dataset.folds>` method. Returns: self """ # Check if train method is overridden: this means the object is an old # style algo (new algo only have fit() so self.__class__.train will be # AlgoBase.train). If true, there are 2 possible cases: # - algo.fit() was called. In this case algo.train() was skipped which # is bad. We call it and skip this part next time we enter fit(). # Then return immediatly because fit() has already been called by # AlgoBase.train() (which has been called by algo.train()). # - algo.train() was called, which is the old way. In that case, # the skip flag will ignore this. # This is fairly ugly and hacky but I did not find anything better so # far, in order to maintain backward compatibility... See # tests/test_train2fit.py for supported cases. if (guf(self.__class__.train) is not guf(AlgoBase.train) and not self.skip_train): self.train(trainset) return self.skip_train = False self.trainset = trainset # (re) Initialise baselines self.bu = self.bi = None return self def predict(self, uid, iid, r_ui=None, clip=True, verbose=False): """Compute the rating prediction for given user and item. The ``predict`` method converts raw ids to inner ids and then calls the ``estimate`` method which is defined in every derived class. If the prediction is impossible (e.g. because the user and/or the item is unkown), the prediction is set according to :meth:`default_prediction() <surprise.prediction_algorithms.algo_base.AlgoBase.default_prediction>`. Args: uid: (Raw) id of the user. See :ref:`this note<raw_inner_note>`. 
            iid: (Raw) id of the item. See :ref:`this note<raw_inner_note>`.
            r_ui(float): The true rating :math:`r_{ui}`. Optional, default is
                ``None``.
            clip(bool): Whether to clip the estimation into the rating scale.
                For example, if :math:`\\hat{r}_{ui}` is :math:`5.5` while the
                rating scale is :math:`[1, 5]`, then :math:`\\hat{r}_{ui}` is
                set to :math:`5`. Same goes if :math:`\\hat{r}_{ui} < 1`.
                Default is ``True``.
            verbose(bool): Whether to print details of the prediction.
                Default is False.

        Returns:
            A :obj:`Prediction\
            <surprise.prediction_algorithms.predictions.Prediction>` object
            containing:

            - The (raw) user id ``uid``.
            - The (raw) item id ``iid``.
            - The true rating ``r_ui`` (:math:`r_{ui}`).
            - The estimated rating (:math:`\\hat{r}_{ui}`).
            - Some additional details about the prediction that might be
              useful for later analysis.
        """

        # Convert raw ids to inner ids
        try:
            iuid = self.trainset.to_inner_uid(uid)
        except ValueError:
            iuid = 'UKN__' + str(uid)
        try:
            iiid = self.trainset.to_inner_iid(iid)
        except ValueError:
            iiid = 'UKN__' + str(iid)

        details = {}
        try:
            est = self.estimate(iuid, iiid)

            # If the details dict was also returned
            if isinstance(est, tuple):
                est, details = est

            details['was_impossible'] = False

        except PredictionImpossible as e:
            est = self.default_prediction()
            details['was_impossible'] = True
            details['reason'] = str(e)

        # Remap the rating into its initial rating scale (because the rating
        # scale was translated so that ratings are all >= 1)
        est -= self.trainset.offset

        # clip estimate into [lower_bound, higher_bound]
        if clip:
            lower_bound, higher_bound = self.trainset.rating_scale
            est = min(higher_bound, est)
            est = max(lower_bound, est)

        pred = Prediction(uid, iid, r_ui, est, details)

        if verbose:
            print(pred)

        return pred

    def default_prediction(self):
        '''Used when the ``PredictionImpossible`` exception is raised during a
        call to :meth:`predict()
        <surprise.prediction_algorithms.algo_base.AlgoBase.predict>`. By
        default, return the global mean of all ratings (can be overridden in
        child classes).

        Returns:
            (float): The mean of all ratings in the trainset.
        '''

        return self.trainset.global_mean

    def test(self, testset, verbose=False):
        """Test the algorithm on given testset, i.e. estimate all the ratings
        in the given testset.

        Args:
            testset: A test set, as returned by a :ref:`cross-validation
                iterator<use_cross_validation_iterators>` or by the
                :meth:`build_testset() <surprise.Trainset.build_testset>`
                method.
            verbose(bool): Whether to print details for each prediction.
                Default is False.

        Returns:
            A list of :class:`Prediction\
            <surprise.prediction_algorithms.predictions.Prediction>` objects
            that contains all the estimated ratings.
        """

        if isinstance(testset, np.ndarray):
            iterate_on = testset.tolist()
        else:
            iterate_on = testset

        predictions = [self.predict(uid,
                                    iid,
                                    r_ui_trans - self.trainset.offset,
                                    verbose=verbose)
                       for (uid, iid, r_ui_trans) in iterate_on]
        return predictions

    def compute_baselines(self):
        """Compute users and items baselines.

        The way baselines are computed depends on the ``bsl_options``
        parameter passed at the creation of the algorithm (see
        :ref:`baseline_estimates_configuration`).

        This method is only relevant for algorithms using :func:`Pearson
        baseline similarity<surprise.similarities.pearson_baseline>` or the
        :class:`BaselineOnly
        <surprise.prediction_algorithms.baseline_only.BaselineOnly>`
        algorithm.

        Returns:
            A tuple ``(bu, bi)``, which are users and items baselines."""

        # First off, if this method has already been called before on the same
        # trainset, then just return.
Indeed, compute_baselines may be called # more than one time, for example when a similarity metric (e.g. # pearson_baseline) uses baseline estimates. if self.bu is not None: return self.bu, self.bi method = dict(als=baseline_als, sgd=baseline_sgd) method_name = self.bsl_options.get('method', 'als') try: # print('Estimating biases using', method_name + '...') self.bu, self.bi = method[method_name](self) return self.bu, self.bi except KeyError: raise ValueError('Invalid method ' + method_name + ' for baseline computation.' + ' Available methods are als and sgd.') def compute_similarities(self): """Build the similarity matrix. The way the similarity matrix is computed depends on the ``sim_options`` parameter passed at the creation of the algorithm (see :ref:`similarity_measures_configuration`). This method is only relevant for algorithms using a similarity measure, such as the :ref:`k-NN algorithms <pred_package_knn_inpired>`. Returns: The similarity matrix.""" construction_func = {'cosine': sims.cosine, 'msd': sims.msd, 'pearson': sims.pearson, 'pearson_baseline': sims.pearson_baseline} if self.sim_options['user_based']: n_x, yr = self.trainset.n_users, self.trainset.ir else: n_x, yr = self.trainset.n_items, self.trainset.ur min_support = self.sim_options.get('min_support', 1) args = [n_x, yr, min_support] name = self.sim_options.get('name', 'msd').lower() if name == 'pearson_baseline': shrinkage = self.sim_options.get('shrinkage', 100) bu, bi = self.compute_baselines() if self.sim_options['user_based']: bx, by = bu, bi else: bx, by = bi, bu args += [self.trainset.global_mean, bx, by, shrinkage] try: # print('Computing the {0} similarity matrix...'.format(name)) sim = construction_func[name](*args) # print('Done computing similarity matrix.') return sim except KeyError: raise NameError('Wrong sim name ' + name + '. Allowed values ' + 'are ' + ', '.join(construction_func.keys()) + '.') def get_neighbors(self, iid, k): """Return the ``k`` nearest neighbors of ``iid``, which is the inner id of a user or an item, depending on the ``user_based`` field of ``sim_options`` (see :ref:`similarity_measures_configuration`). As the similarities are computed on the basis of a similarity measure, this method is only relevant for algorithms using a similarity measure, such as the :ref:`k-NN algorithms <pred_package_knn_inpired>`. For a usage example, see the :ref:`FAQ <get_k_nearest_neighbors>`. Args: iid(int): The (inner) id of the user (or item) for which we want the nearest neighbors. See :ref:`this note<raw_inner_note>`. k(int): The number of neighbors to retrieve. Returns: The list of the ``k`` (inner) ids of the closest users (or items) to ``iid``. """ if self.sim_options['user_based']: all_instances = self.trainset.all_users else: all_instances = self.trainset.all_items others = [(x, self.sim[iid, x]) for x in all_instances() if x != iid] others.sort(key=lambda tple: tple[1], reverse=True) k_nearest_neighbors = [j for (j, _) in others[:k]] return k_nearest_neighbors
[ "warnings.warn", "six.get_unbound_function" ]
[((1808, 1878), 'warnings.warn', 'warnings.warn', (['"""train() is deprecated. Use fit() instead"""', 'UserWarning'], {}), "('train() is deprecated. Use fit() instead', UserWarning)\n", (1821, 1878), False, 'import warnings\n'), ((1272, 1295), 'six.get_unbound_function', 'guf', (['self.__class__.fit'], {}), '(self.__class__.fit)\n', (1275, 1295), True, 'from six import get_unbound_function as guf\n'), ((1299, 1316), 'six.get_unbound_function', 'guf', (['AlgoBase.fit'], {}), '(AlgoBase.fit)\n', (1302, 1316), True, 'from six import get_unbound_function as guf\n'), ((1332, 1357), 'six.get_unbound_function', 'guf', (['self.__class__.train'], {}), '(self.__class__.train)\n', (1335, 1357), True, 'from six import get_unbound_function as guf\n'), ((1365, 1384), 'six.get_unbound_function', 'guf', (['AlgoBase.train'], {}), '(AlgoBase.train)\n', (1368, 1384), True, 'from six import get_unbound_function as guf\n'), ((3353, 3378), 'six.get_unbound_function', 'guf', (['self.__class__.train'], {}), '(self.__class__.train)\n', (3356, 3378), True, 'from six import get_unbound_function as guf\n'), ((3386, 3405), 'six.get_unbound_function', 'guf', (['AlgoBase.train'], {}), '(AlgoBase.train)\n', (3389, 3405), True, 'from six import get_unbound_function as guf\n')]
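The last step of `AlgoBase.predict` above removes the internal rating offset and clips the estimate into the trainset's rating scale. A standalone sketch of that step with illustrative values:

def clip_estimate(est, rating_scale=(1, 5), offset=0):
    est -= offset                           # undo the internal translation
    lower_bound, higher_bound = rating_scale
    est = min(higher_bound, est)            # cap overshoots
    est = max(lower_bound, est)             # floor undershoots
    return est

print(clip_estimate(5.7))   # -> 5
print(clip_estimate(0.2))   # -> 1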
from string import Template def start_response(resp="text/html"): return 'Content-type: ' + resp + '\n\n' def include_header(the_title): with open('templates/header.html') as headf: head_text = headf.read() header = Template(head_text) return header.substitute(title=the_title) def include_footer(the_links): with open('templates/footer.html') as footf: foot_text = footf.read() link_string = '' for key in the_links: link_string += '<a href="' + the_links[key] + '">' + key + '</a>&nbsp;&nbsp;&nbsp;&nbsp;' footer = Template(foot_text) return footer.substitute(links=link_string) def start_form(the_url, form_type="POST"): return '<form action="' + the_url + '" method="' + form_type + '">' def end_form(submit_msg="Submit"): return '<p></p><input type=submit value="' + submit_msg + '"></form>' def radio_button(rb_name, rb_value): return ('<input type="radio" name="' + rb_name + '" value="' + rb_value + '"> ' + rb_value + '<br />') def text_input(rb_name, rb_value=""): return ('<input type="text" name="' + rb_name + '" value="' + rb_value + '"><br />') def hidden_input(rb_name, rb_value): return '<input type="hidden" name="' + rb_name + '" value="' + rb_value + '">' def u_list(items): u_string = '<ul>' for item in items: u_string += '<li>' + item + '</li>' u_string += '</ul>' return u_string def header(header_text, header_level=2): return ('<h' + str(header_level) + '>' + header_text + '</h' + str(header_level) + '>') def para(para_text): return '<p>' + para_text + '</p>'
[ "string.Template" ]
[((240, 259), 'string.Template', 'Template', (['head_text'], {}), '(head_text)\n', (248, 259), False, 'from string import Template\n'), ((579, 598), 'string.Template', 'Template', (['foot_text'], {}), '(foot_text)\n', (587, 598), False, 'from string import Template\n')]
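`include_header` and `include_footer` above assume the files under `templates/` exist, so the underlying `string.Template` substitution they rely on is shown directly here, with a made-up template string:

from string import Template

head_text = '<html><head><title>$title</title></head><body>'
print(Template(head_text).substitute(title='My Page'))
# -> <html><head><title>My Page</title></head><body>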
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.models import User

from commons.permission_controllers import general_permission_controller as PERM_CONT

# Create your views here.

def test(request):
    return HttpResponse('In common/views/test.')


def permission_controller_test(request):
    # NOTE: 'healthstandards' appears twice below; a dict literal keeps only
    # the last value, so 'view_standardexamfieldmapping' is silently dropped
    # (see the short demonstration after this entry).
    permissions = {'healthrecords':'change_testrecord',
                   'healthstandards' : 'view_standardexamfieldmapping' ,
                   'healthstandards' : 'add_standardexamfieldmapping'}
    user1 = User.objects.get(id = 1)
    print(user1)
    PERM_CONT(user1, **permissions)
    print('--------------------------------------------------------------')
    superuser = User.objects.get(username = 'amir')
    print(superuser)
    PERM_CONT(superuser, **permissions)
    return HttpResponse('in permission_controller_test: I Did It.')


from django.dispatch import receiver
from django_rest_passwordreset.signals import reset_password_token_created
from django.urls import reverse
# The three imports below are needed by the handler but were missing from the
# original file; the standard Django locations are assumed.
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _


@receiver(reset_password_token_created)
def password_reset_token_created(sender, reset_password_token, *args, **kwargs):
    """
    Handles password reset tokens
    When a token is created, an e-mail needs to be sent to the user
    :param sender:
    :param reset_password_token:
    :param args:
    :param kwargs:
    :return:
    """
    # send an e-mail to the user
    context = {
        'current_user': reset_password_token.user,
        'username': reset_password_token.user.username,
        'email': reset_password_token.user.email,
        'reset_password_url': "{}?token={}".format(reverse('password_reset:reset-password-request'), reset_password_token.key)
    }

    # render email text
    email_html_message = render_to_string('email/user_reset_password.html', context)
    email_plaintext_message = render_to_string('email/user_reset_password.txt', context)

    msg = EmailMultiAlternatives(
        # title:
        _("Password Reset for {title}".format(title="Some website title")),
        # message:
        email_plaintext_message,
        # from:
        "<EMAIL>",
        # to:
        [reset_password_token.user.email]
    )
    msg.attach_alternative(email_html_message, "text/html")
    msg.send()
[ "django.http.HttpResponse", "django.contrib.auth.models.User.objects.get", "django.dispatch.receiver", "django.urls.reverse", "commons.permission_controllers.general_permission_controller" ]
[((1007, 1045), 'django.dispatch.receiver', 'receiver', (['reset_password_token_created'], {}), '(reset_password_token_created)\n', (1015, 1045), False, 'from django.dispatch import receiver\n'), ((260, 297), 'django.http.HttpResponse', 'HttpResponse', (['"""In common/views/test."""'], {}), "('In common/views/test.')\n", (272, 297), False, 'from django.http import HttpResponse\n'), ((523, 545), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': '(1)'}), '(id=1)\n', (539, 545), False, 'from django.contrib.auth.models import User\n'), ((569, 600), 'commons.permission_controllers.general_permission_controller', 'PERM_CONT', (['user1'], {}), '(user1, **permissions)\n', (578, 600), True, 'from commons.permission_controllers import general_permission_controller as PERM_CONT\n'), ((693, 726), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'username': '"""amir"""'}), "(username='amir')\n", (709, 726), False, 'from django.contrib.auth.models import User\n'), ((754, 789), 'commons.permission_controllers.general_permission_controller', 'PERM_CONT', (['superuser'], {}), '(superuser, **permissions)\n', (763, 789), True, 'from commons.permission_controllers import general_permission_controller as PERM_CONT\n'), ((801, 857), 'django.http.HttpResponse', 'HttpResponse', (['"""in permission_controller_test: I Did It."""'], {}), "('in permission_controller_test: I Did It.')\n", (813, 857), False, 'from django.http import HttpResponse\n'), ((1603, 1651), 'django.urls.reverse', 'reverse', (['"""password_reset:reset-password-request"""'], {}), "('password_reset:reset-password-request')\n", (1610, 1651), False, 'from django.urls import reverse\n')]
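A two-line demonstration (plain Python, no Django needed) of the duplicate-key pitfall flagged in `permission_controller_test` above: repeating a key in a dict literal silently keeps only the last value.

d = {'healthstandards': 'view_standardexamfieldmapping',
     'healthstandards': 'add_standardexamfieldmapping'}
print(d)   # {'healthstandards': 'add_standardexamfieldmapping'}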
import numpy as np import matplotlib matplotlib.use('pdf') import matplotlib.pyplot as plt import traceback Fs = 30.72e6 x1 = [] x2=[] x5=[] x_comp_0 = [] y_comp_0 = [] x_comp_1 = [] y_comp_1 = [] x_comp_2 = [] y_comp_2 = [] try: f1 = open("/root/.files/free5GRAN/execution_raw_files/studied_frame.txt", "r") for x in f1: x = x.split("(")[1] x = x.split(")")[0] re = float(x.split(',')[0]) im = float(x.split(',')[1]) x1.append(complex(re,im)) fig1 = plt.figure() plt.specgram(x1, NFFT=1024, Fs=Fs) plt.xticks(np.arange(0, 0.01, 0.001)) plt.title("Studied Frame") plt.ylim(-Fs/2, Fs/2) plt.savefig("/root/.files/free5GRAN/visualization_files/studied_frame.pdf", bbox_inches='tight', pad_inches=0.5) plt.close(fig1) except Exception: traceback.print_exc() try: f2 = open("/root/.files/free5GRAN/execution_raw_files/moniroting_slots.txt", "r") for x in f2: x = x.split("(")[1] x = x.split(")")[0] re = float(x.split(',')[0]) im = float(x.split(',')[1]) x2.append(complex(re,im)) fig2 = plt.figure() plt.specgram(x2, NFFT=1024, Fs=Fs) plt.yticks(np.arange(-15e6, 15e6, 2e6)) plt.title("Monitoring slots") plt.ylim(-Fs/2, Fs/2) plt.savefig("/root/.files/free5GRAN/visualization_files/moniroting_slots.pdf", bbox_inches='tight', pad_inches=0.5) plt.close(fig2) except Exception: traceback.print_exc() try: f5 = open("/root/.files/free5GRAN/execution_raw_files/global_signal.txt", "r") for x in f5: x = x.split("(")[1] x = x.split(")")[0] re = float(x.split(',')[0]) im = float(x.split(',')[1]) x5.append(complex(re,im)) fig2 = plt.figure() plt.specgram(x5, NFFT=1024, Fs=Fs) plt.yticks(np.arange(-15e6, 15e6, 2e6)) plt.title("Global signal") plt.ylim(-Fs/2, Fs/2) plt.savefig("/root/.files/free5GRAN/visualization_files/global_signal.pdf", bbox_inches='tight', pad_inches=0.5) plt.close(fig2) except Exception: traceback.print_exc() try: f3 = open("/root/.files/free5GRAN/execution_raw_files/pdcch_constellation.txt", "r") for x in f3: x = x.split("(")[1] x = x.split(")")[0] x_comp_0.append(float(x.split(',')[0])) y_comp_0.append(float(x.split(',')[1])) fig3 = plt.figure() plt.scatter(x_comp_0,y_comp_0, color='red') plt.title("PDCCH constellation") plt.savefig("/root/.files/free5GRAN/visualization_files/pdcch_constellation.pdf") plt.close(fig3) except Exception: traceback.print_exc() try: f4 = open("/root/.files/free5GRAN/execution_raw_files/pdsch_constellation.txt", "r") for x in f4: x = x.split("(")[1] x = x.split(")")[0] x_comp_1.append(float(x.split(',')[0])) y_comp_1.append(float(x.split(',')[1])) fig4 = plt.figure() plt.scatter(x_comp_1,y_comp_1, color='red') plt.title("PDSCH constellation") plt.savefig("/root/.files/free5GRAN/visualization_files/pdsch_constellation.pdf") plt.close(fig4) except Exception: traceback.print_exc()
[ "matplotlib.pyplot.title", "matplotlib.pyplot.specgram", "traceback.print_exc", "matplotlib.pyplot.ylim", "matplotlib.pyplot.close", "matplotlib.pyplot.scatter", "matplotlib.pyplot.figure", "matplotlib.use", "numpy.arange", "matplotlib.pyplot.savefig" ]
[((37, 58), 'matplotlib.use', 'matplotlib.use', (['"""pdf"""'], {}), "('pdf')\n", (51, 58), False, 'import matplotlib\n'), ((509, 521), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (519, 521), True, 'import matplotlib.pyplot as plt\n'), ((526, 560), 'matplotlib.pyplot.specgram', 'plt.specgram', (['x1'], {'NFFT': '(1024)', 'Fs': 'Fs'}), '(x1, NFFT=1024, Fs=Fs)\n', (538, 560), True, 'import matplotlib.pyplot as plt\n'), ((607, 633), 'matplotlib.pyplot.title', 'plt.title', (['"""Studied Frame"""'], {}), "('Studied Frame')\n", (616, 633), True, 'import matplotlib.pyplot as plt\n'), ((638, 663), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-Fs / 2)', '(Fs / 2)'], {}), '(-Fs / 2, Fs / 2)\n', (646, 663), True, 'import matplotlib.pyplot as plt\n'), ((664, 780), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/root/.files/free5GRAN/visualization_files/studied_frame.pdf"""'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.5)'}), "('/root/.files/free5GRAN/visualization_files/studied_frame.pdf',\n bbox_inches='tight', pad_inches=0.5)\n", (675, 780), True, 'import matplotlib.pyplot as plt\n'), ((781, 796), 'matplotlib.pyplot.close', 'plt.close', (['fig1'], {}), '(fig1)\n', (790, 796), True, 'import matplotlib.pyplot as plt\n'), ((1127, 1139), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1137, 1139), True, 'import matplotlib.pyplot as plt\n'), ((1144, 1178), 'matplotlib.pyplot.specgram', 'plt.specgram', (['x2'], {'NFFT': '(1024)', 'Fs': 'Fs'}), '(x2, NFFT=1024, Fs=Fs)\n', (1156, 1178), True, 'import matplotlib.pyplot as plt\n'), ((1227, 1256), 'matplotlib.pyplot.title', 'plt.title', (['"""Monitoring slots"""'], {}), "('Monitoring slots')\n", (1236, 1256), True, 'import matplotlib.pyplot as plt\n'), ((1261, 1286), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-Fs / 2)', '(Fs / 2)'], {}), '(-Fs / 2, Fs / 2)\n', (1269, 1286), True, 'import matplotlib.pyplot as plt\n'), ((1287, 1406), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/root/.files/free5GRAN/visualization_files/moniroting_slots.pdf"""'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.5)'}), "('/root/.files/free5GRAN/visualization_files/moniroting_slots.pdf',\n bbox_inches='tight', pad_inches=0.5)\n", (1298, 1406), True, 'import matplotlib.pyplot as plt\n'), ((1407, 1422), 'matplotlib.pyplot.close', 'plt.close', (['fig2'], {}), '(fig2)\n', (1416, 1422), True, 'import matplotlib.pyplot as plt\n'), ((1749, 1761), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1759, 1761), True, 'import matplotlib.pyplot as plt\n'), ((1766, 1800), 'matplotlib.pyplot.specgram', 'plt.specgram', (['x5'], {'NFFT': '(1024)', 'Fs': 'Fs'}), '(x5, NFFT=1024, Fs=Fs)\n', (1778, 1800), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1875), 'matplotlib.pyplot.title', 'plt.title', (['"""Global signal"""'], {}), "('Global signal')\n", (1858, 1875), True, 'import matplotlib.pyplot as plt\n'), ((1880, 1905), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-Fs / 2)', '(Fs / 2)'], {}), '(-Fs / 2, Fs / 2)\n', (1888, 1905), True, 'import matplotlib.pyplot as plt\n'), ((1906, 2022), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/root/.files/free5GRAN/visualization_files/global_signal.pdf"""'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.5)'}), "('/root/.files/free5GRAN/visualization_files/global_signal.pdf',\n bbox_inches='tight', pad_inches=0.5)\n", (1917, 2022), True, 'import matplotlib.pyplot as plt\n'), ((2023, 2038), 'matplotlib.pyplot.close', 'plt.close', (['fig2'], {}), '(fig2)\n', (2032, 2038), True, 'import matplotlib.pyplot 
as plt\n'), ((2361, 2373), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2371, 2373), True, 'import matplotlib.pyplot as plt\n'), ((2378, 2422), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_comp_0', 'y_comp_0'], {'color': '"""red"""'}), "(x_comp_0, y_comp_0, color='red')\n", (2389, 2422), True, 'import matplotlib.pyplot as plt\n'), ((2426, 2458), 'matplotlib.pyplot.title', 'plt.title', (['"""PDCCH constellation"""'], {}), "('PDCCH constellation')\n", (2435, 2458), True, 'import matplotlib.pyplot as plt\n'), ((2463, 2549), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/root/.files/free5GRAN/visualization_files/pdcch_constellation.pdf"""'], {}), "(\n '/root/.files/free5GRAN/visualization_files/pdcch_constellation.pdf')\n", (2474, 2549), True, 'import matplotlib.pyplot as plt\n'), ((2549, 2564), 'matplotlib.pyplot.close', 'plt.close', (['fig3'], {}), '(fig3)\n', (2558, 2564), True, 'import matplotlib.pyplot as plt\n'), ((2888, 2900), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2898, 2900), True, 'import matplotlib.pyplot as plt\n'), ((2905, 2949), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_comp_1', 'y_comp_1'], {'color': '"""red"""'}), "(x_comp_1, y_comp_1, color='red')\n", (2916, 2949), True, 'import matplotlib.pyplot as plt\n'), ((2953, 2985), 'matplotlib.pyplot.title', 'plt.title', (['"""PDSCH constellation"""'], {}), "('PDSCH constellation')\n", (2962, 2985), True, 'import matplotlib.pyplot as plt\n'), ((2990, 3076), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/root/.files/free5GRAN/visualization_files/pdsch_constellation.pdf"""'], {}), "(\n '/root/.files/free5GRAN/visualization_files/pdsch_constellation.pdf')\n", (3001, 3076), True, 'import matplotlib.pyplot as plt\n'), ((3076, 3091), 'matplotlib.pyplot.close', 'plt.close', (['fig4'], {}), '(fig4)\n', (3085, 3091), True, 'import matplotlib.pyplot as plt\n'), ((576, 601), 'numpy.arange', 'np.arange', (['(0)', '(0.01)', '(0.001)'], {}), '(0, 0.01, 0.001)\n', (585, 601), True, 'import numpy as np\n'), ((820, 841), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (839, 841), False, 'import traceback\n'), ((1194, 1239), 'numpy.arange', 'np.arange', (['(-15000000.0)', '(15000000.0)', '(2000000.0)'], {}), '(-15000000.0, 15000000.0, 2000000.0)\n', (1203, 1239), True, 'import numpy as np\n'), ((1446, 1467), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1465, 1467), False, 'import traceback\n'), ((1816, 1861), 'numpy.arange', 'np.arange', (['(-15000000.0)', '(15000000.0)', '(2000000.0)'], {}), '(-15000000.0, 15000000.0, 2000000.0)\n', (1825, 1861), True, 'import numpy as np\n'), ((2062, 2083), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2081, 2083), False, 'import traceback\n'), ((2588, 2609), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2607, 2609), False, 'import traceback\n'), ((3114, 3135), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3133, 3135), False, 'import traceback\n')]
# Android Device Testing Framework ("dtf")
# Copyright 2013-2015 <NAME> (@jake_valletta)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
dtf's package manager
"""

from __future__ import absolute_import
from __future__ import print_function
import os
import os.path
import tempfile
import zipfile

from argparse import ArgumentParser

import requests

from dtf.module import Module
import dtf.globals
import dtf.logging as log
import dtf.core.compat as compat
import dtf.core.item
import dtf.core.manifestparser as mp
import dtf.core.packagemanager as packagemanager
import dtf.core.utils as utils

TAG = "pm"

DTF_DATA_DIR = dtf.globals.DTF_DATA_DIR
DTF_BINARIES_DIR = dtf.globals.DTF_BINARIES_DIR
DTF_LIBRARIES_DIR = dtf.globals.DTF_LIBRARIES_DIR
DTF_MODULES_DIR = dtf.globals.DTF_MODULES_DIR
DTF_PACKAGES_DIR = dtf.globals.DTF_PACKAGES_DIR
DTF_DB = dtf.globals.DTF_DB

TYPE_BINARY = dtf.core.item.TYPE_BINARY
TYPE_LIBRARY = dtf.core.item.TYPE_LIBRARY
TYPE_MODULE = dtf.core.item.TYPE_MODULE
TYPE_PACKAGE = dtf.core.item.TYPE_PACKAGE

# No log to file.
log.LOG_LEVEL_FILE = 0

LIST_QUIET = 0
LIST_DEFAULT = 1
LIST_VERBOSE = 2


class pm(Module):  # pylint: disable=invalid-name,too-many-public-methods

    """Module class for dtf pm"""

    @classmethod
    def usage(cls):
        """Print Usage"""
        print('dtf Package Manager')
        print('')
        print('Subcommands:')
        print(' delete Delete an item from main database.')
        print(' export Export entire main database to dtf ZIP.')
        print(' install Install a dtf ZIP or single item.')
        print(' list List all installed items.')
        print(' purge Purge all installed items, reset DB.')
        print(' repo Manage content repos.')
        print(' upgrade Upgrade repo content.')
        print('')

        return 0

    def do_install(self, args):
        """Attempt to install new content"""
        parser = ArgumentParser(prog='pm install', description='Install a item or DTF ZIP of items.')
        parser.add_argument('--zip', dest='zipfile', default=None, help='Install a DTF ZIP file containing items.')
        parser.add_argument('--single', metavar="ITEM", dest='single_type', default=None, help='Install a single item.')
        parser.add_argument('--name', metavar="val", dest='single_name', default=None, help="Item name [SINGLE ONLY].")
        parser.add_argument('--local_name', metavar="val", dest='single_local_name', default=None, help="Item local name [SINGLE ONLY].")
        parser.add_argument('--install_name', metavar="val", dest='single_install_name', default=None, help="Item install name [SINGLE ONLY].")
        parser.add_argument('--version', metavar="val", dest='single_version', default=None, help="Item version (#.# format) [SINGLE ONLY].")
        parser.add_argument('--author', nargs='+', metavar="val", dest='single_author', default=None, help="Item author (email is fine). [SINGLE ONLY].")
        parser.add_argument('--about', nargs='+', metavar="val", dest='single_about', default=None, help="About string for a module. [SINGLE ONLY].")
        parser.add_argument('--auto', dest='single_auto', action='store_const', const=True, default=False, help="Automatically parse module [SINGLE ONLY].")
        parser.add_argument('--force', dest='force', action='store_const', const=True, default=False, help="Force installation of component(s).")
        parser.add_argument('--new-only', dest='new_only', action='store_const', const=True, default=False, help="Install only if new version.")

        parsed_args = parser.parse_args(args)

        zip_file_name = parsed_args.zipfile
        single_type = parsed_args.single_type
        force_mode = parsed_args.force
        new_only = parsed_args.new_only

        if zip_file_name is not None and single_type is not None:
            log.e(TAG, "Cannot install both DTF ZIP and single item. Exiting.")
            return -1

        if zip_file_name is None and single_type is None:
            log.e(TAG, "ZIP mode or single item mode not detected. Exiting.")
            return -2

        # Install zip.
        if zip_file_name is not None:
            if zipfile.is_zipfile(zip_file_name):
                return packagemanager.install_zip(zip_file_name, force=force_mode, new_only=new_only)
            else:
                log.e(TAG, "'%s' is not a valid ZIP file or does not exist." % (zip_file_name))
                return -3
        # Install single.
        else:
            return self.parse_and_install_single(parsed_args, single_type)

    @classmethod
    def do_delete(cls, args):
        """Attempt to remove content"""
        parser = ArgumentParser(prog='pm delete', description='Remove a item from disk and database.')
        parser.add_argument('--type', metavar="val", dest='item_type', default=None, help='The type of the item')
        parser.add_argument('--name', metavar="val", dest='item_name', default=None, help="Item to uninstall.")
        parser.add_argument('--force', dest='force', action='store_const', const=True, default=False, help="Force deletion of component.")

        parsed_args = parser.parse_args(args)

        force_mode = parsed_args.force

        name = parsed_args.item_name
        if name is None:
            log.e(TAG, "'--name' is required for delete mode. Exiting.")
            return -1

        item_type = parsed_args.item_type

        if item_type == TYPE_BINARY:
            rtn = packagemanager.delete_binary(name, force=force_mode)
        elif item_type == TYPE_LIBRARY:
            rtn = packagemanager.delete_library(name, force=force_mode)
        elif item_type == TYPE_MODULE:
            rtn = packagemanager.delete_module(name, force=force_mode)
        elif item_type == TYPE_PACKAGE:
            rtn = packagemanager.delete_package(name, force=force_mode)
        else:
            log.e(TAG, "Invalid type passed to delete. Exiting.")
            rtn = -2

        return rtn

    def do_export(self, args):
        """Perform an export"""
        rtn = 0

        parser = ArgumentParser(prog='pm export', description='Export installed content.')
        parser.add_argument('output_name', type=str, help='The output file name.')

        parsed_args = parser.parse_args(args)

        output_name = parsed_args.output_name

        if os.path.isfile(output_name):
            log.e(TAG, "Output file already exists!")
            return -1

        # Generate a list of populated items.
        export_items = self.generate_export_items()

        if len(export_items) == 0:
            log.e(TAG, "Nothing to export!")
            return -2

        export_zip = mp.ExportZip(output_name)

        for item in export_items:
            export_zip.add_item(item)

        export_zip.finalize()

        log.i(TAG, "Export completed!")

        return rtn

    def do_list(self, args):
        """List installed content"""
        rtn = 0

        parser = ArgumentParser(prog='pm list', description='List installed components.')
        parser.add_argument('-v', dest='verbose', action='store_const', const=True, default=False, help="Show additional details about components.")
        parser.add_argument('-q', dest='quiet', action='store_const', const=True, default=False, help="Show only names of components.")
        parser.add_argument('type', type=str, nargs='?', help='Show only requested type.')

        parsed_args = parser.parse_args(args)

        d_filter = parsed_args.type
        verbose = parsed_args.verbose
        quiet = parsed_args.quiet

        if verbose and quiet:
            log.e(TAG, "Unable to be verbose and quiet!")
            return -1

        if verbose:
            verbosity = LIST_VERBOSE
        elif quiet:
            verbosity = LIST_QUIET
        else:
            verbosity = LIST_DEFAULT

        if d_filter is not None:
            if d_filter == "binaries":
                self.print_installed_binaries(verbosity)
            elif d_filter == "libraries":
                self.print_installed_libraries(verbosity)
            elif d_filter == "modules":
                self.print_installed_modules(verbosity)
            elif d_filter == "packages":
                self.print_installed_packages(verbosity)
            else:
                log.e(TAG, "Unknown filter specified : %s" % d_filter)
                rtn = -3
        else:
            self.print_installed_binaries(verbosity)
            self.print_installed_libraries(verbosity)
            self.print_installed_modules(verbosity)
            self.print_installed_packages(verbosity)

        return rtn

    @classmethod
    def do_purge(cls):
        """Purge dtf DB"""
        print('!!!! WARNING !!!!')
        print('')
        print('This will delete all installed content and reset the database!')
        print('Note: This will not delete any project data.')
        print('Are you sure you want to do this? [N/y]', end=" ")

        res = compat.raw_input()

        if res.lower() == "y":
            return packagemanager.purge()
        else:
            return 0

    def do_repo(self, args):
        """Manage repos"""
        if len(args) < 1:
            print('Usage: dtf pm repo ACTION [args]')
            print('')
            print(' ACTIONs')
            print(' add [repo_name] [url]')
            print(' remove [repo_name]')
            print(' list')
            return 0

        cmd = args.pop(0)

        if cmd == 'add':
            return self.do_repo_add(args)
        elif cmd == 'remove':
            return self.do_repo_remove(args)
        elif cmd == 'list':
            return self.do_repo_list()
        else:
            log.e(TAG, "Invalid repo command: %s" % cmd)
            return -1

    def do_upgrade(self, args):
        """Do content upgrade"""
        parser = ArgumentParser(prog='pm upgrade', description='Upgrade managed content.')
        parser.add_argument('-v', '--dont-verify-ssl', dest='verify', action='store_const', const=False, default=True, help="Allow SSL certificate issues.")
        parser.add_argument('-a', '--allow-http', dest='allow_http', action='store_const', const=True, default=False, help="Allow HTTP downloads.")
        parser.add_argument('-f', '--force', dest='force', action='store_const', const=True, default=False, help="Force install of component(s).")
        parser.add_argument('-p', '--prompt-all', dest='new_only', action='store_const', const=False, default=True, help="Prompt install regardless of version.")

        parsed_args = parser.parse_args(args)

        verify = parsed_args.verify
        allow_http = parsed_args.allow_http
        force = parsed_args.force
        new_only = parsed_args.new_only

        for repo_name, url in packagemanager.get_repos():

            log.i(TAG, "Requesting content from '%s' (%s).." % (repo_name, url))

            if utils.is_http_url(url) and not allow_http:
                log.w(TAG, "Skipping '%s' due to HTTP (use --allow-http)" % repo_name)
                continue

            file_f = self.download_temp_file(url, verify=verify)
            if file_f is None:
                continue

            if not zipfile.is_zipfile(file_f.name):
                log.w(TAG, "Pulled content is not a valid ZIP file, skipping!")
                continue

            log.i(TAG, "Starting install...")
            packagemanager.install_zip(file_f.name, force=force, new_only=new_only)

            file_f.close()

        log.i(TAG, "Upgrade complete.")

        return 0

    @classmethod
    def do_repo_add(cls, args):
        """Add a repo"""
        if len(args) != 2:
            log.e(TAG, "A repo name and URL is required!")
            return -1

        repo_name = args.pop(0)
        url = args.pop(0)

        return packagemanager.add_repo(repo_name, url)

    @classmethod
    def do_repo_remove(cls, args):
        """remove a repo"""
        if len(args) != 1:
            log.e(TAG, "Must specify a repo name!")
            return -1

        repo_name = args.pop()

        return packagemanager.remove_repo(repo_name)

    @classmethod
    def do_repo_list(cls):
        """List out repos"""
        print('Configured repos:')
        for repo, url in packagemanager.get_repos():
            print(" %s (%s)" % (repo, url))

        return 0

    @classmethod
    def format_version(cls, version_string):
        """Format version of item"""
        if version_string is None:
            return "No Version"
        else:
            return "v%s" % version_string

    @classmethod
    def generate_export_items(cls):
        """Create a list of items"""
        items = list()

        # Get all binaries
        for binary in packagemanager.get_binaries():
            binary.install_name = binary.name
            binary.local_name = "%s/%s" % (DTF_BINARIES_DIR, binary.name)
            items.append(binary)

        # Get all libraries
        for library in packagemanager.get_libraries():
            library.install_name = library.name
            library.local_name = "%s/%s" % (DTF_LIBRARIES_DIR, library.name)
            items.append(library)

        # Get all modules
        for module in packagemanager.get_modules():
            module.install_name = module.name
            module.local_name = "%s/%s" % (DTF_MODULES_DIR, module.name)
            items.append(module)

        # Get all packages
        for package in packagemanager.get_packages():
            package.install_name = package.name
            package.local_name = "%s/%s" % (DTF_PACKAGES_DIR, package.name)
            items.append(package)

        return items

    def print_installed_binaries(self, verbosity):
        """Print installed binaries"""
        binary_list = packagemanager.get_binaries()

        # If we are trying to be quiet, just print each item.
        if verbosity == LIST_QUIET:
            for binary in binary_list:
                print(binary.name)
            return

        # Otherwise, iterate over and print more
        print('Installed Binaries')
        for binary in binary_list:
            # Format version
            version = self.format_version(binary.version)
            print("\t%s (%s)" % (binary.name, version))
            if verbosity == LIST_VERBOSE:
                print("\t About: %s" % binary.about)
                print("\t Author: %s" % binary.author)

        return 0

    def print_installed_libraries(self, verbosity):
        """Print installed libraries"""
        library_list = packagemanager.get_libraries()

        # If we are trying to be quiet, just print each item.
        if verbosity == LIST_QUIET:
            for library in library_list:
                print(library.name)
            return

        # Otherwise, iterate over and print more
        print('Installed Libraries')
        for library in library_list:
            # Format version
            version = self.format_version(library.version)
            print("\t%s (%s)" % (library.name, version))
            if verbosity == LIST_VERBOSE:
                print("\t About: %s" % library.about)
                print("\t Author: %s" % library.author)

        return 0

    def print_installed_modules(self, verbosity):
        """Print installed modules"""
        module_list = packagemanager.get_modules()

        # If we are trying to be quiet, just print each item.
        if verbosity == LIST_QUIET:
            for module in module_list:
                print(module.name)
            return

        # Otherwise, iterate over and print more
        print('Installed Modules')
        for module in module_list:
            # Format version
            version = self.format_version(module.version)
            print("\t%s (%s)" % (module.name, version))
            if verbosity == LIST_VERBOSE:
                print("\t About: %s" % module.about)
                print("\t Author: %s" % module.author)

        return 0

    def print_installed_packages(self, verbosity):
        """Print installed packages"""
        package_list = packagemanager.get_packages()

        # If we are trying to be quiet, just print each item.
        if verbosity == LIST_QUIET:
            for package in package_list:
                print(package.name)
            return

        # Otherwise, iterate over and print more
        print('Installed Packages')
        for package in package_list:
            # Format version
            version = self.format_version(package.version)
            print("\t%s (%s)" % (package.name, version))
            if verbosity == LIST_VERBOSE:
                print("\t About: %s" % package.about)
                print("\t Author: %s" % package.author)

        return 0

    @classmethod
    def auto_parse_module(cls, args):
        """Automatically parse module and return Item"""
        item = None
        name = args.single_name
        install_name = args.single_install_name
        local_name = args.single_local_name

        if install_name is None:
            log.d(TAG, "install_name is null, using name...")
            install_name = os.path.basename(name)
        if local_name is None:
            log.d(TAG, "local_name is null, using name...")
            local_name = name

        # Does the resource even exist?
        if not os.path.isfile(local_name):
            log.e(TAG, "Local module resource '%s' does not exist!" % (local_name))
            return None

        if packagemanager.is_python_module(local_name, install_name):
            log.d(TAG, "Python mode selected")
            item = packagemanager.parse_python_module(local_name, install_name)
            if item is None:
                log.e(TAG, "Error parsing Python module!")
                return None

        elif packagemanager.is_bash_module(local_name):
            log.d(TAG, "Bash mode selected")
            item = packagemanager.parse_bash_module(local_name, install_name)
            if item is None:
                log.e(TAG, "Error parsing Bash module!")
                return None
        else:
            log.e(TAG, "Auto parse for Python and Bash failed!")
            return None

        return item

    def parse_single_item(self, args):  # pylint: disable=too-many-branches
        """Parse args, return Item"""
        item = dtf.core.item.Item()

        if args.single_name is None:
            log.e(TAG, "No '--name' specified in single item mode. Exiting.")
            return None

        item.name = args.single_name

        if args.single_type not in dtf.core.item.VALID_TYPES:
            log.e(TAG, "Invalid type passed to single. Exiting.")
            return None

        item.type = args.single_type

        version = args.single_version
        if version is not None:
            if dtf.core.item.is_valid_version(version):
                item.version = version
            else:
                log.e(TAG, "Version string is not valid. Exiting.")
                return None
        else:
            log.w(TAG, "No version provided, using v1.0.0")
            item.version = "1.0.0"

        try:
            item.author = " ".join(args.single_author)
        except TypeError:
            item.author = None

        try:
            item.about = " ".join(args.single_about)
        except TypeError:
            item.about = None

        install_name = args.single_install_name
        local_name = args.single_local_name

        if install_name is None:
            log.d(TAG, "install_name is null, using name...")
            install_name = os.path.basename(args.single_name)
        if local_name is None:
            log.d(TAG, "local_name is null, using name...")
            local_name = args.single_name

        item.install_name = install_name
        item.local_name = local_name

        if self.check_local_exists(item):
            return item
        else:
            return None

    def parse_and_install_single(self, args, single_type):
        """Parse and install single item"""
        force_mode = args.force

        # Check for auto-mode:
        if args.single_auto:
            # Only modules can be auto-parsed
            if single_type == TYPE_MODULE:
                log.i(TAG, "Attempting to auto parse...")
                item = self.auto_parse_module(args)
                if item is None:
                    log.e(TAG, "Error autoparsing module!")
                    return -9
            else:
                log.e(TAG, "Autoparse is only available for modules!")
                return -4
        # Not auto
        else:
            item = self.parse_single_item(args)
            if item is None:
                log.e(TAG, "Error parsing single item!")
                return -5

        return packagemanager.install_single(item, force=force_mode)

    @classmethod
    def check_local_exists(cls, item):
        """Check if local item exists and print error"""
        if item.type == TYPE_BINARY:
            if not os.path.isfile(item.local_name):
                log.e(TAG, "Local item '%s' does not exist. Exiting." % (item.local_name))
                return None
        elif item.type == TYPE_LIBRARY:
            if not os.path.isdir(item.local_name):
                log.e(TAG, "Local directory '%s' does not exist. Exiting." % (item.local_name))
                return None
        elif item.type == TYPE_MODULE:
            if not os.path.isfile(item.local_name):
                log.e(TAG, "Local item '%s' does not exist. Exiting." % (item.local_name))
                return None
        elif item.type == TYPE_PACKAGE:
            if not os.path.isdir(item.local_name):
                log.e(TAG, "Local directory '%s' does not exist. Exiting." % (item.local_name))
                return None

        return item

    @classmethod
    def download_temp_file(cls, url, verify=True):
        """Download a file from URL to tempfile"""
        try:
            req = requests.get(url, verify=verify, stream=True)
        except requests.exceptions.RequestException as excpt:
            log.e(TAG, "Error downloading repo data!")
            print(excpt)
            return None

        temp_f = tempfile.NamedTemporaryFile()
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:
                temp_f.write(chunk)

        # Reset the seek
        temp_f.seek(0)

        return temp_f

    def execute(self, args):
        """Main module executor"""
        self.name = self.__self__

        rtn = 0

        if len(args) < 1:
            return self.usage()

        sub_cmd = args.pop(0)

        if sub_cmd == "install":
            rtn = self.do_install(args)
        elif sub_cmd == "delete":
            rtn = self.do_delete(args)
        elif sub_cmd == "export":
            rtn = self.do_export(args)
        elif sub_cmd == "list":
            rtn = self.do_list(args)
        elif sub_cmd == "purge":
            rtn = self.do_purge()
        elif sub_cmd == "repo":
            rtn = self.do_repo(args)
        elif sub_cmd == "upgrade":
            rtn = self.do_upgrade(args)
        else:
            log.e(TAG, "Sub-command '%s' not found!" % sub_cmd)
            rtn = self.usage()

        return rtn
[ "dtf.core.packagemanager.parse_bash_module", "dtf.core.compat.raw_input", "dtf.core.packagemanager.get_repos", "dtf.core.manifestparser.ExportZip", "argparse.ArgumentParser", "dtf.core.packagemanager.get_packages", "dtf.logging.e", "dtf.logging.d", "dtf.core.packagemanager.install_zip", "os.path.isfile", "dtf.logging.w", "dtf.core.packagemanager.delete_package", "dtf.core.packagemanager.is_python_module", "zipfile.is_zipfile", "dtf.core.packagemanager.install_single", "dtf.core.packagemanager.parse_python_module", "dtf.core.utils.is_http_url", "dtf.core.packagemanager.is_bash_module", "dtf.core.packagemanager.delete_binary", "requests.get", "dtf.core.packagemanager.add_repo", "dtf.core.packagemanager.get_binaries", "os.path.basename", "dtf.core.packagemanager.remove_repo", "dtf.logging.i", "dtf.core.packagemanager.delete_module", "dtf.core.packagemanager.purge", "tempfile.NamedTemporaryFile", "dtf.core.packagemanager.get_libraries", "dtf.core.packagemanager.delete_library", "os.path.isdir", "dtf.core.packagemanager.get_modules" ]
[((2469, 2558), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'prog': '"""pm install"""', 'description': '"""Install a item or DTF ZIP of items."""'}), "(prog='pm install', description=\n 'Install a item or DTF ZIP of items.')\n", (2483, 2558), False, 'from argparse import ArgumentParser\n'), ((5889, 5979), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'prog': '"""pm delete"""', 'description': '"""Remove a item from disk and database."""'}), "(prog='pm delete', description=\n 'Remove a item from disk and database.')\n", (5903, 5979), False, 'from argparse import ArgumentParser\n'), ((7429, 7502), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'prog': '"""pm export"""', 'description': '"""Export installed content."""'}), "(prog='pm export', description='Export installed content.')\n", (7443, 7502), False, 'from argparse import ArgumentParser\n'), ((7752, 7779), 'os.path.isfile', 'os.path.isfile', (['output_name'], {}), '(output_name)\n', (7766, 7779), False, 'import os\n'), ((8081, 8106), 'dtf.core.manifestparser.ExportZip', 'mp.ExportZip', (['output_name'], {}), '(output_name)\n', (8093, 8106), True, 'import dtf.core.manifestparser as mp\n'), ((8219, 8250), 'dtf.logging.i', 'log.i', (['TAG', '"""Export completed!"""'], {}), "(TAG, 'Export completed!')\n", (8224, 8250), True, 'import dtf.logging as log\n'), ((8374, 8446), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'prog': '"""pm list"""', 'description': '"""List installed components."""'}), "(prog='pm list', description='List installed components.')\n", (8388, 8446), False, 'from argparse import ArgumentParser\n'), ((10558, 10576), 'dtf.core.compat.raw_input', 'compat.raw_input', ([], {}), '()\n', (10574, 10576), True, 'import dtf.core.compat as compat\n'), ((11454, 11527), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'prog': '"""pm upgrade"""', 'description': '"""Upgrade managed content."""'}), "(prog='pm upgrade', description='Upgrade managed content.')\n", (11468, 11527), False, 'from argparse import ArgumentParser\n'), ((12631, 12657), 'dtf.core.packagemanager.get_repos', 'packagemanager.get_repos', ([], {}), '()\n', (12655, 12657), True, 'import dtf.core.packagemanager as packagemanager\n'), ((13440, 13471), 'dtf.logging.i', 'log.i', (['TAG', '"""Upgrade complete."""'], {}), "(TAG, 'Upgrade complete.')\n", (13445, 13471), True, 'import dtf.logging as log\n'), ((13749, 13788), 'dtf.core.packagemanager.add_repo', 'packagemanager.add_repo', (['repo_name', 'url'], {}), '(repo_name, url)\n', (13772, 13788), True, 'import dtf.core.packagemanager as packagemanager\n'), ((14021, 14058), 'dtf.core.packagemanager.remove_repo', 'packagemanager.remove_repo', (['repo_name'], {}), '(repo_name)\n', (14047, 14058), True, 'import dtf.core.packagemanager as packagemanager\n'), ((14195, 14221), 'dtf.core.packagemanager.get_repos', 'packagemanager.get_repos', ([], {}), '()\n', (14219, 14221), True, 'import dtf.core.packagemanager as packagemanager\n'), ((14678, 14707), 'dtf.core.packagemanager.get_binaries', 'packagemanager.get_binaries', ([], {}), '()\n', (14705, 14707), True, 'import dtf.core.packagemanager as packagemanager\n'), ((14915, 14945), 'dtf.core.packagemanager.get_libraries', 'packagemanager.get_libraries', ([], {}), '()\n', (14943, 14945), True, 'import dtf.core.packagemanager as packagemanager\n'), ((15156, 15184), 'dtf.core.packagemanager.get_modules', 'packagemanager.get_modules', ([], {}), '()\n', (15182, 15184), True, 'import dtf.core.packagemanager as packagemanager\n'), ((15390, 15419), 
'dtf.core.packagemanager.get_packages', 'packagemanager.get_packages', ([], {}), '()\n', (15417, 15419), True, 'import dtf.core.packagemanager as packagemanager\n'), ((15717, 15746), 'dtf.core.packagemanager.get_binaries', 'packagemanager.get_binaries', ([], {}), '()\n', (15744, 15746), True, 'import dtf.core.packagemanager as packagemanager\n'), ((16496, 16526), 'dtf.core.packagemanager.get_libraries', 'packagemanager.get_libraries', ([], {}), '()\n', (16524, 16526), True, 'import dtf.core.packagemanager as packagemanager\n'), ((17281, 17309), 'dtf.core.packagemanager.get_modules', 'packagemanager.get_modules', ([], {}), '()\n', (17307, 17309), True, 'import dtf.core.packagemanager as packagemanager\n'), ((18056, 18085), 'dtf.core.packagemanager.get_packages', 'packagemanager.get_packages', ([], {}), '()\n', (18083, 18085), True, 'import dtf.core.packagemanager as packagemanager\n'), ((19474, 19531), 'dtf.core.packagemanager.is_python_module', 'packagemanager.is_python_module', (['local_name', 'install_name'], {}), '(local_name, install_name)\n', (19505, 19531), True, 'import dtf.core.packagemanager as packagemanager\n'), ((22877, 22930), 'dtf.core.packagemanager.install_single', 'packagemanager.install_single', (['item'], {'force': 'force_mode'}), '(item, force=force_mode)\n', (22906, 22930), True, 'import dtf.core.packagemanager as packagemanager\n'), ((24389, 24418), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (24416, 24418), False, 'import tempfile\n'), ((4940, 5007), 'dtf.logging.e', 'log.e', (['TAG', '"""Cannot install both DTF ZIP and single item. Exiting."""'], {}), "(TAG, 'Cannot install both DTF ZIP and single item. Exiting.')\n", (4945, 5007), True, 'import dtf.logging as log\n'), ((5101, 5166), 'dtf.logging.e', 'log.e', (['TAG', '"""ZIP mode or single item mode not detected. Exiting."""'], {}), "(TAG, 'ZIP mode or single item mode not detected. Exiting.')\n", (5106, 5166), True, 'import dtf.logging as log\n'), ((5266, 5299), 'zipfile.is_zipfile', 'zipfile.is_zipfile', (['zip_file_name'], {}), '(zip_file_name)\n', (5284, 5299), False, 'import zipfile\n'), ((6639, 6699), 'dtf.logging.e', 'log.e', (['TAG', '"""\'--name\' is required for delete mode. Exiting."""'], {}), '(TAG, "\'--name\' is required for delete mode. Exiting.")\n', (6644, 6699), True, 'import dtf.logging as log\n'), ((6821, 6873), 'dtf.core.packagemanager.delete_binary', 'packagemanager.delete_binary', (['name'], {'force': 'force_mode'}), '(name, force=force_mode)\n', (6849, 6873), True, 'import dtf.core.packagemanager as packagemanager\n'), ((7793, 7834), 'dtf.logging.e', 'log.e', (['TAG', '"""Output file already exists!"""'], {}), "(TAG, 'Output file already exists!')\n", (7798, 7834), True, 'import dtf.logging as log\n'), ((8004, 8036), 'dtf.logging.e', 'log.e', (['TAG', '"""Nothing to export!"""'], {}), "(TAG, 'Nothing to export!')\n", (8009, 8036), True, 'import dtf.logging as log\n'), ((9194, 9239), 'dtf.logging.e', 'log.e', (['TAG', '"""Unable to be verbose and quiet!"""'], {}), "(TAG, 'Unable to be verbose and quiet!')\n", (9199, 9239), True, 'import dtf.logging as log\n'), ((10628, 10650), 'dtf.core.packagemanager.purge', 'packagemanager.purge', ([], {}), '()\n', (10648, 10650), True, 'import dtf.core.packagemanager as packagemanager\n'), ((12672, 12740), 'dtf.logging.i', 'log.i', (['TAG', '("Requesting content from \'%s\' (%s).." % (repo_name, url))'], {}), '(TAG, "Requesting content from \'%s\' (%s).." 
% (repo_name, url))\n', (12677, 12740), True, 'import dtf.logging as log\n'), ((13246, 13279), 'dtf.logging.i', 'log.i', (['TAG', '"""Starting install..."""'], {}), "(TAG, 'Starting install...')\n", (13251, 13279), True, 'import dtf.logging as log\n'), ((13292, 13363), 'dtf.core.packagemanager.install_zip', 'packagemanager.install_zip', (['file_f.name'], {'force': 'force', 'new_only': 'new_only'}), '(file_f.name, force=force, new_only=new_only)\n', (13318, 13363), True, 'import dtf.core.packagemanager as packagemanager\n'), ((13605, 13651), 'dtf.logging.e', 'log.e', (['TAG', '"""A repo name and URL is required!"""'], {}), "(TAG, 'A repo name and URL is required!')\n", (13610, 13651), True, 'import dtf.logging as log\n'), ((13911, 13950), 'dtf.logging.e', 'log.e', (['TAG', '"""Must specify a repo name!"""'], {}), "(TAG, 'Must specify a repo name!')\n", (13916, 13950), True, 'import dtf.logging as log\n'), ((19031, 19080), 'dtf.logging.d', 'log.d', (['TAG', '"""install_name is null, using name..."""'], {}), "(TAG, 'install_name is null, using name...')\n", (19036, 19080), True, 'import dtf.logging as log\n'), ((19108, 19130), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (19124, 19130), False, 'import os\n'), ((19174, 19221), 'dtf.logging.d', 'log.d', (['TAG', '"""local_name is null, using name..."""'], {}), "(TAG, 'local_name is null, using name...')\n", (19179, 19221), True, 'import dtf.logging as log\n'), ((19308, 19334), 'os.path.isfile', 'os.path.isfile', (['local_name'], {}), '(local_name)\n', (19322, 19334), False, 'import os\n'), ((19348, 19417), 'dtf.logging.e', 'log.e', (['TAG', '("Local module resource \'%s\' does not exist!" % local_name)'], {}), '(TAG, "Local module resource \'%s\' does not exist!" % local_name)\n', (19353, 19417), True, 'import dtf.logging as log\n'), ((19545, 19579), 'dtf.logging.d', 'log.d', (['TAG', '"""Python mode selected"""'], {}), "(TAG, 'Python mode selected')\n", (19550, 19579), True, 'import dtf.logging as log\n'), ((19600, 19660), 'dtf.core.packagemanager.parse_python_module', 'packagemanager.parse_python_module', (['local_name', 'install_name'], {}), '(local_name, install_name)\n', (19634, 19660), True, 'import dtf.core.packagemanager as packagemanager\n'), ((19845, 19886), 'dtf.core.packagemanager.is_bash_module', 'packagemanager.is_bash_module', (['local_name'], {}), '(local_name)\n', (19874, 19886), True, 'import dtf.core.packagemanager as packagemanager\n'), ((20506, 20571), 'dtf.logging.e', 'log.e', (['TAG', '"""No \'--name\' specified in single item mode. Exiting."""'], {}), '(TAG, "No \'--name\' specified in single item mode. Exiting.")\n', (20511, 20571), True, 'import dtf.logging as log\n'), ((20709, 20762), 'dtf.logging.e', 'log.e', (['TAG', '"""Invalid type passed to single. Exiting."""'], {}), "(TAG, 'Invalid type passed to single. 
Exiting.')\n", (20714, 20762), True, 'import dtf.logging as log\n'), ((21131, 21178), 'dtf.logging.w', 'log.w', (['TAG', '"""No version provided, using v1.0.0"""'], {}), "(TAG, 'No version provided, using v1.0.0')\n", (21136, 21178), True, 'import dtf.logging as log\n'), ((21602, 21651), 'dtf.logging.d', 'log.d', (['TAG', '"""install_name is null, using name..."""'], {}), "(TAG, 'install_name is null, using name...')\n", (21607, 21651), True, 'import dtf.logging as log\n'), ((21679, 21713), 'os.path.basename', 'os.path.basename', (['args.single_name'], {}), '(args.single_name)\n', (21695, 21713), False, 'import os\n'), ((21757, 21804), 'dtf.logging.d', 'log.d', (['TAG', '"""local_name is null, using name..."""'], {}), "(TAG, 'local_name is null, using name...')\n", (21762, 21804), True, 'import dtf.logging as log\n'), ((24157, 24202), 'requests.get', 'requests.get', (['url'], {'verify': 'verify', 'stream': '(True)'}), '(url, verify=verify, stream=True)\n', (24169, 24202), False, 'import requests\n'), ((5324, 5402), 'dtf.core.packagemanager.install_zip', 'packagemanager.install_zip', (['zip_file_name'], {'force': 'force_mode', 'new_only': 'new_only'}), '(zip_file_name, force=force_mode, new_only=new_only)\n', (5350, 5402), True, 'import dtf.core.packagemanager as packagemanager\n'), ((5538, 5615), 'dtf.logging.e', 'log.e', (['TAG', '("\'%s\' is not a valid ZIP file or does not exist." % zip_file_name)'], {}), '(TAG, "\'%s\' is not a valid ZIP file or does not exist." % zip_file_name)\n', (5543, 5615), True, 'import dtf.logging as log\n'), ((6932, 6985), 'dtf.core.packagemanager.delete_library', 'packagemanager.delete_library', (['name'], {'force': 'force_mode'}), '(name, force=force_mode)\n', (6961, 6985), True, 'import dtf.core.packagemanager as packagemanager\n'), ((12775, 12797), 'dtf.core.utils.is_http_url', 'utils.is_http_url', (['url'], {}), '(url)\n', (12792, 12797), True, 'import dtf.core.utils as utils\n'), ((12834, 12904), 'dtf.logging.w', 'log.w', (['TAG', '("Skipping \'%s\' due to HTTP (use --allow-http)" % repo_name)'], {}), '(TAG, "Skipping \'%s\' due to HTTP (use --allow-http)" % repo_name)\n', (12839, 12904), True, 'import dtf.logging as log\n'), ((13095, 13126), 'zipfile.is_zipfile', 'zipfile.is_zipfile', (['file_f.name'], {}), '(file_f.name)\n', (13113, 13126), False, 'import zipfile\n'), ((13144, 13207), 'dtf.logging.w', 'log.w', (['TAG', '"""Pulled content is not a valid ZIP file, skipping!"""'], {}), "(TAG, 'Pulled content is not a valid ZIP file, skipping!')\n", (13149, 13207), True, 'import dtf.logging as log\n'), ((19760, 19802), 'dtf.logging.e', 'log.e', (['TAG', '"""Error parsing Python module!"""'], {}), "(TAG, 'Error parsing Python module!')\n", (19765, 19802), True, 'import dtf.logging as log\n'), ((19900, 19932), 'dtf.logging.d', 'log.d', (['TAG', '"""Bash mode selected"""'], {}), "(TAG, 'Bash mode selected')\n", (19905, 19932), True, 'import dtf.logging as log\n'), ((19953, 20011), 'dtf.core.packagemanager.parse_bash_module', 'packagemanager.parse_bash_module', (['local_name', 'install_name'], {}), '(local_name, install_name)\n', (19985, 20011), True, 'import dtf.core.packagemanager as packagemanager\n'), ((20205, 20257), 'dtf.logging.e', 'log.e', (['TAG', '"""Auto parse for Python and Bash failed!"""'], {}), "(TAG, 'Auto parse for Python and Bash failed!')\n", (20210, 20257), True, 'import dtf.logging as log\n'), ((21025, 21076), 'dtf.logging.e', 'log.e', (['TAG', '"""Version string is not valid. Exiting."""'], {}), "(TAG, 'Version string is not valid. 
Exiting.')\n", (21030, 21076), True, 'import dtf.logging as log\n'), ((22335, 22376), 'dtf.logging.i', 'log.i', (['TAG', '"""Attempting to auto parse..."""'], {}), "(TAG, 'Attempting to auto parse...')\n", (22340, 22376), True, 'import dtf.logging as log\n'), ((22587, 22641), 'dtf.logging.e', 'log.e', (['TAG', '"""Autoparse is only available for modules!"""'], {}), "(TAG, 'Autoparse is only available for modules!')\n", (22592, 22641), True, 'import dtf.logging as log\n'), ((22794, 22834), 'dtf.logging.e', 'log.e', (['TAG', '"""Error parsing single item!"""'], {}), "(TAG, 'Error parsing single item!')\n", (22799, 22834), True, 'import dtf.logging as log\n'), ((23103, 23134), 'os.path.isfile', 'os.path.isfile', (['item.local_name'], {}), '(item.local_name)\n', (23117, 23134), False, 'import os\n'), ((23152, 23224), 'dtf.logging.e', 'log.e', (['TAG', '("Local item \'%s\' does not exist. Exiting." % item.local_name)'], {}), '(TAG, "Local item \'%s\' does not exist. Exiting." % item.local_name)\n', (23157, 23224), True, 'import dtf.logging as log\n'), ((24279, 24321), 'dtf.logging.e', 'log.e', (['TAG', '"""Error downloading repo data!"""'], {}), "(TAG, 'Error downloading repo data!')\n", (24284, 24321), True, 'import dtf.logging as log\n'), ((7043, 7095), 'dtf.core.packagemanager.delete_module', 'packagemanager.delete_module', (['name'], {'force': 'force_mode'}), '(name, force=force_mode)\n', (7071, 7095), True, 'import dtf.core.packagemanager as packagemanager\n'), ((11284, 11328), 'dtf.logging.e', 'log.e', (['TAG', "('Invalid repo command: %s' % cmd)"], {}), "(TAG, 'Invalid repo command: %s' % cmd)\n", (11289, 11328), True, 'import dtf.logging as log\n'), ((20109, 20149), 'dtf.logging.e', 'log.e', (['TAG', '"""Error parsing Bash module!"""'], {}), "(TAG, 'Error parsing Bash module!')\n", (20114, 20149), True, 'import dtf.logging as log\n'), ((22483, 22522), 'dtf.logging.e', 'log.e', (['TAG', '"""Error autoparsing module!"""'], {}), "(TAG, 'Error autoparsing module!')\n", (22488, 22522), True, 'import dtf.logging as log\n'), ((23336, 23366), 'os.path.isdir', 'os.path.isdir', (['item.local_name'], {}), '(item.local_name)\n', (23349, 23366), False, 'import os\n'), ((23384, 23461), 'dtf.logging.e', 'log.e', (['TAG', '("Local directory \'%s\' does not exist. Exiting." % item.local_name)'], {}), '(TAG, "Local directory \'%s\' does not exist. Exiting." % item.local_name)\n', (23389, 23461), True, 'import dtf.logging as log\n'), ((7154, 7207), 'dtf.core.packagemanager.delete_package', 'packagemanager.delete_package', (['name'], {'force': 'force_mode'}), '(name, force=force_mode)\n', (7183, 7207), True, 'import dtf.core.packagemanager as packagemanager\n'), ((7234, 7287), 'dtf.logging.e', 'log.e', (['TAG', '"""Invalid type passed to delete. Exiting."""'], {}), "(TAG, 'Invalid type passed to delete. Exiting.')\n", (7239, 7287), True, 'import dtf.logging as log\n'), ((23572, 23603), 'os.path.isfile', 'os.path.isfile', (['item.local_name'], {}), '(item.local_name)\n', (23586, 23603), False, 'import os\n'), ((23621, 23693), 'dtf.logging.e', 'log.e', (['TAG', '("Local item \'%s\' does not exist. Exiting." % item.local_name)'], {}), '(TAG, "Local item \'%s\' does not exist. Exiting." 
% item.local_name)\n', (23626, 23693), True, 'import dtf.logging as log\n'), ((9885, 9939), 'dtf.logging.e', 'log.e', (['TAG', "('Unknown filter specified : %s' % d_filter)"], {}), "(TAG, 'Unknown filter specified : %s' % d_filter)\n", (9890, 9939), True, 'import dtf.logging as log\n'), ((23805, 23835), 'os.path.isdir', 'os.path.isdir', (['item.local_name'], {}), '(item.local_name)\n', (23818, 23835), False, 'import os\n'), ((23853, 23930), 'dtf.logging.e', 'log.e', (['TAG', '("Local directory \'%s\' does not exist. Exiting." % item.local_name)'], {}), '(TAG, "Local directory \'%s\' does not exist. Exiting." % item.local_name)\n', (23858, 23930), True, 'import dtf.logging as log\n'), ((25340, 25391), 'dtf.logging.e', 'log.e', (['TAG', '("Sub-command \'%s\' not found!" % sub_cmd)'], {}), '(TAG, "Sub-command \'%s\' not found!" % sub_cmd)\n', (25345, 25391), True, 'import dtf.logging as log\n')]
import unittest

from dlgo.agent.helpers import is_point_an_eye
from dlgo.goboard_fast import Board, GameState, Move
from dlgo.gotypes import Player, Point
from dlgo.encoders.alphago import AlphaGoEncoder


class AlphaGoEncoderTest(unittest.TestCase):
    def test_encoder(self):
        alphago = AlphaGoEncoder()

        start = GameState.new_game(19)
        next_state = start.apply_move(Move.play(Point(16, 16)))
        alphago.encode(next_state)

        self.assertEquals(alphago.name(), 'alphago')
        self.assertEquals(alphago.board_height, 19)
        self.assertEquals(alphago.board_width, 19)
        self.assertEquals(alphago.num_planes, 49)
        self.assertEquals(alphago.shape(), (49, 19, 19))


if __name__ == '__main__':
    unittest.main()
[ "unittest.main", "dlgo.encoders.alphago.AlphaGoEncoder", "dlgo.goboard_fast.GameState.new_game", "dlgo.gotypes.Point" ]
[((752, 767), 'unittest.main', 'unittest.main', ([], {}), '()\n', (765, 767), False, 'import unittest\n'), ((298, 314), 'dlgo.encoders.alphago.AlphaGoEncoder', 'AlphaGoEncoder', ([], {}), '()\n', (312, 314), False, 'from dlgo.encoders.alphago import AlphaGoEncoder\n'), ((332, 354), 'dlgo.goboard_fast.GameState.new_game', 'GameState.new_game', (['(19)'], {}), '(19)\n', (350, 354), False, 'from dlgo.goboard_fast import Board, GameState, Move\n'), ((403, 416), 'dlgo.gotypes.Point', 'Point', (['(16)', '(16)'], {}), '(16, 16)\n', (408, 416), False, 'from dlgo.gotypes import Player, Point\n')]
import datetime
import itertools
import logging
import os
import tempfile
import time
from collections import Counter

import torch
from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer
from fvcore.common.file_io import PathManager
from fvcore.common.timer import Timer
from fvcore.nn.precise_bn import get_bn_modules
from fvcore.nn.precise_bn import update_bn_stats

import tkdet.utils.comm as comm
from tkdet.evaluation.testing import flatten_results_dict
from tkdet.utils.events import EventStorage
from tkdet.utils.events import EventWriter
from .train_loop import HookBase

__all__ = [
    "CallbackHook",
    "IterationTimer",
    "PeriodicWriter",
    "PeriodicCheckpointer",
    "LRScheduler",
    "AutogradProfiler",
    "EvalHook",
    "PreciseBN",
]


class CallbackHook(HookBase):
    def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None):
        self._before_train = before_train
        self._before_step = before_step
        self._after_step = after_step
        self._after_train = after_train

    def before_train(self):
        if self._before_train:
            self._before_train(self.trainer)

    def after_train(self):
        if self._after_train:
            self._after_train(self.trainer)
        del self._before_train, self._after_train
        del self._before_step, self._after_step

    def before_step(self):
        if self._before_step:
            self._before_step(self.trainer)

    def after_step(self):
        if self._after_step:
            self._after_step(self.trainer)


class IterationTimer(HookBase):
    def __init__(self, warmup_iter=3):
        self._warmup_iter = warmup_iter
        self._step_timer = Timer()
        self._start_time = time.perf_counter()
        self._total_timer = Timer()

    def before_train(self):
        self._start_time = time.perf_counter()
        self._total_timer.reset()
        self._total_timer.pause()

    def after_train(self):
        logger = logging.getLogger(__name__)
        total_time = time.perf_counter() - self._start_time
        total_time_minus_hooks = self._total_timer.seconds()
        hook_time = total_time - total_time_minus_hooks

        num_iter = self.trainer.iter + 1 - self.trainer.start_iter - self._warmup_iter

        if num_iter > 0 and total_time_minus_hooks > 0:
            logger.info(
                "Overall training speed: {} iterations in {} ({:.4f} s / it)".format(
                    num_iter,
                    str(datetime.timedelta(seconds=int(total_time_minus_hooks))),
                    total_time_minus_hooks / num_iter,
                )
            )

        logger.info(
            "Total training time: {} ({} on hooks)".format(
                str(datetime.timedelta(seconds=int(total_time))),
                str(datetime.timedelta(seconds=int(hook_time))),
            )
        )

    def before_step(self):
        self._step_timer.reset()
        self._total_timer.resume()

    def after_step(self):
        iter_done = self.trainer.iter - self.trainer.start_iter + 1
        if iter_done >= self._warmup_iter:
            sec = self._step_timer.seconds()
            self.trainer.storage.put_scalars(time=sec)
        else:
            self._start_time = time.perf_counter()
            self._total_timer.reset()

        self._total_timer.pause()


class PeriodicWriter(HookBase):
    def __init__(self, writers, period=20):
        self._writers = writers
        for w in writers:
            assert isinstance(w, EventWriter), w
        self._period = period

    def after_step(self):
        if (
            (self.trainer.iter + 1) % self._period == 0
            or self.trainer.iter == self.trainer.max_iter - 1
        ):
            for writer in self._writers:
                writer.write()

    def after_train(self):
        for writer in self._writers:
            writer.close()


class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):
    def before_train(self):
        self.max_iter = self.trainer.max_iter

    def after_step(self):
        self.step(self.trainer.iter)


class LRScheduler(HookBase):
    def __init__(self, optimizer, scheduler):
        self._optimizer = optimizer
        self._scheduler = scheduler

        largest_group = max(len(g["params"]) for g in optimizer.param_groups)

        if largest_group == 1:
            lr_count = Counter([g["lr"] for g in optimizer.param_groups])
            lr = lr_count.most_common()[0][0]
            for i, g in enumerate(optimizer.param_groups):
                if g["lr"] == lr:
                    self._best_param_group_id = i
                    break
        else:
            for i, g in enumerate(optimizer.param_groups):
                if len(g["params"]) == largest_group:
                    self._best_param_group_id = i
                    break

    def after_step(self):
        lr = self._optimizer.param_groups[self._best_param_group_id]["lr"]
        self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False)
        self._scheduler.step()


class AutogradProfiler(HookBase):
    def __init__(self, enable_predicate, output_dir, *, use_cuda=True):
        self._enable_predicate = enable_predicate
        self._use_cuda = use_cuda
        self._output_dir = output_dir

    def before_step(self):
        if self._enable_predicate(self.trainer):
            self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda)
            self._profiler.__enter__()
        else:
            self._profiler = None

    def after_step(self):
        if self._profiler is None:
            return
        self._profiler.__exit__(None, None, None)
        PathManager.mkdirs(self._output_dir)
        out_file = os.path.join(
            self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter)
        )
        if "://" not in out_file:
            self._profiler.export_chrome_trace(out_file)
        else:
            with tempfile.TemporaryDirectory(prefix="tkdet_profiler") as d:
                tmp_file = os.path.join(d, "tmp.json")
                self._profiler.export_chrome_trace(tmp_file)
                with open(tmp_file) as f:
                    content = f.read()
            with PathManager.open(out_file, "w") as f:
                f.write(content)


class EvalHook(HookBase):
    def __init__(self, eval_period, eval_function):
        self._period = eval_period
        self._func = eval_function

    def _do_eval(self):
        results = self._func()

        if results:
            assert isinstance(results, dict), \
                f"Eval function must return a dict. Got {results} instead."

            flattened_results = flatten_results_dict(results)
            for k, v in flattened_results.items():
                try:
                    v = float(v)
                except Exception:
                    raise ValueError(
                        "[EvalHook] eval_function should return a nested dict of float. "
                        "Got '{}: {}' instead.".format(k, v)
                    )
            self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)

        comm.synchronize()

    def after_step(self):
        next_iter = self.trainer.iter + 1
        is_final = next_iter == self.trainer.max_iter
        if is_final or (self._period > 0 and next_iter % self._period == 0):
            self._do_eval()

    def after_train(self):
        del self._func


class PreciseBN(HookBase):
    def __init__(self, period, model, data_loader, num_iter):
        self._logger = logging.getLogger(__name__)
        if len(get_bn_modules(model)) == 0:
            self._logger.info(
                "PreciseBN is disabled because model does not contain BN layers in training mode."
            )
            self._disabled = True
            return

        self._model = model
        self._data_loader = data_loader
        self._num_iter = num_iter
        self._period = period
        self._disabled = False

        self._data_iter = None

    def after_step(self):
        next_iter = self.trainer.iter + 1
        is_final = next_iter == self.trainer.max_iter
        if is_final or (self._period > 0 and next_iter % self._period == 0):
            self.update_stats()

    def update_stats(self):
        if self._disabled:
            return

        if self._data_iter is None:
            self._data_iter = iter(self._data_loader)

        def data_loader():
            for num_iter in itertools.count(1):
                if num_iter % 100 == 0:
                    self._logger.info(
                        f"Running precise-BN ... {num_iter}/{self._num_iter} iterations."
                    )
                yield next(self._data_iter)

        with EventStorage():
            self._logger.info(
                "Running precise-BN for {} iterations... ".format(self._num_iter)
                + "Note that this could produce different statistics every time."
            )
            update_bn_stats(self._model, data_loader(), self._num_iter)
[ "fvcore.common.timer.Timer", "tempfile.TemporaryDirectory", "torch.autograd.profiler.profile", "fvcore.nn.precise_bn.get_bn_modules", "collections.Counter", "time.perf_counter", "tkdet.evaluation.testing.flatten_results_dict", "itertools.count", "fvcore.common.file_io.PathManager.open", "tkdet.utils.comm.synchronize", "fvcore.common.file_io.PathManager.mkdirs", "os.path.join", "logging.getLogger", "tkdet.utils.events.EventStorage" ]
[((1727, 1734), 'fvcore.common.timer.Timer', 'Timer', ([], {}), '()\n', (1732, 1734), False, 'from fvcore.common.timer import Timer\n'), ((1762, 1781), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1779, 1781), False, 'import time\n'), ((1810, 1817), 'fvcore.common.timer.Timer', 'Timer', ([], {}), '()\n', (1815, 1817), False, 'from fvcore.common.timer import Timer\n'), ((1874, 1893), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1891, 1893), False, 'import time\n'), ((2007, 2034), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2024, 2034), False, 'import logging\n'), ((5701, 5737), 'fvcore.common.file_io.PathManager.mkdirs', 'PathManager.mkdirs', (['self._output_dir'], {}), '(self._output_dir)\n', (5719, 5737), False, 'from fvcore.common.file_io import PathManager\n'), ((7205, 7223), 'tkdet.utils.comm.synchronize', 'comm.synchronize', ([], {}), '()\n', (7221, 7223), True, 'import tkdet.utils.comm as comm\n'), ((7617, 7644), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (7634, 7644), False, 'import logging\n'), ((2056, 2075), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2073, 2075), False, 'import time\n'), ((3283, 3302), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3300, 3302), False, 'import time\n'), ((4408, 4458), 'collections.Counter', 'Counter', (["[g['lr'] for g in optimizer.param_groups]"], {}), "([g['lr'] for g in optimizer.param_groups])\n", (4415, 4458), False, 'from collections import Counter\n'), ((5418, 5474), 'torch.autograd.profiler.profile', 'torch.autograd.profiler.profile', ([], {'use_cuda': 'self._use_cuda'}), '(use_cuda=self._use_cuda)\n', (5449, 5474), False, 'import torch\n'), ((6728, 6757), 'tkdet.evaluation.testing.flatten_results_dict', 'flatten_results_dict', (['results'], {}), '(results)\n', (6748, 6757), False, 'from tkdet.evaluation.testing import flatten_results_dict\n'), ((8536, 8554), 'itertools.count', 'itertools.count', (['(1)'], {}), '(1)\n', (8551, 8554), False, 'import itertools\n'), ((8805, 8819), 'tkdet.utils.events.EventStorage', 'EventStorage', ([], {}), '()\n', (8817, 8819), False, 'from tkdet.utils.events import EventStorage\n'), ((6000, 6052), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'prefix': '"""tkdet_profiler"""'}), "(prefix='tkdet_profiler')\n", (6027, 6052), False, 'import tempfile\n'), ((6086, 6113), 'os.path.join', 'os.path.join', (['d', '"""tmp.json"""'], {}), "(d, 'tmp.json')\n", (6098, 6113), False, 'import os\n'), ((6273, 6304), 'fvcore.common.file_io.PathManager.open', 'PathManager.open', (['out_file', '"""w"""'], {}), "(out_file, 'w')\n", (6289, 6304), False, 'from fvcore.common.file_io import PathManager\n'), ((7660, 7681), 'fvcore.nn.precise_bn.get_bn_modules', 'get_bn_modules', (['model'], {}), '(model)\n', (7674, 7681), False, 'from fvcore.nn.precise_bn import get_bn_modules\n')]
from setuptools import setup, find_packages
from distutils.core import setup

setup(name='pyts',
      version='0.5',
      description='A package for transformation and classification of time series',
      classifiers=[
          'Development Status :: 3 - Alpha',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3.5',
          'Topic :: Scientific/Engineering',
      ],
      keywords='time series machine learning transformation classification',
      url='https://github.com/johannfaouzi/pyts',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=find_packages(),
      install_requires=[
          'numpy',
          'scipy',
          'sklearn',
          'matplotlib'
      ],
      zip_safe=False)
[ "setuptools.find_packages" ]
[((633, 648), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (646, 648), False, 'from setuptools import setup, find_packages\n')]
"""Sensor to indicate whether the current day is a workday.""" from datetime import datetime, timedelta import logging import holidays import voluptuous as vol from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorDevice from homeassistant.const import CONF_NAME, WEEKDAYS import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) # List of all countries currently supported by holidays # Source: https://github.com/dr-prodigy/python-holidays#available-countries # There seems to be no way to get the list out at runtime ALL_COUNTRIES = [ "Argentina", "AR", "Aruba", "AW", "Australia", "AU", "Austria", "AT", "Brazil", "BR", "Belarus", "BY", "Belgium", "BE", "Bulgaria", "BG", "Canada", "CA", "Colombia", "CO", "Croatia", "HR", "Czech", "CZ", "Denmark", "DK", "England", "Estonia", "EE", "EuropeanCentralBank", "ECB", "TAR", "Finland", "FI", "France", "FRA", "Germany", "DE", "Hungary", "HU", "Honduras", "HND", "Iceland", "IS", "India", "IND", "Ireland", "IE", "Isle of Man", "Italy", "IT", "Japan", "JP", "Kenya", "KE", "Lithuania", "LT", "Luxembourg", "LU", "Mexico", "MX", "Netherlands", "NL", "NewZealand", "NZ", "Northern Ireland", "Norway", "NO", "Peru", "PE", "Poland", "Polish", "PL", "Portugal", "PT", "PortugalExt", "PTE", "Russia", "RU", "Scotland", "Slovenia", "SI", "Slovakia", "SK", "South Africa", "ZA", "Spain", "ES", "Sweden", "SE", "Switzerland", "CH", "Ukraine", "UA", "UnitedKingdom", "UK", "UnitedStates", "US", "Wales", ] ALLOWED_DAYS = WEEKDAYS + ["holiday"] CONF_COUNTRY = "country" CONF_PROVINCE = "province" CONF_WORKDAYS = "workdays" CONF_EXCLUDES = "excludes" CONF_OFFSET = "days_offset" CONF_ADD_HOLIDAYS = "add_holidays" # By default, Monday - Friday are workdays DEFAULT_WORKDAYS = ["mon", "tue", "wed", "thu", "fri"] # By default, public holidays, Saturdays and Sundays are excluded from workdays DEFAULT_EXCLUDES = ["sat", "sun", "holiday"] DEFAULT_NAME = "Workday Sensor" DEFAULT_OFFSET = 0 PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_COUNTRY): vol.In(ALL_COUNTRIES), vol.Optional(CONF_EXCLUDES, default=DEFAULT_EXCLUDES): vol.All( cv.ensure_list, [vol.In(ALLOWED_DAYS)] ), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_OFFSET, default=DEFAULT_OFFSET): vol.Coerce(int), vol.Optional(CONF_PROVINCE): cv.string, vol.Optional(CONF_WORKDAYS, default=DEFAULT_WORKDAYS): vol.All( cv.ensure_list, [vol.In(ALLOWED_DAYS)] ), vol.Optional(CONF_ADD_HOLIDAYS): vol.All(cv.ensure_list, [cv.string]), } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Workday sensor.""" sensor_name = config.get(CONF_NAME) country = config.get(CONF_COUNTRY) province = config.get(CONF_PROVINCE) workdays = config.get(CONF_WORKDAYS) excludes = config.get(CONF_EXCLUDES) days_offset = config.get(CONF_OFFSET) add_holidays = config.get(CONF_ADD_HOLIDAYS) year = (get_date(datetime.today()) + timedelta(days=days_offset)).year obj_holidays = getattr(holidays, country)(years=year) if province: # 'state' and 'prov' are not interchangeable, so need to make # sure we use the right one if hasattr(obj_holidays, "PROVINCES") and province in obj_holidays.PROVINCES: obj_holidays = getattr(holidays, country)(prov=province, years=year) elif hasattr(obj_holidays, "STATES") and province in obj_holidays.STATES: obj_holidays = getattr(holidays, country)(state=province, years=year) else: _LOGGER.error( "There is no province/state %s in country %s", province, country ) return # Add custom holidays try: 
obj_holidays.append(add_holidays) except TypeError: _LOGGER.debug("No custom holidays or invalid holidays") _LOGGER.debug("Found the following holidays for your configuration:") for date, name in sorted(obj_holidays.items()): _LOGGER.debug("%s %s", date, name) add_entities( [IsWorkdaySensor(obj_holidays, workdays, excludes, days_offset, sensor_name)], True, ) def day_to_string(day): """Convert day index 0 - 7 to string.""" try: return ALLOWED_DAYS[day] except IndexError: return None def get_date(date): """Return date. Needed for testing.""" return date class IsWorkdaySensor(BinarySensorDevice): """Implementation of a Workday sensor.""" def __init__(self, obj_holidays, workdays, excludes, days_offset, name): """Initialize the Workday sensor.""" self._name = name self._obj_holidays = obj_holidays self._workdays = workdays self._excludes = excludes self._days_offset = days_offset self._state = None @property def name(self): """Return the name of the sensor.""" return self._name @property def is_on(self): """Return the state of the device.""" return self._state def is_include(self, day, now): """Check if given day is in the includes list.""" if day in self._workdays: return True if "holiday" in self._workdays and now in self._obj_holidays: return True return False def is_exclude(self, day, now): """Check if given day is in the excludes list.""" if day in self._excludes: return True if "holiday" in self._excludes and now in self._obj_holidays: return True return False @property def state_attributes(self): """Return the attributes of the entity.""" # return self._attributes return { CONF_WORKDAYS: self._workdays, CONF_EXCLUDES: self._excludes, CONF_OFFSET: self._days_offset, } async def async_update(self): """Get date and look whether it is a holiday.""" # Default is no workday self._state = False # Get iso day of the week (1 = Monday, 7 = Sunday) date = get_date(datetime.today()) + timedelta(days=self._days_offset) day = date.isoweekday() - 1 day_of_week = day_to_string(day) if self.is_include(day_of_week, date): self._state = True if self.is_exclude(day_of_week, date): self._state = False
[ "voluptuous.Optional", "datetime.datetime.today", "voluptuous.All", "voluptuous.Required", "datetime.timedelta", "voluptuous.In", "logging.getLogger", "voluptuous.Coerce" ]
[((365, 392), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (382, 392), False, 'import logging\n'), ((2445, 2471), 'voluptuous.Required', 'vol.Required', (['CONF_COUNTRY'], {}), '(CONF_COUNTRY)\n', (2457, 2471), True, 'import voluptuous as vol\n'), ((2504, 2557), 'voluptuous.Optional', 'vol.Optional', (['CONF_EXCLUDES'], {'default': 'DEFAULT_EXCLUDES'}), '(CONF_EXCLUDES, default=DEFAULT_EXCLUDES)\n', (2516, 2557), True, 'import voluptuous as vol\n'), ((2638, 2683), 'voluptuous.Optional', 'vol.Optional', (['CONF_NAME'], {'default': 'DEFAULT_NAME'}), '(CONF_NAME, default=DEFAULT_NAME)\n', (2650, 2683), True, 'import voluptuous as vol\n'), ((2704, 2753), 'voluptuous.Optional', 'vol.Optional', (['CONF_OFFSET'], {'default': 'DEFAULT_OFFSET'}), '(CONF_OFFSET, default=DEFAULT_OFFSET)\n', (2716, 2753), True, 'import voluptuous as vol\n'), ((2780, 2807), 'voluptuous.Optional', 'vol.Optional', (['CONF_PROVINCE'], {}), '(CONF_PROVINCE)\n', (2792, 2807), True, 'import voluptuous as vol\n'), ((2828, 2881), 'voluptuous.Optional', 'vol.Optional', (['CONF_WORKDAYS'], {'default': 'DEFAULT_WORKDAYS'}), '(CONF_WORKDAYS, default=DEFAULT_WORKDAYS)\n', (2840, 2881), True, 'import voluptuous as vol\n'), ((2962, 2993), 'voluptuous.Optional', 'vol.Optional', (['CONF_ADD_HOLIDAYS'], {}), '(CONF_ADD_HOLIDAYS)\n', (2974, 2993), True, 'import voluptuous as vol\n'), ((2473, 2494), 'voluptuous.In', 'vol.In', (['ALL_COUNTRIES'], {}), '(ALL_COUNTRIES)\n', (2479, 2494), True, 'import voluptuous as vol\n'), ((2755, 2770), 'voluptuous.Coerce', 'vol.Coerce', (['int'], {}), '(int)\n', (2765, 2770), True, 'import voluptuous as vol\n'), ((2995, 3031), 'voluptuous.All', 'vol.All', (['cv.ensure_list', '[cv.string]'], {}), '(cv.ensure_list, [cv.string])\n', (3002, 3031), True, 'import voluptuous as vol\n'), ((3484, 3511), 'datetime.timedelta', 'timedelta', ([], {'days': 'days_offset'}), '(days=days_offset)\n', (3493, 3511), False, 'from datetime import datetime, timedelta\n'), ((6598, 6631), 'datetime.timedelta', 'timedelta', ([], {'days': 'self._days_offset'}), '(days=self._days_offset)\n', (6607, 6631), False, 'from datetime import datetime, timedelta\n'), ((2597, 2617), 'voluptuous.In', 'vol.In', (['ALLOWED_DAYS'], {}), '(ALLOWED_DAYS)\n', (2603, 2617), True, 'import voluptuous as vol\n'), ((2921, 2941), 'voluptuous.In', 'vol.In', (['ALLOWED_DAYS'], {}), '(ALLOWED_DAYS)\n', (2927, 2941), True, 'import voluptuous as vol\n'), ((3464, 3480), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (3478, 3480), False, 'from datetime import datetime, timedelta\n'), ((6578, 6594), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (6592, 6594), False, 'from datetime import datetime, timedelta\n')]
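Stripped of the Home Assistant plumbing, the sensor logic in the row above reduces to a weekday lookup plus two membership tests against a `holidays` calendar. A standalone sketch — the country and date are arbitrary examples:

from datetime import datetime
import holidays  # the same third-party package the component wraps

WEEKDAYS = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
obj_holidays = holidays.Germany(years=2019)  # stands in for getattr(holidays, country)(years=year)

date = datetime(2019, 12, 25)
day_of_week = WEEKDAYS[date.isoweekday() - 1]

is_workday = day_of_week in ["mon", "tue", "wed", "thu", "fri"]
if day_of_week in ["sat", "sun"] or date in obj_holidays:
    is_workday = False

print(is_workday)  # False: Christmas Day is a public holiday in Germany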
# -*- coding: utf-8 -*- r""" werkzeug.contrib.sessions ~~~~~~~~~~~~~~~~~~~~~~~~~ This module contains some helper classes that help one to add session support to a python WSGI application. For full client-side session storage see :mod:`~werkzeug.contrib.securecookie` which implements a secure, client-side session storage. Application Integration ======================= :: from werkzeug.contrib.sessions import SessionMiddleware, \ FilesystemSessionStore app = SessionMiddleware(app, FilesystemSessionStore()) The current session will then appear in the WSGI environment as `werkzeug.session`. However it's recommended to not use the middleware but the stores directly in the application. However for very simple scripts a middleware for sessions could be sufficient. This module does not implement methods or ways to check if a session is expired. That should be done by a cronjob and storage specific. For example to prune unused filesystem sessions one could check the modified time of the files. If sessions are stored in the database the new() method should add an expiration timestamp for the session. For better flexibility it's recommended to not use the middleware but the store and session object directly in the application dispatching:: session_store = FilesystemSessionStore() def application(environ, start_response): request = Request(environ) sid = request.cookies.get('cookie_name') if sid is None: request.session = session_store.new() else: request.session = session_store.get(sid) response = get_the_response_object(request) if request.session.should_save: session_store.save(request.session) response.set_cookie('cookie_name', request.session.sid) return response(environ, start_response) :copyright: 2007 Pallets :license: BSD-3-Clause """ import os import re import tempfile import warnings from hashlib import sha1 from os import path from pickle import dump from pickle import HIGHEST_PROTOCOL from pickle import load from random import random from time import time from .._compat import PY2 from .._compat import text_type from ..datastructures import CallbackDict from ..filesystem import get_filesystem_encoding from ..posixemulation import rename from ..utils import dump_cookie from ..utils import parse_cookie from ..wsgi import ClosingIterator warnings.warn( "'werkzeug.contrib.sessions' is deprecated as of version 0.15 and" " will be removed in version 1.0. It has moved to" " https://github.com/pallets/secure-cookie.", DeprecationWarning, stacklevel=2, ) _sha1_re = re.compile(r"^[a-f0-9]{40}$") def _urandom(): if hasattr(os, "urandom"): return os.urandom(30) return text_type(random()).encode("ascii") def generate_key(salt=None): if salt is None: salt = repr(salt).encode("ascii") return sha1(b"".join([salt, str(time()).encode("ascii"), _urandom()])).hexdigest() class ModificationTrackingDict(CallbackDict): __slots__ = ("modified",) def __init__(self, *args, **kwargs): def on_update(self): self.modified = True self.modified = False CallbackDict.__init__(self, on_update=on_update) dict.update(self, *args, **kwargs) def copy(self): """Create a flat copy of the dict.""" missing = object() result = object.__new__(self.__class__) for name in self.__slots__: val = getattr(self, name, missing) if val is not missing: setattr(result, name, val) return result def __copy__(self): return self.copy() class Session(ModificationTrackingDict): """Subclass of a dict that keeps track of direct object changes. Changes in mutable structures are not tracked, for those you have to set `modified` to `True` by hand. 
""" __slots__ = ModificationTrackingDict.__slots__ + ("sid", "new") def __init__(self, data, sid, new=False): ModificationTrackingDict.__init__(self, data) self.sid = sid self.new = new def __repr__(self): return "<%s %s%s>" % ( self.__class__.__name__, dict.__repr__(self), "*" if self.should_save else "", ) @property def should_save(self): """True if the session should be saved. .. versionchanged:: 0.6 By default the session is now only saved if the session is modified, not if it is new like it was before. """ return self.modified class SessionStore(object): """Baseclass for all session stores. The Werkzeug contrib module does not implement any useful stores besides the filesystem store, application developers are encouraged to create their own stores. :param session_class: The session class to use. Defaults to :class:`Session`. """ def __init__(self, session_class=None): if session_class is None: session_class = Session self.session_class = session_class def is_valid_key(self, key): """Check if a key has the correct format.""" return _sha1_re.match(key) is not None def generate_key(self, salt=None): """Simple function that generates a new session key.""" return generate_key(salt) def new(self): """Generate a new session.""" return self.session_class({}, self.generate_key(), True) def save(self, session): """Save a session.""" def save_if_modified(self, session): """Save if a session class wants an update.""" if session.should_save: self.save(session) def delete(self, session): """Delete a session.""" def get(self, sid): """Get a session for this sid or a new session object. This method has to check if the session key is valid and create a new session if that wasn't the case. """ return self.session_class({}, sid, True) #: used for temporary files by the filesystem session store _fs_transaction_suffix = ".__wz_sess" class FilesystemSessionStore(SessionStore): """Simple example session store that saves sessions on the filesystem. This store works best on POSIX systems and Windows Vista / Windows Server 2008 and newer. .. versionchanged:: 0.6 `renew_missing` was added. Previously this was considered `True`, now the default changed to `False` and it can be explicitly deactivated. :param path: the path to the folder used for storing the sessions. If not provided the default temporary directory is used. :param filename_template: a string template used to give the session a filename. ``%s`` is replaced with the session id. :param session_class: The session class to use. Defaults to :class:`Session`. :param renew_missing: set to `True` if you want the store to give the user a new sid if the session was not yet saved. """ def __init__( self, path=None, filename_template="werkzeug_%s.sess", session_class=None, renew_missing=False, mode=0o644, ): SessionStore.__init__(self, session_class) if path is None: path = tempfile.gettempdir() self.path = path if isinstance(filename_template, text_type) and PY2: filename_template = filename_template.encode(get_filesystem_encoding()) assert not filename_template.endswith(_fs_transaction_suffix), ( "filename templates may not end with %s" % _fs_transaction_suffix ) self.filename_template = filename_template self.renew_missing = renew_missing self.mode = mode def get_session_filename(self, sid): # out of the box, this should be a strict ASCII subset but # you might reconfigure the session object to have a more # arbitrary string. 
if isinstance(sid, text_type) and PY2: sid = sid.encode(get_filesystem_encoding()) return path.join(self.path, self.filename_template % sid) def save(self, session): fn = self.get_session_filename(session.sid) fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix, dir=self.path) f = os.fdopen(fd, "wb") try: dump(dict(session), f, HIGHEST_PROTOCOL) finally: f.close() try: rename(tmp, fn) os.chmod(fn, self.mode) except (IOError, OSError): pass def delete(self, session): fn = self.get_session_filename(session.sid) try: os.unlink(fn) except OSError: pass def get(self, sid): if not self.is_valid_key(sid): return self.new() try: f = open(self.get_session_filename(sid), "rb") except IOError: if self.renew_missing: return self.new() data = {} else: try: try: data = load(f) except Exception: data = {} finally: f.close() return self.session_class(data, sid, False) def list(self): """Lists all sessions in the store. .. versionadded:: 0.6 """ before, after = self.filename_template.split("%s", 1) filename_re = re.compile( r"%s(.{5,})%s$" % (re.escape(before), re.escape(after)) ) result = [] for filename in os.listdir(self.path): #: this is a session that is still being saved. if filename.endswith(_fs_transaction_suffix): continue match = filename_re.match(filename) if match is not None: result.append(match.group(1)) return result class SessionMiddleware(object): """A simple middleware that puts the session object of a store provided into the WSGI environ. It automatically sets cookies and restores sessions. However a middleware is not the preferred solution because it won't be as fast as sessions managed by the application itself and will put a key into the WSGI environment only relevant for the application which is against the concept of WSGI. The cookie parameters are the same as for the :func:`~dump_cookie` function just prefixed with ``cookie_``. Additionally `max_age` is called `cookie_age` and not `cookie_max_age` because of backwards compatibility. """ def __init__( self, app, store, cookie_name="session_id", cookie_age=None, cookie_expires=None, cookie_path="/", cookie_domain=None, cookie_secure=None, cookie_httponly=False, cookie_samesite="Lax", environ_key="werkzeug.session", ): self.app = app self.store = store self.cookie_name = cookie_name self.cookie_age = cookie_age self.cookie_expires = cookie_expires self.cookie_path = cookie_path self.cookie_domain = cookie_domain self.cookie_secure = cookie_secure self.cookie_httponly = cookie_httponly self.cookie_samesite = cookie_samesite self.environ_key = environ_key def __call__(self, environ, start_response): cookie = parse_cookie(environ.get("HTTP_COOKIE", "")) sid = cookie.get(self.cookie_name, None) if sid is None: session = self.store.new() else: session = self.store.get(sid) environ[self.environ_key] = session def injecting_start_response(status, headers, exc_info=None): if session.should_save: self.store.save(session) headers.append( ( "Set-Cookie", dump_cookie( self.cookie_name, session.sid, self.cookie_age, self.cookie_expires, self.cookie_path, self.cookie_domain, self.cookie_secure, self.cookie_httponly, samesite=self.cookie_samesite, ), ) ) return start_response(status, headers, exc_info) return ClosingIterator( self.app(environ, injecting_start_response), lambda: self.store.save_if_modified(session), )
[ "os.listdir", "os.chmod", "os.unlink", "tempfile.mkstemp", "tempfile.gettempdir", "re.escape", "time.time", "random.random", "pickle.load", "os.fdopen", "warnings.warn", "os.path.join", "os.urandom", "re.compile" ]
[((2579, 2794), 'warnings.warn', 'warnings.warn', (['"""\'werkzeug.contrib.sessions\' is deprecated as of version 0.15 and will be removed in version 1.0. It has moved to https://github.com/pallets/secure-cookie."""', 'DeprecationWarning'], {'stacklevel': '(2)'}), '(\n "\'werkzeug.contrib.sessions\' is deprecated as of version 0.15 and will be removed in version 1.0. It has moved to https://github.com/pallets/secure-cookie."\n , DeprecationWarning, stacklevel=2)\n', (2592, 2794), False, 'import warnings\n'), ((2826, 2854), 're.compile', 're.compile', (['"""^[a-f0-9]{40}$"""'], {}), "('^[a-f0-9]{40}$')\n", (2836, 2854), False, 'import re\n'), ((2920, 2934), 'os.urandom', 'os.urandom', (['(30)'], {}), '(30)\n', (2930, 2934), False, 'import os\n'), ((8442, 8492), 'os.path.join', 'path.join', (['self.path', '(self.filename_template % sid)'], {}), '(self.path, self.filename_template % sid)\n', (8451, 8492), False, 'from os import path\n'), ((8593, 8655), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '_fs_transaction_suffix', 'dir': 'self.path'}), '(suffix=_fs_transaction_suffix, dir=self.path)\n', (8609, 8655), False, 'import tempfile\n'), ((8668, 8687), 'os.fdopen', 'os.fdopen', (['fd', '"""wb"""'], {}), "(fd, 'wb')\n", (8677, 8687), False, 'import os\n'), ((9943, 9964), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (9953, 9964), False, 'import os\n'), ((7649, 7670), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (7668, 7670), False, 'import tempfile\n'), ((8846, 8869), 'os.chmod', 'os.chmod', (['fn', 'self.mode'], {}), '(fn, self.mode)\n', (8854, 8869), False, 'import os\n'), ((9031, 9044), 'os.unlink', 'os.unlink', (['fn'], {}), '(fn)\n', (9040, 9044), False, 'import os\n'), ((2956, 2964), 'random.random', 'random', ([], {}), '()\n', (2962, 2964), False, 'from random import random\n'), ((9446, 9453), 'pickle.load', 'load', (['f'], {}), '(f)\n', (9450, 9453), False, 'from pickle import load\n'), ((9852, 9869), 're.escape', 're.escape', (['before'], {}), '(before)\n', (9861, 9869), False, 'import re\n'), ((9871, 9887), 're.escape', 're.escape', (['after'], {}), '(after)\n', (9880, 9887), False, 'import re\n'), ((3112, 3118), 'time.time', 'time', ([], {}), '()\n', (3116, 3118), False, 'from time import time\n')]
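The store API in the row above can be exercised on its own, mirroring the dispatching pattern its module docstring describes (this assumes a pre-1.0 werkzeug that still ships `werkzeug.contrib`):

from werkzeug.contrib.sessions import FilesystemSessionStore

store = FilesystemSessionStore()       # defaults to the system temp directory
session = store.new()                 # fresh Session with a random 40-char sid
session["visits"] = 1                 # direct assignment flips `modified`
store.save_if_modified(session)       # writes werkzeug_<sid>.sess via temp file + rename

reloaded = store.get(session.sid)     # round-trips through pickle on disk
assert reloaded["visits"] == 1
store.delete(reloaded)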
from typing import Dict
import copy


def align_dictionaries(a: Dict, b: Dict) -> Dict:
    """Return dictionary a aligned to dictionary b.

    Parameters
    ---------------------------------
    a: Dict,
        The dictionary to be aligned to the second one, adding any
        missing keys with value 0 as required.
    b: Dict,
        The dictionary to align to.

    Returns
    ----------------------------------
    Aligned copy of dictionary a.
    """
    a = copy.deepcopy(a)
    for key in set(b.keys()) - set(a.keys()):
        a[key] = 0
    return a
[ "copy.deepcopy" ]
[((451, 467), 'copy.deepcopy', 'copy.deepcopy', (['a'], {}), '(a)\n', (464, 467), False, 'import copy\n')]
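A quick usage example for the helper above (reusing `align_dictionaries` as defined in the row):

aligned = align_dictionaries({"x": 3}, {"x": 0, "y": 7, "z": 7})
assert aligned == {"x": 3, "y": 0, "z": 0}  # missing keys added with value 0
# The input is deep-copied first, so the caller's dictionary is left untouched.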
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""

from functions_scenario import derivatives, sat_conc

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas import DataFrame

# Start hour h2 is for test only - in the live version this will be the current time

h2 = 1095
h1 = h2-240 # select previous 10 days

ndp = int((h2-h1)/12) # number of data points used for calibration

# Read in the calibrated parameters output by the calibration model
# Stored in database? Currently output to csv file

ACHinp = np.genfromtxt('ACH_out.csv', delimiter=',')
IASinp = np.genfromtxt('IAS_out.csv', delimiter=',')

# Calculate mean calibrated ACH, IAS and 5%, 95% quantiles

ACHcal = ACHinp[1:,-1*ndp:]*9+1 # selects ACH values corresponding to the last ndp data points
ACHmean = np.mean(ACHcal,0)
ACHuq = np.quantile(ACHcal,0.95,0)
ACHlq = np.quantile(ACHcal,0.05,0)

IAScal = IASinp[1:,-1*ndp:]*0.75+0.1
IASmean = np.mean(IAScal,0)
IASuq = np.quantile(IAScal,0.95,0)
IASlq = np.quantile(IAScal,0.05,0)

# Set up parameters for runs: 1) BAU mean, 2) Scenario, 3) BAU UQ, 4) BAU LQ
# Scenario values N, ndh and lshift will come from sliders on dashboard

test = np.zeros((ndp,4,4))

#for i in range(np.size(test,2)):

test[:,0,0] = ACHmean
test[:,1,0] = IASmean
test[:,2,0] = 1
test[:,3,0] = 0

test[:,0,1] = ACHmean
test[:,1,1] = IASmean
test[:,2,1] = 1
test[:,3,1] = 0

test[:,0,2] = ACHuq
test[:,1,2] = IASlq
test[:,2,2] = 1
test[:,3,2] = 0

test[:,0,3] = ACHlq
test[:,1,3] = IASuq
test[:,2,3] = 1
test[:,3,3] = 0

# Scenario 1 - vary ACH
N = 1 # ventilation rate input from slider

ScenEval = np.zeros((8,4,4))
ScenEval[:,0,0] = ACHmean[-1]
ScenEval[:,0,1] = N # input from slider
ScenEval[:,0,2] = ACHuq[-1]
ScenEval[:,0,3] = ACHlq[-1]

ScenEval[:,1,0] = IASmean[-1]
ScenEval[:,1,1] = IASmean[-1]
ScenEval[:,1,2] = IASlq[-1]
ScenEval[:,1,3] = IASuq[-1]

# Scenario 2 - vary number of dehumidifiers
ndh = 2 # number of dehumidifiers
ScenEval[:,2,0] = 1
ScenEval[:,2,1] = ndh/2 # ndh input from slider (integer) (/2 as half farm modelled)
ScenEval[:,2,2] = 1
ScenEval[:,2,3] = 1

# Scenario 3 - shift lighting schedule (+/-hours)
lshift = -3
ScenEval[:,3,0] = 1
ScenEval[:,3,1] = lshift # lshift input from slider
ScenEval[:,3,2] = 1
ScenEval[:,3,3] = 1

params = np.concatenate((test, ScenEval)) # put scenario on the end of the calibrated parameters

## Run model, using time varying ACH, IAS corresponding to outputs from calibration for
# first 10 days, then scenario evaluation values for last 3 days

results = derivatives(h1, h2, params, ndp)

T_air = results[1,:,:]
Cw_air = results[11,:,:]
RH_air = Cw_air/sat_conc(T_air)

## Plot Results

p1 = h1 # start hour
delta_h = 12 # hours between data points
p2 = ndp*delta_h+p1 # end data point

seq = np.linspace(p1,p2,ndp+1,endpoint='true')

date_cols = ["DateTimex"]
Data = pd.read_csv("TRHE2018.csv", parse_dates=date_cols)
RHData = Data['MidFarmRH2']
TData = Data['MidFarmT']

#t = np.linspace(h1,h2+3*24,1+240+3*24)
t = np.linspace(h1-h2,3*24,1+240+3*24)
t1 = np.linspace(0,3*24,1+3*24)
td = np.linspace(h1-h2,0,21)

dpRH = RHData[seq]
dpT = TData[seq]+273.15
dpCw = dpRH/100 * sat_conc(dpT)

fig = plt.figure()

ax1 = fig.add_subplot(1,2,1)
plt.plot(t,T_air[:,0]-273.15,'r')
#p1 = plt.plot(t,T_air[:,2]-273.15,'r:')
#p2 = plt.plot(t,T_air[:,3]-273.15,'r:')
ax1.fill_between(t[:-73], T_air[:-73,2]-273.15, T_air[:-73,3]-273.15, color='red', alpha = 0.2)
#plt.plot(t,T_air[:,1],'b--')
plt.plot(t1,T_air[-73:,1]-273.15,'b--')
plt.scatter(td,dpT-273.15, marker='.', color='k')
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
ax1.set_xlabel('Hour', fontsize=8)
ax1.set_ylabel('Temperature ($\mathregular{^{o}}$C)', fontsize=8)
ax1.set_title('Temperature', fontsize=10)
ax1.axvline(x=0, color='k')
ax1.set_xlim(-120, 72)

ax3 = fig.add_subplot(1,2,2)
lbl1 = str(int(N)) + ' ACH'
lbl2 = ', ' + str(int(ndh)) + ' DH'
lbl3 = ', ' + str(int(lshift)) + ' hours'
plt.plot(t,100*RH_air[:,0],'r', label='BAU')
ax3.fill_between(t[:-73], 100*RH_air[:-73,2], 100*RH_air[:-73,3], color='red', alpha = 0.2)
#plt.plot(t,100*RH_air[:,1],'b--', label='Max N')
plt.plot(t1,100*RH_air[-73:,1],'b--', label=lbl1 + lbl2 + lbl3)
plt.scatter(td,dpRH, marker='.', color='k', label='Data')
ax3.set_title('Relative Humidity', fontsize=10)
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
ax3.set_xlabel('Hour', fontsize=8)
ax3.set_ylabel('Relative Humidity (%)', fontsize=8)
ax3.legend(loc='best', fontsize='small')
ax3.axvline(x=0, color='k')
ax3.set_xlim(-120, 72)

plt.subplots_adjust(wspace=0.5)

# Calculate statistics and produce pie charts. Note: is a 'too cold' category needed as well? Check
# setpoints with Mel

setTmax = 25 + 273.15
setTmin = 20 + 273.15
setRHmax = 0.85
setRHmin = 0.5

TBAUstat = T_air[-73:,0]
TSEstat = T_air[-73:,1]
RHBAUstat = RH_air[-73:,0]
RHSEstat = RH_air[-73:,1]

testTBAU = TBAUstat>setTmax
testTBAU_low = TBAUstat<setTmin
testTSE = TSEstat>setTmax
testTSE_low = TSEstat<setTmin

testRHBAU = RHBAUstat>setRHmax
testRHBAU_low = RHBAUstat<setRHmin
testRHSE = RHSEstat>setRHmax
testRHSE_low = RHSEstat<setRHmin

#y1 = ([np.sum(testTBAU), 72])
y1 = {'T<Tmin': np.sum(testTBAU_low), 'T OK': 72-np.sum(testTBAU)-np.sum(testTBAU_low), 'T>Tset': np.sum(testTBAU)}
names1 = [key for key,value in y1.items() if value!=0]
values1 = [value for value in y1.values() if value!=0]

#y2 = ([np.sum(testTSE), 72])
#y2 = {'T<Tset': 72-np.sum(testTSE), 'T>Tset': np.sum(testTSE)}
y2 = {'T<Tmin': np.sum(testTSE_low), 'T OK': 72-np.sum(testTSE)-np.sum(testTSE_low), 'T>Tset': np.sum(testTSE)}
names2 = [key for key,value in y2.items() if value!=0]
values2 = [value for value in y2.values() if value!=0]

#y3 = ([np.sum(testRHBAU), 72])
#y3 = {'RH<RHset': 72-np.sum(testRHBAU), 'RH>RHset': np.sum(testRHBAU)}
y3 = {'RH<RHmin': np.sum(testRHBAU_low), 'RH OK': 72-np.sum(testRHBAU)-np.sum(testRHBAU_low), 'RH>RHset': np.sum(testRHBAU)}
names3 = [key for key,value in y3.items()]
values3 = [value for value in y3.values()]

#y4 = ([np.sum(testRHSE), 72])
#y4 = {'RH<RHset': 72-np.sum(testRHSE), 'RH>RHset': np.sum(testRHSE)}
y4 = {'RH<RHmin': np.sum(testRHSE_low), 'RH OK': 72-np.sum(testRHSE)-np.sum(testRHSE_low), 'RH>RHset': np.sum(testRHSE)}
names4 = [key for key,value in y4.items()]
values4 = [value for value in y4.values()]

fig2 = plt.figure()

#Tlabels = ['T>Tset', 'T<Tset']
#RHlabels = ['RH>RHset', 'RH<RHset']

ax11 = fig2.add_subplot(2,2,1)
plt.pie(values1, colors = ['blue','green','red'], startangle = 90, labels = names1, textprops={'fontsize': 8})
ax11.set_title('Temperature - BAU', fontsize = 8)

ax12 = fig2.add_subplot(2,2,3)
plt.pie(values2, colors = ['blue','green','red'], startangle = 90, labels = names2, textprops={'fontsize': 8})
ax12.set_title('Temperature - Scenario', fontsize = 8)

ax31 = fig2.add_subplot(2,2,2)
plt.pie(values3, colors = ['blue','green','red'], startangle = 90, labels = names3, textprops={'fontsize': 8})
ax31.set_title('RH - BAU', fontsize = 8)

ax32 = fig2.add_subplot(2,2,4)
plt.pie(values4, colors = ['blue','green','red'], startangle = 90, labels = names4, textprops={'fontsize': 8})
ax32.set_title('RH - Scenario', fontsize = 8)
[ "numpy.quantile", "numpy.sum", "matplotlib.pyplot.plot", "pandas.read_csv", "matplotlib.pyplot.scatter", "functions_scenario.derivatives", "matplotlib.pyplot.yticks", "numpy.zeros", "numpy.genfromtxt", "matplotlib.pyplot.figure", "numpy.mean", "numpy.linspace", "matplotlib.pyplot.subplots_adjust", "functions_scenario.sat_conc", "matplotlib.pyplot.xticks", "numpy.concatenate", "matplotlib.pyplot.pie" ]
[((544, 587), 'numpy.genfromtxt', 'np.genfromtxt', (['"""ACH_out.csv"""'], {'delimiter': '""","""'}), "('ACH_out.csv', delimiter=',')\n", (557, 587), True, 'import numpy as np\n'), ((597, 640), 'numpy.genfromtxt', 'np.genfromtxt', (['"""IAS_out.csv"""'], {'delimiter': '""","""'}), "('IAS_out.csv', delimiter=',')\n", (610, 640), True, 'import numpy as np\n'), ((807, 825), 'numpy.mean', 'np.mean', (['ACHcal', '(0)'], {}), '(ACHcal, 0)\n', (814, 825), True, 'import numpy as np\n'), ((833, 861), 'numpy.quantile', 'np.quantile', (['ACHcal', '(0.95)', '(0)'], {}), '(ACHcal, 0.95, 0)\n', (844, 861), True, 'import numpy as np\n'), ((868, 896), 'numpy.quantile', 'np.quantile', (['ACHcal', '(0.05)', '(0)'], {}), '(ACHcal, 0.05, 0)\n', (879, 896), True, 'import numpy as np\n'), ((943, 961), 'numpy.mean', 'np.mean', (['IAScal', '(0)'], {}), '(IAScal, 0)\n', (950, 961), True, 'import numpy as np\n'), ((969, 997), 'numpy.quantile', 'np.quantile', (['IAScal', '(0.95)', '(0)'], {}), '(IAScal, 0.95, 0)\n', (980, 997), True, 'import numpy as np\n'), ((1004, 1032), 'numpy.quantile', 'np.quantile', (['IAScal', '(0.05)', '(0)'], {}), '(IAScal, 0.05, 0)\n', (1015, 1032), True, 'import numpy as np\n'), ((1189, 1210), 'numpy.zeros', 'np.zeros', (['(ndp, 4, 4)'], {}), '((ndp, 4, 4))\n', (1197, 1210), True, 'import numpy as np\n'), ((1622, 1641), 'numpy.zeros', 'np.zeros', (['(8, 4, 4)'], {}), '((8, 4, 4))\n', (1630, 1641), True, 'import numpy as np\n'), ((2296, 2328), 'numpy.concatenate', 'np.concatenate', (['(test, ScenEval)'], {}), '((test, ScenEval))\n', (2310, 2328), True, 'import numpy as np\n'), ((2551, 2583), 'functions_scenario.derivatives', 'derivatives', (['h1', 'h2', 'params', 'ndp'], {}), '(h1, h2, params, ndp)\n', (2562, 2583), False, 'from functions_scenario import derivatives, sat_conc\n'), ((2796, 2841), 'numpy.linspace', 'np.linspace', (['p1', 'p2', '(ndp + 1)'], {'endpoint': '"""true"""'}), "(p1, p2, ndp + 1, endpoint='true')\n", (2807, 2841), True, 'import numpy as np\n'), ((2871, 2921), 'pandas.read_csv', 'pd.read_csv', (['"""TRHE2018.csv"""'], {'parse_dates': 'date_cols'}), "('TRHE2018.csv', parse_dates=date_cols)\n", (2882, 2921), True, 'import pandas as pd\n'), ((3022, 3068), 'numpy.linspace', 'np.linspace', (['(h1 - h2)', '(3 * 24)', '(1 + 240 + 3 * 24)'], {}), '(h1 - h2, 3 * 24, 1 + 240 + 3 * 24)\n', (3033, 3068), True, 'import numpy as np\n'), ((3062, 3096), 'numpy.linspace', 'np.linspace', (['(0)', '(3 * 24)', '(1 + 3 * 24)'], {}), '(0, 3 * 24, 1 + 3 * 24)\n', (3073, 3096), True, 'import numpy as np\n'), ((3094, 3121), 'numpy.linspace', 'np.linspace', (['(h1 - h2)', '(0)', '(21)'], {}), '(h1 - h2, 0, 21)\n', (3105, 3121), True, 'import numpy as np\n'), ((3201, 3213), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3211, 3213), True, 'import matplotlib.pyplot as plt\n'), ((3244, 3282), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(T_air[:, 0] - 273.15)', '"""r"""'], {}), "(t, T_air[:, 0] - 273.15, 'r')\n", (3252, 3282), True, 'import matplotlib.pyplot as plt\n'), ((3486, 3530), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', '(T_air[-73:, 1] - 273.15)', '"""b--"""'], {}), "(t1, T_air[-73:, 1] - 273.15, 'b--')\n", (3494, 3530), True, 'import matplotlib.pyplot as plt\n'), ((3526, 3578), 'matplotlib.pyplot.scatter', 'plt.scatter', (['td', '(dpT - 273.15)'], {'marker': '"""."""', 'color': '"""k"""'}), "(td, dpT - 273.15, marker='.', color='k')\n", (3537, 3578), True, 'import matplotlib.pyplot as plt\n'), ((3576, 3598), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': 
'(8)'}), '(fontsize=8)\n', (3586, 3598), True, 'import matplotlib.pyplot as plt\n'), ((3599, 3621), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(8)'}), '(fontsize=8)\n', (3609, 3621), True, 'import matplotlib.pyplot as plt\n'), ((3952, 4001), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(100 * RH_air[:, 0])', '"""r"""'], {'label': '"""BAU"""'}), "(t, 100 * RH_air[:, 0], 'r', label='BAU')\n", (3960, 4001), True, 'import matplotlib.pyplot as plt\n'), ((4140, 4208), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', '(100 * RH_air[-73:, 1])', '"""b--"""'], {'label': '(lbl1 + lbl2 + lbl3)'}), "(t1, 100 * RH_air[-73:, 1], 'b--', label=lbl1 + lbl2 + lbl3)\n", (4148, 4208), True, 'import matplotlib.pyplot as plt\n'), ((4205, 4263), 'matplotlib.pyplot.scatter', 'plt.scatter', (['td', 'dpRH'], {'marker': '"""."""', 'color': '"""k"""', 'label': '"""Data"""'}), "(td, dpRH, marker='.', color='k', label='Data')\n", (4216, 4263), True, 'import matplotlib.pyplot as plt\n'), ((4311, 4333), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(8)'}), '(fontsize=8)\n', (4321, 4333), True, 'import matplotlib.pyplot as plt\n'), ((4334, 4356), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(8)'}), '(fontsize=8)\n', (4344, 4356), True, 'import matplotlib.pyplot as plt\n'), ((4538, 4569), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.5)'}), '(wspace=0.5)\n', (4557, 4569), True, 'import matplotlib.pyplot as plt\n'), ((6307, 6319), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6317, 6319), True, 'import matplotlib.pyplot as plt\n'), ((6423, 6534), 'matplotlib.pyplot.pie', 'plt.pie', (['values1'], {'colors': "['blue', 'green', 'red']", 'startangle': '(90)', 'labels': 'names1', 'textprops': "{'fontsize': 8}"}), "(values1, colors=['blue', 'green', 'red'], startangle=90, labels=\n names1, textprops={'fontsize': 8})\n", (6430, 6534), True, 'import matplotlib.pyplot as plt\n'), ((6624, 6735), 'matplotlib.pyplot.pie', 'plt.pie', (['values2'], {'colors': "['blue', 'green', 'red']", 'startangle': '(90)', 'labels': 'names2', 'textprops': "{'fontsize': 8}"}), "(values2, colors=['blue', 'green', 'red'], startangle=90, labels=\n names2, textprops={'fontsize': 8})\n", (6631, 6735), True, 'import matplotlib.pyplot as plt\n'), ((6824, 6935), 'matplotlib.pyplot.pie', 'plt.pie', (['values3'], {'colors': "['blue', 'green', 'red']", 'startangle': '(90)', 'labels': 'names3', 'textprops': "{'fontsize': 8}"}), "(values3, colors=['blue', 'green', 'red'], startangle=90, labels=\n names3, textprops={'fontsize': 8})\n", (6831, 6935), True, 'import matplotlib.pyplot as plt\n'), ((7017, 7128), 'matplotlib.pyplot.pie', 'plt.pie', (['values4'], {'colors': "['blue', 'green', 'red']", 'startangle': '(90)', 'labels': 'names4', 'textprops': "{'fontsize': 8}"}), "(values4, colors=['blue', 'green', 'red'], startangle=90, labels=\n names4, textprops={'fontsize': 8})\n", (7024, 7128), True, 'import matplotlib.pyplot as plt\n'), ((2654, 2669), 'functions_scenario.sat_conc', 'sat_conc', (['T_air'], {}), '(T_air)\n', (2662, 2669), False, 'from functions_scenario import derivatives, sat_conc\n'), ((3180, 3193), 'functions_scenario.sat_conc', 'sat_conc', (['dpT'], {}), '(dpT)\n', (3188, 3193), False, 'from functions_scenario import derivatives, sat_conc\n'), ((5147, 5167), 'numpy.sum', 'np.sum', (['testTBAU_low'], {}), '(testTBAU_low)\n', (5153, 5167), True, 'import numpy as np\n'), ((5229, 5245), 'numpy.sum', 'np.sum', (['testTBAU'], {}), '(testTBAU)\n', (5235, 5245), True, 
'import numpy as np\n'), ((5468, 5487), 'numpy.sum', 'np.sum', (['testTSE_low'], {}), '(testTSE_low)\n', (5474, 5487), True, 'import numpy as np\n'), ((5547, 5562), 'numpy.sum', 'np.sum', (['testTSE'], {}), '(testTSE)\n', (5553, 5562), True, 'import numpy as np\n'), ((5797, 5818), 'numpy.sum', 'np.sum', (['testRHBAU_low'], {}), '(testRHBAU_low)\n', (5803, 5818), True, 'import numpy as np\n'), ((5885, 5902), 'numpy.sum', 'np.sum', (['testRHBAU'], {}), '(testRHBAU)\n', (5891, 5902), True, 'import numpy as np\n'), ((6110, 6130), 'numpy.sum', 'np.sum', (['testRHSE_low'], {}), '(testRHSE_low)\n', (6116, 6130), True, 'import numpy as np\n'), ((6195, 6211), 'numpy.sum', 'np.sum', (['testRHSE'], {}), '(testRHSE)\n', (6201, 6211), True, 'import numpy as np\n'), ((5197, 5217), 'numpy.sum', 'np.sum', (['testTBAU_low'], {}), '(testTBAU_low)\n', (5203, 5217), True, 'import numpy as np\n'), ((5516, 5535), 'numpy.sum', 'np.sum', (['testTSE_low'], {}), '(testTSE_low)\n', (5522, 5535), True, 'import numpy as np\n'), ((5850, 5871), 'numpy.sum', 'np.sum', (['testRHBAU_low'], {}), '(testRHBAU_low)\n', (5856, 5871), True, 'import numpy as np\n'), ((6161, 6181), 'numpy.sum', 'np.sum', (['testRHSE_low'], {}), '(testRHSE_low)\n', (6167, 6181), True, 'import numpy as np\n'), ((5180, 5196), 'numpy.sum', 'np.sum', (['testTBAU'], {}), '(testTBAU)\n', (5186, 5196), True, 'import numpy as np\n'), ((5500, 5515), 'numpy.sum', 'np.sum', (['testTSE'], {}), '(testTSE)\n', (5506, 5515), True, 'import numpy as np\n'), ((5832, 5849), 'numpy.sum', 'np.sum', (['testRHBAU'], {}), '(testRHBAU)\n', (5838, 5849), True, 'import numpy as np\n'), ((6144, 6160), 'numpy.sum', 'np.sum', (['testRHSE'], {}), '(testRHSE)\n', (6150, 6160), True, 'import numpy as np\n')]
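The pie-chart inputs computed above are just counts of hours outside the setpoint band. The same bookkeeping in isolation, with a hypothetical temperature trace:

import numpy as np

set_min, set_max = 20.0 + 273.15, 25.0 + 273.15            # Kelvin setpoints, as in the script
temps = 273.15 + np.array([18.0, 21.0, 24.0, 26.5, 23.0])  # made-up hourly means

too_cold = np.sum(temps < set_min)                        # cf. testTBAU_low
too_hot = np.sum(temps > set_max)                         # cf. testTBAU
ok = temps.size - too_cold - too_hot
print({'T<Tmin': int(too_cold), 'T OK': int(ok), 'T>Tset': int(too_hot)})
# {'T<Tmin': 1, 'T OK': 3, 'T>Tset': 1}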
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'ipetrash'


"""Script for notifications about the appearance of new games in the Spirits of Mystery series."""


# So that all_common.py, which lives one level up, can be imported
import sys
sys.path.append('..')

# So that the function for fetching the list of games can be imported
sys.path.append('../../bigfishgames_com__hidden_object')

from all_common import make_backslashreplace_console, run_notification_job
from find__Spirits_of_Mystery__CE import get_games


make_backslashreplace_console()

run_notification_job(
    'new game Spirits of Mystery',
    'games',
    get_games,
    notified_by_sms=True,
    format_current_items='Текущий список игр (%s): %s',  # "Current list of games (%s): %s"
    format_get_items='Запрос списка игр',                # "Requesting the list of games"
    format_items='Список игр (%s): %s',                  # "List of games (%s): %s"
    format_new_item='Появилась новая игра "%s"',         # "A new game \"%s\" has appeared"
    format_no_new_items='Новых игр нет',                 # "No new games"
)
[ "sys.path.append", "all_common.run_notification_job", "all_common.make_backslashreplace_console" ]
[((240, 261), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (255, 261), False, 'import sys\n'), ((318, 374), 'sys.path.append', 'sys.path.append', (['"""../../bigfishgames_com__hidden_object"""'], {}), "('../../bigfishgames_com__hidden_object')\n", (333, 374), False, 'import sys\n'), ((504, 535), 'all_common.make_backslashreplace_console', 'make_backslashreplace_console', ([], {}), '()\n', (533, 535), False, 'from all_common import make_backslashreplace_console, run_notification_job\n'), ((538, 857), 'all_common.run_notification_job', 'run_notification_job', (['"""new game Spirits of Mystery"""', '"""games"""', 'get_games'], {'notified_by_sms': '(True)', 'format_current_items': '"""Текущий список игр (%s): %s"""', 'format_get_items': '"""Запрос списка игр"""', 'format_items': '"""Список игр (%s): %s"""', 'format_new_item': '"""Появилась новая игра "%s\\""""', 'format_no_new_items': '"""Новых игр нет"""'}), '(\'new game Spirits of Mystery\', \'games\', get_games,\n notified_by_sms=True, format_current_items=\n \'Текущий список игр (%s): %s\', format_get_items=\'Запрос списка игр\',\n format_items=\'Список игр (%s): %s\', format_new_item=\n \'Появилась новая игра "%s"\', format_no_new_items=\'Новых игр нет\')\n', (558, 857), False, 'from all_common import make_backslashreplace_console, run_notification_job\n')]
# Generated by Django 3.2 on 2021-05-11 23:22 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('respostas', '0009_resposta_pergunta_respondida'), ] operations = [ migrations.RenameField( model_name='resposta', old_name='pergunta_respondida', new_name='respondida', ), ]
[ "django.db.migrations.RenameField" ]
[((237, 342), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""resposta"""', 'old_name': '"""pergunta_respondida"""', 'new_name': '"""respondida"""'}), "(model_name='resposta', old_name=\n 'pergunta_respondida', new_name='respondida')\n", (259, 342), False, 'from django.db import migrations\n')]
import nltk

texto = 'Hello, My name is Victor. How are you?'

# Splitting a string into an array of substrings wherever a period ends a sentence
frases = nltk.tokenize.sent_tokenize(texto)
print(frases)

# Tokenizing a sentence
tokens = nltk.word_tokenize(texto)
print(tokens)

'''Getting a list of tuples where each element corresponds to a pair of a word
and its part-of-speech tag'''
classes = nltk.pos_tag(tokens)
print(classes)
# For more information about the part-of-speech tags, visit https://cs.nyu.edu/grishman/jet/guide/PennPOS.html

# Entity detection
entidades = nltk.chunk.ne_chunk(classes)
print(entidades)

'''NOTE: NLTK does not support Portuguese well. Even when passing language='portuguese'
to sent_tokenize(), the algorithm handles tokenization as if the words were in English'''
[ "nltk.pos_tag", "nltk.tokenize.sent_tokenize", "nltk.chunk.ne_chunk", "nltk.word_tokenize" ]
[((158, 192), 'nltk.tokenize.sent_tokenize', 'nltk.tokenize.sent_tokenize', (['texto'], {}), '(texto)\n', (185, 192), False, 'import nltk\n'), ((241, 266), 'nltk.word_tokenize', 'nltk.word_tokenize', (['texto'], {}), '(texto)\n', (259, 266), False, 'import nltk\n'), ((420, 440), 'nltk.pos_tag', 'nltk.pos_tag', (['tokens'], {}), '(tokens)\n', (432, 440), False, 'import nltk\n'), ((612, 640), 'nltk.chunk.ne_chunk', 'nltk.chunk.ne_chunk', (['classes'], {}), '(classes)\n', (631, 640), False, 'import nltk\n')]
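The tokenizer, tagger, and chunker used above all depend on NLTK data packages that are installed separately from the library itself; a sketch of the downloads the script typically needs first (resource names as in classic NLTK releases):

import nltk

for resource in ("punkt",                       # sent_tokenize / word_tokenize
                 "averaged_perceptron_tagger",  # pos_tag
                 "maxent_ne_chunker",           # chunk.ne_chunk
                 "words"):                      # lexicon the chunker relies on
    nltk.download(resource)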
__all__ = [ "GUIFrame" ] from six.moves.tkinter import ( Frame ) from .tk_unbind import ( unbind ) class GUIFrame(Frame): unbind = unbind def __init__(self, *args, **kwargs): Frame.__init__(self, *args, **kwargs) def enqueue(self, co_task): "Its toplevel must be GUITk." self.winfo_toplevel().enqueue(co_task) def cancel_task(self, co_task): "Its toplevel must be GUITk." self.winfo_toplevel().cancel_task(co_task)
[ "six.moves.tkinter.Frame.__init__" ]
[((206, 243), 'six.moves.tkinter.Frame.__init__', 'Frame.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (220, 243), False, 'from six.moves.tkinter import Frame\n')]
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. from abc import abstractmethod from copy import deepcopy from typing import Callable, Dict, Union from jsonpickle.pickler import Pickler from botbuilder.core.state_property_accessor import StatePropertyAccessor from .bot_assert import BotAssert from .turn_context import TurnContext from .storage import Storage from .property_manager import PropertyManager class CachedBotState: """ Internal cached bot state. """ def __init__(self, state: Dict[str, object] = None): self.state = state if state is not None else {} self.hash = self.compute_hash(state) @property def is_changed(self) -> bool: return self.hash != self.compute_hash(self.state) def compute_hash(self, obj: object) -> str: return str(Pickler().flatten(obj)) class BotState(PropertyManager): """ Defines a state management object and automates the reading and writing of associated state properties to a storage layer. .. remarks:: Each state management object defines a scope for a storage layer. State properties are created within a state management scope, and the Bot Framework defines these scopes: :class:`ConversationState`, :class:`UserState`, and :class:`PrivateConversationState`. You can define additional scopes for your bot. """ def __init__(self, storage: Storage, context_service_key: str): """ Initializes a new instance of the :class:`BotState` class. :param storage: The storage layer this state management object will use to store and retrieve state :type storage: :class:`bptbuilder.core.Storage` :param context_service_key: The key for the state cache for this :class:`BotState` :type context_service_key: str .. remarks:: This constructor creates a state management object and associated scope. The object uses the :param storage: to persist state property values and the :param context_service_key: to cache state within the context for each turn. :raises: It raises an argument null exception. """ self.state_key = "state" self._storage = storage self._context_service_key = context_service_key def get_cached_state(self, turn_context: TurnContext): """ Gets the cached bot state instance that wraps the raw cached data for this "BotState" from the turn context. :param turn_context: The context object for this turn. :type turn_context: :class:`TurnContext` :return: The cached bot state instance. """ BotAssert.context_not_none(turn_context) return turn_context.turn_state.get(self._context_service_key) def create_property(self, name: str) -> StatePropertyAccessor: """ Creates a property definition and registers it with this :class:`BotState`. :param name: The name of the property :type name: str :return: If successful, the state property accessor created :rtype: :class:`StatePropertyAccessor` """ if not name: raise TypeError("BotState.create_property(): name cannot be None or empty.") return BotStatePropertyAccessor(self, name) def get(self, turn_context: TurnContext) -> Dict[str, object]: BotAssert.context_not_none(turn_context) cached = self.get_cached_state(turn_context) return getattr(cached, "state", None) async def load(self, turn_context: TurnContext, force: bool = False) -> None: """ Reads the current state object and caches it in the context object for this turn. 
:param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :param force: Optional, true to bypass the cache :type force: bool """ BotAssert.context_not_none(turn_context) cached_state = self.get_cached_state(turn_context) storage_key = self.get_storage_key(turn_context) if force or not cached_state or not cached_state.state: items = await self._storage.read([storage_key]) val = items.get(storage_key) turn_context.turn_state[self._context_service_key] = CachedBotState(val) async def save_changes( self, turn_context: TurnContext, force: bool = False ) -> None: """ Saves the state cached in the current context for this turn. If the state has changed, it saves the state cached in the current context for this turn. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :param force: Optional, true to save state to storage whether or not there are changes :type force: bool """ BotAssert.context_not_none(turn_context) cached_state = self.get_cached_state(turn_context) if force or (cached_state is not None and cached_state.is_changed): storage_key = self.get_storage_key(turn_context) changes: Dict[str, object] = {storage_key: cached_state.state} await self._storage.write(changes) cached_state.hash = cached_state.compute_hash(cached_state.state) async def clear_state(self, turn_context: TurnContext): """ Clears any state currently stored in this state scope. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :return: None .. remarks:: This function must be called in order for the cleared state to be persisted to the underlying store. """ BotAssert.context_not_none(turn_context) # Explicitly setting the hash will mean IsChanged is always true. And that will force a Save. cache_value = CachedBotState() cache_value.hash = "" turn_context.turn_state[self._context_service_key] = cache_value async def delete(self, turn_context: TurnContext) -> None: """ Deletes any state currently stored in this state scope. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :return: None """ BotAssert.context_not_none(turn_context) turn_context.turn_state.pop(self._context_service_key) storage_key = self.get_storage_key(turn_context) await self._storage.delete({storage_key}) @abstractmethod def get_storage_key(self, turn_context: TurnContext) -> str: raise NotImplementedError() async def get_property_value(self, turn_context: TurnContext, property_name: str): """ Gets the value of the specified property in the turn context. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :param property_name: The property name :type property_name: str :return: The value of the property """ BotAssert.context_not_none(turn_context) if not property_name: raise TypeError( "BotState.get_property_value(): property_name cannot be None." ) cached_state = self.get_cached_state(turn_context) # if there is no value, this will throw, to signal to IPropertyAccesor that a default value should be computed # This allows this to work with value types return cached_state.state[property_name] async def delete_property_value( self, turn_context: TurnContext, property_name: str ) -> None: """ Deletes a property from the state cache in the turn context. 
:param turn_context: The context object for this turn :type turn_context: :TurnContext` :param property_name: The name of the property to delete :type property_name: str :return: None """ BotAssert.context_not_none(turn_context) if not property_name: raise TypeError("BotState.delete_property(): property_name cannot be None.") cached_state = self.get_cached_state(turn_context) del cached_state.state[property_name] async def set_property_value( self, turn_context: TurnContext, property_name: str, value: object ) -> None: """ Sets a property to the specified value in the turn context. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :param property_name: The property name :type property_name: str :param value: The value to assign to the property :type value: Object :return: None """ BotAssert.context_not_none(turn_context) if not property_name: raise TypeError("BotState.delete_property(): property_name cannot be None.") cached_state = self.get_cached_state(turn_context) cached_state.state[property_name] = value class BotStatePropertyAccessor(StatePropertyAccessor): """ Defines methods for accessing a state property created in a :class:`BotState` object. """ def __init__(self, bot_state: BotState, name: str): """ Initializes a new instance of the :class:`BotStatePropertyAccessor` class. :param bot_state: The state object to access :type bot_state: :class:`BotState` :param name: The name of the state property to access :type name: str """ self._bot_state = bot_state self._name = name @property def name(self) -> str: """ The name of the property. """ return self._name async def delete(self, turn_context: TurnContext) -> None: """ Deletes the property. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` """ await self._bot_state.load(turn_context, False) await self._bot_state.delete_property_value(turn_context, self._name) async def get( self, turn_context: TurnContext, default_value_or_factory: Union[Callable, object] = None, ) -> object: """ Gets the property value. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :param default_value_or_factory: Defines the default value for the property """ await self._bot_state.load(turn_context, False) try: result = await self._bot_state.get_property_value(turn_context, self._name) return result except: # ask for default value from factory if not default_value_or_factory: return None result = ( default_value_or_factory() if callable(default_value_or_factory) else deepcopy(default_value_or_factory) ) # save default value for any further calls await self.set(turn_context, result) return result async def set(self, turn_context: TurnContext, value: object) -> None: """ Sets the property value. :param turn_context: The context object for this turn :type turn_context: :class:`TurnContext` :param value: The value to assign to the property """ await self._bot_state.load(turn_context, False) await self._bot_state.set_property_value(turn_context, self._name, value)
[ "copy.deepcopy", "jsonpickle.pickler.Pickler" ]
[((859, 868), 'jsonpickle.pickler.Pickler', 'Pickler', ([], {}), '()\n', (866, 868), False, 'from jsonpickle.pickler import Pickler\n'), ((11029, 11063), 'copy.deepcopy', 'deepcopy', (['default_value_or_factory'], {}), '(default_value_or_factory)\n', (11037, 11063), False, 'from copy import deepcopy\n')]
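A sketch of the accessor round-trip that `BotState` enables, assuming the concrete `ConversationState` scope and in-memory `MemoryStorage` that ship alongside it in botbuilder-core (obtaining a real `TurnContext` from an adapter is out of scope here):

from botbuilder.core import ConversationState, MemoryStorage

conversation_state = ConversationState(MemoryStorage())
counter_accessor = conversation_state.create_property("counter")

async def on_turn(turn_context):
    # Load (or default), mutate, write back, then flush the whole scope to storage.
    count = await counter_accessor.get(turn_context, 0)  # non-callable default is deep-copied
    await counter_accessor.set(turn_context, count + 1)
    await conversation_state.save_changes(turn_context)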
import torch import torch.nn as nn from torchinfo import summary class SimpleConv(nn.Module): def __init__(self, in_size): super(SimpleConv, self).__init__() # 3x3 Convolution with 6 filters ('same' padding) self.conv = nn.Conv2d(in_size, 6, 3, padding='same') self.relu = nn.ReLU() def forward(self, x): # Applies convolution and then relu x = self.conv(x) x = self.relu(x) return x class StackedConv(nn.Module): def __init__(self, in_size): super(StackedConv, self).__init__() # Stack 3 SimpleConv layers self.stack = nn.Sequential( SimpleConv(in_size), SimpleConv(6), SimpleConv(6), ) def forward(self, x): x = self.stack(x) return x # TODO: Abstract this away class ResidualBlock(nn.Module): def __init__(self, in_size): super(ResidualBlock, self).__init__() self.conv = StackedConv(in_size) self.relu = nn.ReLU() def forward(self, x): res = x x = self.conv(x) x = x + res return x class StackedRes(nn.Module): def __init__(self, in_size): super(StackedRes, self).__init__() self.stack = nn.Sequential( StackedConv(in_size), ResidualBlock(6), ResidualBlock(6), ) def forward(self, x): x = self.stack(x) return x class Encoder(nn.Module): def __init__(self, in_size, out_size): super(Encoder, self).__init__() self.flat = nn.Flatten() self.lin = nn.Linear(in_size, out_size) def forward(self, x): x = self.flat(x) x = self.lin(x) return x class ResNetwork(nn.Module): def __init__(self, in_size, in_dim, out_size): super(ResNetwork, self).__init__() self.__name__ = 'ResNetwork' self.res = StackedRes(in_size) self.enc = Encoder(6 * in_dim**2, out_size) def forward(self, x): x = self.res(x) x = self.enc(x) return x if __name__ == '__main__': model = ResNetwork(1, 105, 1623) summary(model, input_size=(1, 105, 105))
[ "torch.nn.ReLU", "torch.nn.Conv2d", "torchinfo.summary", "torch.nn.Linear", "torch.nn.Flatten" ]
[((2149, 2189), 'torchinfo.summary', 'summary', (['model'], {'input_size': '(1, 105, 105)'}), '(model, input_size=(1, 105, 105))\n', (2156, 2189), False, 'from torchinfo import summary\n'), ((252, 292), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_size', '(6)', '(3)'], {'padding': '"""same"""'}), "(in_size, 6, 3, padding='same')\n", (261, 292), True, 'import torch.nn as nn\n'), ((313, 322), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (320, 322), True, 'import torch.nn as nn\n'), ((1013, 1022), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1020, 1022), True, 'import torch.nn as nn\n'), ((1578, 1590), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (1588, 1590), True, 'import torch.nn as nn\n'), ((1610, 1638), 'torch.nn.Linear', 'nn.Linear', (['in_size', 'out_size'], {}), '(in_size, out_size)\n', (1619, 1638), True, 'import torch.nn as nn\n')]
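Besides the torchinfo summary, a quick forward-pass shape check of the network defined above (batch size is arbitrary):

import torch

model = ResNetwork(1, 105, 1623)        # as constructed in the __main__ block above
x = torch.randn(4, 1, 105, 105)       # NCHW: batch of single-channel 105x105 images
logits = model(x)
print(logits.shape)                   # torch.Size([4, 1623])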
# Generated by Django 2.2.6 on 2019-10-27 16:03 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Medication', fields=[ ('id', models.AutoField(primary_key=True, serialize=False)), ('name', models.CharField(choices=[('adderall', 'Adderall'), ('vyvanse', 'Vyvanse')], max_length=256)), ('blood_pressure_diastolic', models.PositiveSmallIntegerField()), ('time', models.DateTimeField(auto_now=True)), ], ), ]
[ "django.db.models.CharField", "django.db.models.DateTimeField", "django.db.models.PositiveSmallIntegerField", "django.db.models.AutoField" ]
[((306, 357), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (322, 357), False, 'from django.db import migrations, models\n'), ((385, 481), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('adderall', 'Adderall'), ('vyvanse', 'Vyvanse')]", 'max_length': '(256)'}), "(choices=[('adderall', 'Adderall'), ('vyvanse', 'Vyvanse')],\n max_length=256)\n", (401, 481), False, 'from django.db import migrations, models\n'), ((525, 559), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {}), '()\n', (557, 559), False, 'from django.db import migrations, models\n'), ((587, 622), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (607, 622), False, 'from django.db import migrations, models\n')]
"""Print summary statistics of DRIAMS spectra. The purpose of this script is to print some summary statistics about DRIAMS data sets, stratified by site. This is just a debug script with no usage in real-world analysis scenarios. """ import argparse import dotenv import os import numpy as np from maldi_learn.driams import DRIAMSDatasetExplorer from maldi_learn.driams import load_driams_dataset from tqdm import tqdm dotenv.load_dotenv() DRIAMS_ROOT = os.getenv('DRIAMS_ROOT') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '-s', '--site', default='DRIAMS-A', type=str, help='Site to pre-process') parser.add_argument( '-y', '--years', default=['2015', '2016', '2017', '2018'], type=str, nargs='+', help='Years to pre-process' ) args = parser.parse_args() # Get all available antibiotics for the selected site. We will # pre-process *all* the spectra. explorer = DRIAMSDatasetExplorer(DRIAMS_ROOT) antibiotics = explorer.available_antibiotics(args.site) # Process each year separately, because that simplifies assigning # the output files. for year in tqdm(args.years, desc='Year'): driams_dataset = load_driams_dataset( explorer.root, args.site, year, '*', # Load all species; we do *not* want to filter anything antibiotics[year], handle_missing_resistance_measurements='keep', # Keep all spectra_type='binned_6000', ) codes = driams_dataset.y['code'].values for spectrum, code in tqdm(zip(driams_dataset.X, codes), total=len(codes), desc='Spectrum'): # Use intensity values only if len(spectrum.shape) == 2: spectrum = spectrum[:, 1] min_value, max_value = np.min(spectrum), np.max(spectrum) tic = np.sum(spectrum) print(f'*** {code} ***') print(f'Min: {min_value:.08f}') print(f'Max: {max_value:.08f}') print(f'TIC: {tic:.2f}')
[ "tqdm.tqdm", "numpy.sum", "argparse.ArgumentParser", "maldi_learn.driams.DRIAMSDatasetExplorer", "maldi_learn.driams.load_driams_dataset", "dotenv.load_dotenv", "numpy.min", "numpy.max", "os.getenv" ]
[((425, 445), 'dotenv.load_dotenv', 'dotenv.load_dotenv', ([], {}), '()\n', (443, 445), False, 'import dotenv\n'), ((460, 484), 'os.getenv', 'os.getenv', (['"""DRIAMS_ROOT"""'], {}), "('DRIAMS_ROOT')\n", (469, 484), False, 'import os\n'), ((528, 553), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (551, 553), False, 'import argparse\n'), ((1018, 1052), 'maldi_learn.driams.DRIAMSDatasetExplorer', 'DRIAMSDatasetExplorer', (['DRIAMS_ROOT'], {}), '(DRIAMS_ROOT)\n', (1039, 1052), False, 'from maldi_learn.driams import DRIAMSDatasetExplorer\n'), ((1224, 1253), 'tqdm.tqdm', 'tqdm', (['args.years'], {'desc': '"""Year"""'}), "(args.years, desc='Year')\n", (1228, 1253), False, 'from tqdm import tqdm\n'), ((1280, 1434), 'maldi_learn.driams.load_driams_dataset', 'load_driams_dataset', (['explorer.root', 'args.site', 'year', '"""*"""', 'antibiotics[year]'], {'handle_missing_resistance_measurements': '"""keep"""', 'spectra_type': '"""binned_6000"""'}), "(explorer.root, args.site, year, '*', antibiotics[year],\n handle_missing_resistance_measurements='keep', spectra_type='binned_6000')\n", (1299, 1434), False, 'from maldi_learn.driams import load_driams_dataset\n'), ((2057, 2073), 'numpy.sum', 'np.sum', (['spectrum'], {}), '(spectrum)\n', (2063, 2073), True, 'import numpy as np\n'), ((2004, 2020), 'numpy.min', 'np.min', (['spectrum'], {}), '(spectrum)\n', (2010, 2020), True, 'import numpy as np\n'), ((2022, 2038), 'numpy.max', 'np.max', (['spectrum'], {}), '(spectrum)\n', (2028, 2038), True, 'import numpy as np\n')]
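The per-spectrum statistics printed above reduce to plain numpy calls; a standalone sketch with a synthetic spectrum standing in for the DRIAMS data:

import numpy as np

spectrum = np.random.rand(6000, 2)     # hypothetical (bin, intensity) pairs, as in 'binned_6000'
intensities = spectrum[:, 1]         # use intensity values only, as the script does
print(f"Min: {np.min(intensities):.08f}")
print(f"Max: {np.max(intensities):.08f}")
print(f"TIC: {np.sum(intensities):.2f}")  # total ion current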
"""@author: Young @license: (C) Copyright 2013-2017 @contact: <EMAIL> @file: main.py @time: 2018/1/17 10:02 """ import gc import gym from agent.agent import Agent MAX_EPISODES = 5000 env = gym.make('BipedalWalker-v2') state_size = env.observation_space.shape[0] action_size = env.action_space.shape[0] agent = Agent(state_size, action_size) state = env.reset() for _ in range(int(1e3)): action = agent.get_exploration_policy(state) next_state, reward, done, info = env.step(action) agent.append(state, action, reward, done, next_state) state = next_state if done: state = env.reset() for _ep in range(MAX_EPISODES): state = env.reset() count = 0 while True: count += 1 # env.render() action = agent.get_exploration_policy(state) next_state, reward, done, info = env.step(action) agent.append(state, action, reward, done, next_state) state = next_state agent.optimize() if done: state = env.reset() break gc.collect() if _ep % 100 == 0: print("{} - score: {}".format(_ep, count)) agent.save_models(_ep)
[ "agent.agent.Agent", "gc.collect", "gym.make" ]
[((193, 221), 'gym.make', 'gym.make', (['"""BipedalWalker-v2"""'], {}), "('BipedalWalker-v2')\n", (201, 221), False, 'import gym\n'), ((315, 345), 'agent.agent.Agent', 'Agent', (['state_size', 'action_size'], {}), '(state_size, action_size)\n', (320, 345), False, 'from agent.agent import Agent\n'), ((1043, 1055), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1053, 1055), False, 'import gc\n')]
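The `agent.agent.Agent` class imported above is not part of this snippet. Judging only from how the training loop calls it, the interface it assumes looks roughly like the following sketch (method bodies are placeholders, not the real implementation):

class Agent:
    def __init__(self, state_size: int, action_size: int): ...

    def get_exploration_policy(self, state):
        """Return an action for the given state, including exploration noise."""

    def append(self, state, action, reward, done, next_state):
        """Store one transition in the replay buffer."""

    def optimize(self):
        """Run one training step on sampled transitions."""

    def save_models(self, episode: int):
        """Checkpoint the networks."""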
import sys if sys.version_info >= (3, 8): from typing import Dict, Literal, List, Optional, Any else: from typing import Dict, List, Optional, Any from typing_extensions import Literal import logging from aiortc import RTCIceServer, RTCPeerConnection, RTCSessionDescription, RTCRtpTransceiver, MediaStreamTrack import sdp_transform from .sdp import common_utils from .sdp.remote_sdp import RemoteSdp from .sdp.unified_plan_utils import addLegacySimulcast, getRtpEncodings from .sdp.common_utils import applyCodecParameters, extractDtlsParameters from .handler_interface import HandlerInterface from ..ortc import ExtendedRtpCapabilities from ..rtp_parameters import MediaKind, RtpParameters, RtpCapabilities, RtpCodecCapability, RtpEncodingParameters, RtcpParameters from ..sctp_parameters import SctpCapabilities, SctpParameters, SctpStreamParameters from ..ortc import getSendingRtpParameters, getSendingRemoteRtpParameters, reduceCodecs from ..scalability_modes import parse as smParse from ..models.transport import IceCandidate, IceParameters, DtlsParameters, DtlsRole from ..models.handler_interface import HandlerRunOptions, HandlerSendOptions, HandlerSendResult, HandlerSendDataChannelResult, HandlerReceiveDataChannelResult, HandlerReceiveOptions, HandlerReceiveResult, HandlerReceiveDataChannelOptions from ..producer import ProducerCodecOptions SCTP_NUM_STREAMS = { 'OS': 1024, 'MIS': 1024 } class AiortcHandler(HandlerInterface): def __init__(self, tracks: List[MediaStreamTrack]=[], loop=None): super(AiortcHandler, self).__init__(loop=loop) # Handler direction. self._direction: Optional[Literal['send', 'recv']] = None # Remote SDP handler. self._remoteSdp: Optional[RemoteSdp] = None # Generic sending RTP parameters for audio and video. self._sendingRtpParametersByKind: Dict[str, RtpParameters] = {} # Generic sending RTP parameters for audio and video suitable for the SDP # remote answer. self._sendingRemoteRtpParametersByKind: Dict[str, RtpParameters] = {} # RTCPeerConnection instance. self._pc: Optional[RTCPeerConnection] = None # Map of RTCTransceivers indexed by MID. self._mapMidTransceiver: Dict[str, RTCRtpTransceiver] = {} # Whether a DataChannel m=application section has been created. self._hasDataChannelMediaSection = False # Sending DataChannel id value counter. Incremented for each new DataChannel. self._nextSendSctpStreamId = 0 # Got transport local and remote parameters. 
self._transportReady = False self._tracks = tracks @classmethod def createFactory(cls, tracks: List[MediaStreamTrack]=[], loop=None): return lambda: cls(tracks, loop) @property def name(self) -> str: return 'aiortc' @property def pc(self) -> RTCPeerConnection: if self._pc: return self._pc else: raise Exception('PeerConnection not ready') @property def remoteSdp(self) -> RemoteSdp: if self._remoteSdp: return self._remoteSdp else: raise Exception('Remote SDP not ready') async def close(self): logging.debug('close()') if self._pc: await self._pc.close() async def getNativeRtpCapabilities(self) -> RtpCapabilities: logging.debug('getNativeRtpCapabilities()') pc = RTCPeerConnection() for track in self._tracks: pc.addTrack(track) pc.addTransceiver('audio') pc.addTransceiver('video') offer: RTCSessionDescription = await pc.createOffer() await pc.close() sdpDict: dict = sdp_transform.parse(offer.sdp) nativeRtpCapabilities:RtpCapabilities = common_utils.extractRtpCapabilities(sdpDict) return nativeRtpCapabilities async def getNativeSctpCapabilities(self) -> SctpCapabilities: logging.debug('getNativeSctpCapabilities()') return SctpCapabilities.parse_obj({ 'numStreams': SCTP_NUM_STREAMS }) def run( self, direction: Literal['send', 'recv'], iceParameters: IceParameters, iceCandidates: List[IceCandidate], dtlsParameters: DtlsParameters, extendedRtpCapabilities: ExtendedRtpCapabilities, sctpParameters: Optional[SctpParameters]=None, iceServers: Optional[RTCIceServer]=None, iceTransportPolicy: Optional[Literal['all', 'relay']]=None, additionalSettings: Optional[Any]=None, proprietaryConstraints: Optional[Any]=None ): logging.debug('AiortcHandler run()') options = HandlerRunOptions( direction=direction, iceParameters=iceParameters, iceCandidates=iceCandidates, dtlsParameters=dtlsParameters, sctpParameters=sctpParameters, iceServers=iceServers, iceTransportPolicy=iceTransportPolicy, additionalSettings=additionalSettings, proprietaryConstraints=proprietaryConstraints, extendedRtpCapabilities=extendedRtpCapabilities ) self._direction = options.direction self._remoteSdp = RemoteSdp( iceParameters=options.iceParameters, iceCandidates=options.iceCandidates, dtlsParameters=options.dtlsParameters, sctpParameters=options.sctpParameters ) self._sendingRtpParametersByKind = { 'audio': getSendingRtpParameters('audio', options.extendedRtpCapabilities), 'video': getSendingRtpParameters('video', options.extendedRtpCapabilities) } self._sendingRemoteRtpParametersByKind = { 'audio': getSendingRemoteRtpParameters('audio', options.extendedRtpCapabilities), 'video': getSendingRemoteRtpParameters('video', options.extendedRtpCapabilities) } self._pc = RTCPeerConnection() @self._pc.on('iceconnectionstatechange') def on_iceconnectionstatechange(): if self._pc.iceConnectionState == 'checking': self.emit('@connectionstatechange', 'connecting') elif self._pc.iceConnectionState in ['connected', 'completed']: self.emit('@connectionstatechange', 'connected') elif self._pc.iceConnectionState == 'failed': self.emit('@connectionstatechange', 'failed') elif self._pc.iceConnectionState == 'disconnected': self.emit('@connectionstatechange', 'disconnected') elif self._pc.iceConnectionState == 'closed': self.emit('@connectionstatechange', 'closed') async def updateIceServers(self, iceServers): logging.warning('updateIceServers() not implemented') # TODO: aiortc can not update iceServers async def restartIce(self, iceParameters): logging.debug('restartIce()') self._remoteSdp.updateIceParameters(iceParameters) if not self._transportReady: return 
        if self._direction == 'send':
            # NOTE: aiortc RTCPeerConnection createOffer does not have iceRestart options
            offer = await self._pc.createOffer()
            logging.debug(f'restartIce() | calling pc.setLocalDescription() [offer:{offer}]')
            await self._pc.setLocalDescription(offer)
            answer: RTCSessionDescription = RTCSessionDescription(
                type='answer',
                sdp=self._remoteSdp.getSdp()
            )
            logging.debug(f'restartIce() | calling pc.setRemoteDescription() [answer:{answer}]')
            await self._pc.setRemoteDescription(answer)
        else:
            offer: RTCSessionDescription = RTCSessionDescription(
                type='offer',
                sdp=self._remoteSdp.getSdp()
            )
            logging.debug(f'restartIce() | calling pc.setRemoteDescription() [offer:{offer}]')
            await self._pc.setRemoteDescription(offer)
            answer = await self._pc.createAnswer()
            logging.debug(f'restartIce() | calling pc.setLocalDescription() [answer:{answer}]')
            await self._pc.setLocalDescription(answer)

    async def getTransportStats(self):
        return self._pc.getStats()

    async def send(
        self,
        track: MediaStreamTrack,
        encodings: List[RtpEncodingParameters]=[],
        codecOptions: Optional[ProducerCodecOptions]=None,
        codec: Optional[RtpCodecCapability]=None
    ) -> HandlerSendResult:
        options = HandlerSendOptions(
            track=track,
            encodings=encodings,
            codecOptions=codecOptions,
            codec=codec
        )
        self._assertSendDirection()
        logging.debug(f'send() [kind:{options.track.kind}, track.id:{options.track.id}]')
        if options.encodings:
            for idx in range(len(options.encodings)):
                options.encodings[idx].rid = f'r{idx}'
        sendingRtpParameters: RtpParameters = self._sendingRtpParametersByKind[options.track.kind].copy(deep=True)
        sendingRtpParameters.codecs = reduceCodecs(sendingRtpParameters.codecs, options.codec)
        sendingRemoteRtpParameters: RtpParameters = self._sendingRemoteRtpParametersByKind[options.track.kind].copy(deep=True)
        sendingRemoteRtpParameters.codecs = reduceCodecs(sendingRemoteRtpParameters.codecs, options.codec)
        mediaSectionIdx = self.remoteSdp.getNextMediaSectionIdx()
        transceiver = self.pc.addTransceiver(options.track, direction='sendonly')
        offer: RTCSessionDescription = await self.pc.createOffer()
        offerMediaDict: dict
        localSdpDict = sdp_transform.parse(offer.sdp)
        if not self._transportReady:
            await self._setupTransport(localDtlsRole='server', localSdpDict=localSdpDict)
        # Special case for VP9 with SVC.
        hackVp9Svc = False
        if options.encodings:
            layers = smParse(options.encodings[0].scalabilityMode if options.encodings[0].scalabilityMode else '')
        else:
            layers = smParse('')
        if len(options.encodings) == 1 and layers.spatialLayers > 1 and sendingRtpParameters.codecs[0].mimeType.lower() == 'video/vp9':
            logging.debug('send() | enabling legacy simulcast for VP9 SVC')
            hackVp9Svc = True
            localSdpDict = sdp_transform.parse(offer.sdp)
            offerMediaDict = localSdpDict['media'][mediaSectionIdx.idx]
            addLegacySimulcast(offerMediaDict=offerMediaDict, numStreams=layers.spatialLayers)
            offer = RTCSessionDescription(
                type='offer',
                sdp=sdp_transform.write(localSdpDict)
            )
        logging.debug(f'send() | calling pc.setLocalDescription() [offer:{offer}]')
        await self.pc.setLocalDescription(offer)
        # We can now get the transceiver.mid.
        localId = transceiver.mid
        # Set MID.
        sendingRtpParameters.mid = localId
        localSdpDict = sdp_transform.parse(self.pc.localDescription.sdp)
        offerMediaDict = localSdpDict['media'][mediaSectionIdx.idx]
        logging.debug(f"send() | get offerMediaDict {offerMediaDict} \n from localSdpDict {localSdpDict['media']} index {mediaSectionIdx.idx}")
        # Set RTCP CNAME.
if sendingRtpParameters.rtcp == None: sendingRtpParameters.rtcp = RtcpParameters() sendingRtpParameters.rtcp.cname = common_utils.getCname(offerMediaDict) # Set RTP encodings by parsing the SDP offer if no encodings are given. if not options.encodings: sendingRtpParameters.encodings = getRtpEncodings(offerMediaDict) # Set RTP encodings by parsing the SDP offer and complete them with given # one if just a single encoding has been given. elif len(options.encodings) == 1: newEncodings = getRtpEncodings(offerMediaDict) if newEncodings and options.encodings[0]: firstEncodingDict: dict = newEncodings[0].dict() optionsEncodingDict: dict = options.encodings[0].dict() firstEncodingDict.update(optionsEncodingDict) newEncodings[0] = RtpEncodingParameters(**firstEncodingDict) if hackVp9Svc: newEncodings = [newEncodings[0]] sendingRtpParameters.encodings = newEncodings # Otherwise if more than 1 encoding are given use them verbatim. else: sendingRtpParameters.encodings = options.encodings # If VP8 or H264 and there is effective simulcast, add scalabilityMode to # each encoding. if len(sendingRtpParameters.encodings) > 1 and (sendingRtpParameters.codecs[0].mimeType.lower() == 'video/vp8' or sendingRtpParameters.codecs[0].mimeType.lower() == 'video/h264'): for encoding in sendingRtpParameters.encodings: encoding.scalabilityMode = 'S1T3' self.remoteSdp.send( offerMediaDict=offerMediaDict, reuseMid=mediaSectionIdx.reuseMid, offerRtpParameters=sendingRtpParameters, answerRtpParameters=sendingRemoteRtpParameters, codecOptions=options.codecOptions, extmapAllowMixed=True ) answer: RTCSessionDescription = RTCSessionDescription( type='answer', sdp=self.remoteSdp.getSdp() ) logging.debug(f'send() | calling pc.setRemoteDescription() [answer:{answer}]') await self.pc.setRemoteDescription(answer) # Store in the map. 
self._mapMidTransceiver[localId] = transceiver return HandlerSendResult( localId=localId, rtpParameters=sendingRtpParameters, rtpSender=transceiver.sender ) async def stopSending(self, localId): pass # self._assertSendDirection() # logging.debug(f'stopSending() [localId:{localId}]') # transceiver = self._mapMidTransceiver.get(localId) # if not transceiver: # raise Exception('associated RTCRtpTransceiver not found') # transceiver.sender.replaceTrack() # # TODO:RTCPeerConnection do not have removeTrack() # self._pc.removeTrack(transceiver.sender) # self._remoteSdp.closeMediaSection(transceiver.mid) # offer = await self._pc.createOffer() # logging.debug(f'stopSending() | calling pc.setLocalDescription() [offer:{offer}]') # await self._pc.localDescription(offer) # answer: RTCSessionDescription = RTCSessionDescription( # type='answer', # sdp=self._remoteSdp.getSdp() # ) # logging.debug(f'stopSending() | calling pc.setRemoteDescription() [answer:{answer}]') # await self._pc.setRemoteDescription(answer) async def replaceTrack(self, localId, track=None): self._assertSendDirection() if track: logging.debug(f'replaceTrack() [localId:{localId}, track.id:{track.id}]') else: logging.debug(f'replaceTrack() [localId:{localId}, no track]') transceiver = self._mapMidTransceiver.get(localId) if not transceiver: raise Exception('associated RTCRtpTransceiver not found') await transceiver.sender.replaceTrack(track) async def setMaxSpatialLayer(self, localId: str, spatialLayer: int): logging.warning('setMaxSpatialLayer() not implemented') # NOTE: RTCRtpSender do not have getParameters() # self._assertSendDirection() # logging.debug(f'setMaxSpatialLayer() [localId:{localId}, spatialLayer:{spatialLayer}]') # transceiver = self._mapMidTransceiver.get(localId) # if not transceiver: # raise Exception('associated RTCRtpTransceiver not found') # parameters = transceiver.sender.getParameters() async def setRtpEncodingParameters(self, localId: str, params: Any): logging.warning('setRtpEncodingParameters() not implemented') # NOTE: RTCRtpSender do not have getParameters() async def getSenderStats(self, localId: str): self._assertSendDirection() transceiver = self._mapMidTransceiver.get(localId) if not transceiver: raise Exception('associated RTCRtpTransceiver not found') return await transceiver.sender.getStats() async def sendDataChannel( self, streamId: Optional[int]=None, ordered: Optional[bool]=True, maxPacketLifeTime: Optional[int]=None, maxRetransmits: Optional[int]=None, priority: Optional[Literal['very-low','low','medium','high']]=None, label: Optional[str]=None, protocol: Optional[str]=None ) -> HandlerSendDataChannelResult: if streamId == None: streamId = self._nextSendSctpStreamId options=SctpStreamParameters( streamId=streamId, ordered=ordered, maxPacketLifeTime=maxPacketLifeTime, maxRetransmits=maxRetransmits, priority=priority, label=label, protocol=protocol ) self._assertSendDirection() logging.debug('sendDataChannel()') dataChannel = self.pc.createDataChannel( label=options.label, maxPacketLifeTime=options.maxPacketLifeTime, ordered=options.ordered, protocol=options.protocol, negotiated=True, id=self._nextSendSctpStreamId ) # Increase next id. self._nextSendSctpStreamId = (self._nextSendSctpStreamId + 1) % SCTP_NUM_STREAMS.get('MIS', 1) # If this is the first DataChannel we need to create the SDP answer with # m=application section. 
if not self._hasDataChannelMediaSection: offer: RTCSessionDescription = await self.pc.createOffer() localSdpDict = sdp_transform.parse(offer.sdp) offerMediaDicts = [m for m in localSdpDict.get('media') if m.get('type') == 'application'] if not offerMediaDicts: raise Exception('No datachannel') offerMediaDict = offerMediaDicts[0] if not self._transportReady: await self._setupTransport(localDtlsRole='server', localSdpDict=localSdpDict) logging.debug(f'sendDataChannel() | calling pc.setLocalDescription() [offer:{offer}]') await self.pc.setLocalDescription(offer) self.remoteSdp.sendSctpAssociation(offerMediaDict=offerMediaDict) answer: RTCSessionDescription = RTCSessionDescription( type='answer', sdp=self.remoteSdp.getSdp() ) logging.debug(f'sendDataChannel() | calling pc.setRemoteDescription() [answer:{answer}]') await self.pc.setRemoteDescription(answer) self._hasDataChannelMediaSection = True return HandlerSendDataChannelResult( dataChannel=dataChannel, sctpStreamParameters=options ) async def receive( self, trackId: str, kind: MediaKind, rtpParameters: RtpParameters ) -> HandlerReceiveResult: options = HandlerReceiveOptions( trackId=trackId, kind=kind, rtpParameters=rtpParameters ) self._assertRecvDirection() logging.debug(f'receive() [trackId:{options.trackId}, kind:{options.kind}]') localId = options.rtpParameters.mid if options.rtpParameters.mid != None else str(len(self._mapMidTransceiver)) self.remoteSdp.receive( mid=localId, kind=options.kind, offerRtpParameters=options.rtpParameters, streamId=options.rtpParameters.rtcp.cname, trackId=options.trackId ) offer: RTCSessionDescription = RTCSessionDescription( type='offer', sdp=self.remoteSdp.getSdp() ) logging.debug(f'receive() | calling pc.setRemoteDescription() [offer:{offer}]') await self.pc.setRemoteDescription(offer) answer: RTCSessionDescription = await self.pc.createAnswer() localSdpDict = sdp_transform.parse(answer.sdp) answerMediaDict = [m for m in localSdpDict.get('media') if str(m.get('mid')) == localId][0] # May need to modify codec parameters in the answer based on codec # parameters in the offer. applyCodecParameters(offerRtpParameters=options.rtpParameters, answerMediaDict=answerMediaDict) answer = RTCSessionDescription( type='answer', sdp=sdp_transform.write(localSdpDict) ) if not self._transportReady: await self._setupTransport(localDtlsRole='client', localSdpDict=localSdpDict) logging.debug(f'receive() | calling pc.setLocalDescription() [answer:{answer}]') await self.pc.setLocalDescription(answer) transceivers = [t for t in self.pc.getTransceivers() if t.mid == localId] if not transceivers: raise Exception('new RTCRtpTransceiver not found') # Store in the map. 
transceiver = transceivers[0] self._mapMidTransceiver[localId] = transceiver return HandlerReceiveResult( localId=localId, track=transceiver.receiver.track, rtpReceiver=transceiver.receiver ) async def stopReceiving(self, localId: str): self._assertRecvDirection() logging.debug(f'stopReceiving() [localId:{localId}]') transceiver = self._mapMidTransceiver.get(localId) if not transceiver: raise Exception('associated RTCRtpTransceiver not found') self.remoteSdp.closeMediaSection(transceiver.mid) offer: RTCSessionDescription = RTCSessionDescription( type='offer', sdp=self.remoteSdp.getSdp() ) logging.debug(f'stopReceiving() | calling pc.setRemoteDescription() [offer:{offer}]') await self.pc.setRemoteDescription(offer) answer = await self.pc.createAnswer() logging.debug(f'stopReceiving() | calling pc.setLocalDescription() [answer:{answer}]') await self.pc.setLocalDescription(answer) async def getReceiverStats(self, localId: str): self._assertRecvDirection() transceiver = self._mapMidTransceiver.get(localId) if not transceiver: raise Exception('associated RTCRtpTransceiver not found') return await transceiver.receiver.getStats() async def receiveDataChannel( self, sctpStreamParameters: SctpStreamParameters, label: Optional[str]=None, protocol: Optional[str]=None ) -> HandlerReceiveDataChannelResult: options = HandlerReceiveDataChannelOptions( sctpStreamParameters=sctpStreamParameters, label=label, protocol=protocol ) self._assertRecvDirection() logging.debug(f'[receiveDataChannel() [options:{options.sctpStreamParameters}]]') dataChannel = self.pc.createDataChannel( label=options.label, maxPacketLifeTime=options.sctpStreamParameters.maxPacketLifeTime, maxRetransmits=options.sctpStreamParameters.maxRetransmits, ordered=options.sctpStreamParameters.ordered, protocol=options.protocol, negotiated=True, id=options.sctpStreamParameters.streamId ) # If this is the first DataChannel we need to create the SDP offer with # m=application section. if not self._hasDataChannelMediaSection: self.remoteSdp.receiveSctpAssociation() offer: RTCSessionDescription = RTCSessionDescription( type='offer', sdp=self.remoteSdp.getSdp() ) logging.debug(f'receiveDataChannel() | calling pc.setRemoteDescription() [offer:{offer}]') await self.pc.setRemoteDescription(offer) answer = await self.pc.createAnswer() if not self._transportReady: localSdpDict = sdp_transform.parse(answer.sdp) await self._setupTransport(localDtlsRole='client', localSdpDict=localSdpDict) logging.debug(f'receiveDataChannel() | calling pc.setRemoteDescription() [answer:{answer}]') await self.pc.setLocalDescription(answer) self._hasDataChannelMediaSection = True return HandlerReceiveDataChannelResult(dataChannel=dataChannel) async def _setupTransport(self, localDtlsRole: DtlsRole, localSdpDict: dict={}): if localSdpDict == {}: localSdpDict = sdp_transform.parse(self.pc.localDescription.sdp) # Get our local DTLS parameters. dtlsParameters: DtlsParameters = extractDtlsParameters(localSdpDict) # Set our DTLS role. dtlsParameters.role = localDtlsRole # Update the remote DTLS role in the SDP. self.remoteSdp.updateDtlsRole('server' if localDtlsRole == 'client' else 'client') # Need to tell the remote transport about our parameters. 
await self.emit_for_results('@connect', dtlsParameters) self._transportReady = True def _assertSendDirection(self): if self._direction != 'send': raise Exception('method can just be called for handlers with "send" direction') def _assertRecvDirection(self): if self._direction != 'recv': raise Exception('method can just be called for handlers with "recv" direction')
[ "logging.debug", "aiortc.RTCPeerConnection", "logging.warning", "sdp_transform.parse", "sdp_transform.write" ]
[((3255, 3279), 'logging.debug', 'logging.debug', (['"""close()"""'], {}), "('close()')\n", (3268, 3279), False, 'import logging\n'), ((3411, 3454), 'logging.debug', 'logging.debug', (['"""getNativeRtpCapabilities()"""'], {}), "('getNativeRtpCapabilities()')\n", (3424, 3454), False, 'import logging\n'), ((3469, 3488), 'aiortc.RTCPeerConnection', 'RTCPeerConnection', ([], {}), '()\n', (3486, 3488), False, 'from aiortc import RTCIceServer, RTCPeerConnection, RTCSessionDescription, RTCRtpTransceiver, MediaStreamTrack\n'), ((3738, 3768), 'sdp_transform.parse', 'sdp_transform.parse', (['offer.sdp'], {}), '(offer.sdp)\n', (3757, 3768), False, 'import sdp_transform\n'), ((3980, 4024), 'logging.debug', 'logging.debug', (['"""getNativeSctpCapabilities()"""'], {}), "('getNativeSctpCapabilities()')\n", (3993, 4024), False, 'import logging\n'), ((4664, 4700), 'logging.debug', 'logging.debug', (['"""AiortcHandler run()"""'], {}), "('AiortcHandler run()')\n", (4677, 4700), False, 'import logging\n'), ((5992, 6011), 'aiortc.RTCPeerConnection', 'RTCPeerConnection', ([], {}), '()\n', (6009, 6011), False, 'from aiortc import RTCIceServer, RTCPeerConnection, RTCSessionDescription, RTCRtpTransceiver, MediaStreamTrack\n'), ((6809, 6862), 'logging.warning', 'logging.warning', (['"""updateIceServers() not implemented"""'], {}), "('updateIceServers() not implemented')\n", (6824, 6862), False, 'import logging\n'), ((6972, 7001), 'logging.debug', 'logging.debug', (['"""restartIce()"""'], {}), "('restartIce()')\n", (6985, 7001), False, 'import logging\n'), ((8822, 8908), 'logging.debug', 'logging.debug', (['f"""send() [kind:{options.track.kind}, track.id:{options.track.id}]"""'], {}), "(\n f'send() [kind:{options.track.kind}, track.id:{options.track.id}]')\n", (8835, 8908), False, 'import logging\n'), ((9759, 9789), 'sdp_transform.parse', 'sdp_transform.parse', (['offer.sdp'], {}), '(offer.sdp)\n', (9778, 9789), False, 'import sdp_transform\n'), ((10787, 10862), 'logging.debug', 'logging.debug', (['f"""send() | calling pc.setLocalDescription() [offer:{offer}]"""'], {}), "(f'send() | calling pc.setLocalDescription() [offer:{offer}]')\n", (10800, 10862), False, 'import logging\n'), ((11078, 11127), 'sdp_transform.parse', 'sdp_transform.parse', (['self.pc.localDescription.sdp'], {}), '(self.pc.localDescription.sdp)\n', (11097, 11127), False, 'import sdp_transform\n'), ((11206, 11354), 'logging.debug', 'logging.debug', (['f"""send() | get offerMediaDict {offerMediaDict} \n from localSdpDict {localSdpDict[\'media\']} index {mediaSectionIdx.idx}"""'], {}), '(\n f"""send() | get offerMediaDict {offerMediaDict} \n from localSdpDict {localSdpDict[\'media\']} index {mediaSectionIdx.idx}"""\n )\n', (11219, 11354), False, 'import logging\n'), ((13479, 13557), 'logging.debug', 'logging.debug', (['f"""send() | calling pc.setRemoteDescription() [answer:{answer}]"""'], {}), "(f'send() | calling pc.setRemoteDescription() [answer:{answer}]')\n", (13492, 13557), False, 'import logging\n'), ((15464, 15519), 'logging.warning', 'logging.warning', (['"""setMaxSpatialLayer() not implemented"""'], {}), "('setMaxSpatialLayer() not implemented')\n", (15479, 15519), False, 'import logging\n'), ((16020, 16081), 'logging.warning', 'logging.warning', (['"""setRtpEncodingParameters() not implemented"""'], {}), "('setRtpEncodingParameters() not implemented')\n", (16035, 16081), False, 'import logging\n'), ((17251, 17285), 'logging.debug', 'logging.debug', (['"""sendDataChannel()"""'], {}), "('sendDataChannel()')\n", (17264, 17285), False, 'import 
logging\n'), ((19473, 19549), 'logging.debug', 'logging.debug', (['f"""receive() [trackId:{options.trackId}, kind:{options.kind}]"""'], {}), "(f'receive() [trackId:{options.trackId}, kind:{options.kind}]')\n", (19486, 19549), False, 'import logging\n'), ((20059, 20138), 'logging.debug', 'logging.debug', (['f"""receive() | calling pc.setRemoteDescription() [offer:{offer}]"""'], {}), "(f'receive() | calling pc.setRemoteDescription() [offer:{offer}]')\n", (20072, 20138), False, 'import logging\n'), ((20281, 20312), 'sdp_transform.parse', 'sdp_transform.parse', (['answer.sdp'], {}), '(answer.sdp)\n', (20300, 20312), False, 'import sdp_transform\n'), ((20889, 20974), 'logging.debug', 'logging.debug', (['f"""receive() | calling pc.setLocalDescription() [answer:{answer}]"""'], {}), "(f'receive() | calling pc.setLocalDescription() [answer:{answer}]'\n )\n", (20902, 20974), False, 'import logging\n'), ((21585, 21638), 'logging.debug', 'logging.debug', (['f"""stopReceiving() [localId:{localId}]"""'], {}), "(f'stopReceiving() [localId:{localId}]')\n", (21598, 21638), False, 'import logging\n'), ((22000, 22090), 'logging.debug', 'logging.debug', (['f"""stopReceiving() | calling pc.setRemoteDescription() [offer:{offer}]"""'], {}), "(\n f'stopReceiving() | calling pc.setRemoteDescription() [offer:{offer}]')\n", (22013, 22090), False, 'import logging\n'), ((22190, 22281), 'logging.debug', 'logging.debug', (['f"""stopReceiving() | calling pc.setLocalDescription() [answer:{answer}]"""'], {}), "(\n f'stopReceiving() | calling pc.setLocalDescription() [answer:{answer}]')\n", (22203, 22281), False, 'import logging\n'), ((23065, 23151), 'logging.debug', 'logging.debug', (['f"""[receiveDataChannel() [options:{options.sctpStreamParameters}]]"""'], {}), "(\n f'[receiveDataChannel() [options:{options.sctpStreamParameters}]]')\n", (23078, 23151), False, 'import logging\n'), ((7304, 7390), 'logging.debug', 'logging.debug', (['f"""restartIce() | calling pc.setLocalDescription() [offer:{offer}]"""'], {}), "(\n f'restartIce() | calling pc.setLocalDescription() [offer:{offer}]')\n", (7317, 7390), False, 'import logging\n'), ((7609, 7698), 'logging.debug', 'logging.debug', (['f"""restartIce() | calling pc.setRemoteDescription() [answer:{answer}]"""'], {}), "(\n f'restartIce() | calling pc.setRemoteDescription() [answer:{answer}]')\n", (7622, 7698), False, 'import logging\n'), ((7931, 8018), 'logging.debug', 'logging.debug', (['f"""restartIce() | calling pc.setRemoteDescription() [offer:{offer}]"""'], {}), "(\n f'restartIce() | calling pc.setRemoteDescription() [offer:{offer}]')\n", (7944, 8018), False, 'import logging\n'), ((8132, 8220), 'logging.debug', 'logging.debug', (['f"""restartIce() | calling pc.setLocalDescription() [answer:{answer}]"""'], {}), "(\n f'restartIce() | calling pc.setLocalDescription() [answer:{answer}]')\n", (8145, 8220), False, 'import logging\n'), ((10321, 10384), 'logging.debug', 'logging.debug', (['"""send() | enabling legacy simulcast for VP9 SVC"""'], {}), "('send() | enabling legacy simulcast for VP9 SVC')\n", (10334, 10384), False, 'import logging\n'), ((15004, 15077), 'logging.debug', 'logging.debug', (['f"""replaceTrack() [localId:{localId}, track.id:{track.id}]"""'], {}), "(f'replaceTrack() [localId:{localId}, track.id:{track.id}]')\n", (15017, 15077), False, 'import logging\n'), ((15104, 15166), 'logging.debug', 'logging.debug', (['f"""replaceTrack() [localId:{localId}, no track]"""'], {}), "(f'replaceTrack() [localId:{localId}, no track]')\n", (15117, 15166), False, 'import 
logging\n'), ((17974, 18004), 'sdp_transform.parse', 'sdp_transform.parse', (['offer.sdp'], {}), '(offer.sdp)\n', (17993, 18004), False, 'import sdp_transform\n'), ((18403, 18494), 'logging.debug', 'logging.debug', (['f"""sendDataChannel() | calling pc.setLocalDescription() [offer:{offer}]"""'], {}), "(\n f'sendDataChannel() | calling pc.setLocalDescription() [offer:{offer}]')\n", (18416, 18494), False, 'import logging\n'), ((18790, 18884), 'logging.debug', 'logging.debug', (['f"""sendDataChannel() | calling pc.setRemoteDescription() [answer:{answer}]"""'], {}), "(\n f'sendDataChannel() | calling pc.setRemoteDescription() [answer:{answer}]')\n", (18803, 18884), False, 'import logging\n'), ((23949, 24049), 'logging.debug', 'logging.debug', (['f"""receiveDataChannel() | calling pc.setRemoteDescription() [offer:{offer}]"""'], {}), "(\n f'receiveDataChannel() | calling pc.setRemoteDescription() [offer:{offer}]'\n )\n", (23962, 24049), False, 'import logging\n'), ((24354, 24456), 'logging.debug', 'logging.debug', (['f"""receiveDataChannel() | calling pc.setRemoteDescription() [answer:{answer}]"""'], {}), "(\n f'receiveDataChannel() | calling pc.setRemoteDescription() [answer:{answer}]'\n )\n", (24367, 24456), False, 'import logging\n'), ((24773, 24822), 'sdp_transform.parse', 'sdp_transform.parse', (['self.pc.localDescription.sdp'], {}), '(self.pc.localDescription.sdp)\n', (24792, 24822), False, 'import sdp_transform\n'), ((20710, 20743), 'sdp_transform.write', 'sdp_transform.write', (['localSdpDict'], {}), '(localSdpDict)\n', (20729, 20743), False, 'import sdp_transform\n'), ((24216, 24247), 'sdp_transform.parse', 'sdp_transform.parse', (['answer.sdp'], {}), '(answer.sdp)\n', (24235, 24247), False, 'import sdp_transform\n'), ((10722, 10755), 'sdp_transform.write', 'sdp_transform.write', (['localSdpDict'], {}), '(localSdpDict)\n', (10741, 10755), False, 'import sdp_transform\n')]
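A minimal way to exercise the handler defined above, assuming an empty track list (this is a sketch, not the library's documented entry point):

import asyncio

factory = AiortcHandler.createFactory(tracks=[])  # returns a zero-argument constructor
handler = factory()

# getNativeRtpCapabilities() internally builds a throwaway RTCPeerConnection,
# creates an offer, and extracts the RTP capabilities from its SDP.
caps = asyncio.run(handler.getNativeRtpCapabilities())
print(caps)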
from functools import partial import numpy as np from keras.preprocessing.image import img_to_array from keras.preprocessing.image import load_img from toolbox.image import bicubic_rescale from toolbox.image import modcrop from toolbox.paths import data_dir def load_set(name, lr_sub_size=11, lr_sub_stride=5, scale=3): hr_sub_size = lr_sub_size * scale hr_sub_stride = lr_sub_stride * scale lr_gen_sub = partial(generate_sub_images, size=lr_sub_size, stride=lr_sub_stride) hr_gen_sub = partial(generate_sub_images, size=hr_sub_size, stride=hr_sub_stride) lr_sub_arrays = [] hr_sub_arrays = [] for path in (data_dir / name).glob('*'): lr_image, hr_image = load_image_pair(str(path), scale=scale) lr_sub_arrays += [img_to_array(img) for img in lr_gen_sub(lr_image)] hr_sub_arrays += [img_to_array(img) for img in hr_gen_sub(hr_image)] x = np.stack(lr_sub_arrays) y = np.stack(hr_sub_arrays) return x, y def load_image_pair(path, scale=3): image = load_img(path) image = image.convert('YCbCr') hr_image = modcrop(image, scale) lr_image = bicubic_rescale(hr_image, 1 / scale) return lr_image, hr_image def generate_sub_images(image, size, stride): for i in range(0, image.size[0] - size + 1, stride): for j in range(0, image.size[1] - size + 1, stride): yield image.crop([i, j, i + size, j + size])
[ "numpy.stack", "functools.partial", "toolbox.image.bicubic_rescale", "keras.preprocessing.image.img_to_array", "keras.preprocessing.image.load_img", "toolbox.image.modcrop" ]
[((421, 489), 'functools.partial', 'partial', (['generate_sub_images'], {'size': 'lr_sub_size', 'stride': 'lr_sub_stride'}), '(generate_sub_images, size=lr_sub_size, stride=lr_sub_stride)\n', (428, 489), False, 'from functools import partial\n'), ((532, 600), 'functools.partial', 'partial', (['generate_sub_images'], {'size': 'hr_sub_size', 'stride': 'hr_sub_stride'}), '(generate_sub_images, size=hr_sub_size, stride=hr_sub_stride)\n', (539, 600), False, 'from functools import partial\n'), ((949, 972), 'numpy.stack', 'np.stack', (['lr_sub_arrays'], {}), '(lr_sub_arrays)\n', (957, 972), True, 'import numpy as np\n'), ((981, 1004), 'numpy.stack', 'np.stack', (['hr_sub_arrays'], {}), '(hr_sub_arrays)\n', (989, 1004), True, 'import numpy as np\n'), ((1071, 1085), 'keras.preprocessing.image.load_img', 'load_img', (['path'], {}), '(path)\n', (1079, 1085), False, 'from keras.preprocessing.image import load_img\n'), ((1136, 1157), 'toolbox.image.modcrop', 'modcrop', (['image', 'scale'], {}), '(image, scale)\n', (1143, 1157), False, 'from toolbox.image import modcrop\n'), ((1173, 1209), 'toolbox.image.bicubic_rescale', 'bicubic_rescale', (['hr_image', '(1 / scale)'], {}), '(hr_image, 1 / scale)\n', (1188, 1209), False, 'from toolbox.image import bicubic_rescale\n'), ((813, 830), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (825, 830), False, 'from keras.preprocessing.image import img_to_array\n'), ((890, 907), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (902, 907), False, 'from keras.preprocessing.image import img_to_array\n')]
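The stride arithmetic inside `generate_sub_images` above fixes how many training patches each image contributes; the closed form below (a hypothetical helper mirroring the two range() loops, not part of the original module) makes the count explicit:

def num_patches(width: int, height: int, size: int, stride: int) -> int:
    # One patch per (i, j) start position the two range() loops accept.
    return ((width - size) // stride + 1) * ((height - size) // stride + 1)

# e.g. a 33x33 LR image with lr_sub_size=11, lr_sub_stride=5 yields 5 * 5 = 25 patches
assert num_patches(33, 33, size=11, stride=5) == 25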
import logging from typing import Tuple, Any, List, Dict import torch from torch.fx.node import map_aggregate from .quantization_state import ( AutoQuantizationState, ) from .utils import ( trace_with_inputs, is_leaf, HookType, get_torch_function_hook_type, get_module_hook_type, OpQuantizeabilityType, ) from .model_utils import ( pack_weights_for_functionals, attach_scale_zp_values_to_model, attach_op_convert_info_to_model, attach_output_convert_info_to_model, ) from . import auto_trace_rewriter from torch.ao.quantization import is_activation_post_process logger = logging.getLogger('auto_trace') logging.basicConfig(level=logging.DEBUG) # logging.basicConfig(level=logging.INFO) # enabling this tanks performance, make sure to disable for benchmarking # TODO(future PR): clean this up enable_logging = False # enable_logging = True def add_auto_observation( model : torch.nn.Module, qconfig_dict: Dict[str, Any], example_inputs: Tuple[Any], input_dtypes: Any = (torch.float,), # must be same structure as model inputs prepare_custom_config_dict: Dict[str, Any] = None, ) -> torch.nn.Module: if prepare_custom_config_dict is None: prepare_custom_config_dict = {} output_dtypes = prepare_custom_config_dict.get('output_dtypes', (torch.float,)) def convert_to_interception_proxy(x): if isinstance(x, torch.Tensor): return x.as_subclass(QuantizationPrepareTensorProxy) # type: ignore[arg-type] else: return x cur_module = None first_call = True module_stack : List[torch.nn.Module] = [] # Counter for tensor IDs, will be modified inplace by quant state. # This is used to track tensors from output ops to input ops. For example, # if op_n had a tensor output with id=1, and op_n+2 had a tensor input with # id=1, we know that the output of op_n is the input to op_n+2. Note, # this is a list because it needs to incremented inplace. qtensor_id = [0] module_id_to_fqn: Dict[int, str] = {} # Counter for global quantizeable ops, useful for intermediate activation # logging. global_op_idx = [0] global_disable_torch_function_override = False class QuantizationPrepareTensorProxy(torch.Tensor): """ An override of `torch.Tensor` to enable dynamic tracing for quantization. For each function with a `__torch_function__` override, this proxy does the following for functions which need quantization: 1. calls `_auto_quant_state.validate_cur_op` to validate that the currently seen op is the same as what was recorded during tracing 2. calls `_auto_quant_state.op_prepare_before_hook` 3. executes the original function 4. calls `_auto_quant_state.op_prepare_after_hook` 5. calls `_auto_quant_state.mark_cur_op_complete` to increment the current op index in preparation for the next op Otherwise, calls the original function. 
""" @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): nonlocal global_disable_torch_function_override if ( # global override means disable the override here global_disable_torch_function_override or # to prevent printing things from going into an infinite loop func == torch.Tensor.__repr__ or # we don't need to override getters in this framework func.__name__ == '__get__' ): return super().__torch_function__(func, types, args, kwargs) # if we are in a function, the current module is always a parent nonlocal cur_module parent_module = cur_module if enable_logging: if not is_activation_post_process(parent_module): # logging for insides of obs/fq is not useful for this framework # fqn map does not contain observers, which is why we # cannot always assume that FQN exists fqn_for_logging = module_id_to_fqn.get( id(parent_module), 'unknown') if parent_module else None logger.debug( f' fqn:{fqn_for_logging} _tf_ {str(func)} len_args {len(args)}') nonlocal qtensor_id kwargs = kwargs if kwargs else {} hook_type = get_torch_function_hook_type(parent_module, func) if hook_type is HookType.OP_HOOKS: fqn = module_id_to_fqn[id(parent_module)] if parent_module else None qstate = parent_module._auto_quant_state # type: ignore[attr-defined] if not first_call: qstate.validate_cur_op(func) # run "before" hook if first_call: args, kwargs = qstate.first_call_op_prepare_before_hook( func, args, kwargs, qtensor_id, fqn, parent_module, OpQuantizeabilityType.QUANTIZEABLE) else: args, kwargs = qstate.op_prepare_before_hook( func, args, kwargs) # forward output = super().__torch_function__(func, types, args, kwargs) # run "after" hook if first_call: output = qstate.first_call_op_prepare_after_hook( func, output, args, qtensor_id, OpQuantizeabilityType.QUANTIZEABLE) else: output = qstate.op_prepare_after_hook( func, output, args, global_op_idx) qstate.mark_cur_op_complete(func) else: # Hook type is not HookType.OP_HOOKS, if first_call is True we # record the DAG of non-quantizeable ops. if first_call: qstate = getattr(parent_module, '_auto_quant_state', None) if qstate: fqn = module_id_to_fqn.get(id(parent_module), None) \ if parent_module else None args, kwargs = qstate.first_call_op_prepare_before_hook( func, args, kwargs, qtensor_id, fqn, parent_module, OpQuantizeabilityType.NOT_QUANTIZEABLE) output = super().__torch_function__(func, types, args, kwargs) if first_call: qstate = getattr(parent_module, '_auto_quant_state', None) if qstate: output = qstate.first_call_op_prepare_after_hook( func, output, args, qtensor_id, OpQuantizeabilityType.NOT_QUANTIZEABLE) # TODO: is this right? Don't really understand this if output is NotImplemented: with torch._C.DisableTorchFunction(): output = func(*args, **kwargs).as_subclass( QuantizationPrepareTensorProxy) assert output is not NotImplemented return output def __repr__(self): return f'QuantizationPrepareTensorProxy({super().__repr__()})' # TODO(future PR): add other math overrides class QuantizationInterceptionModule(type(model)): # type: ignore[misc] """ An override of user defined subclass of `nn.Module` to enable dynamic tracing for quantization. `cur_module` keeps track of the current module in the stack. During the fist call, an `AutoQuantizationState` object is created and attached to each non-leaf modules which we need to check for quantizeable operations. We override the `__call__` function to do the following for each module: If the module is an op which needs quantization: 1. 
calls `_auto_quant_state.validate_cur_op` to validate that the currently seen op is the same as what was recorded during tracing 2. calls parent module's `._auto_quant_state.op_prepare_before_hook` 3. executes the original module forward 4. calls parent module's `_auto_quant_state.op_prepare_after_hook` 5. calls `_auto_quant_state.mark_cur_op_complete` to increment the current op index in preparation for the next op If the module can contain children ops that need quantization: 1. calls `_auto_quant_state.inputs_prepare_hook` (not implemented yet) 2. executes the original module forward 3. calls `_auto_quant_state.outputs_prepare_hook` Otherwise, calls the original module forward. """ def __call__(self, *args, **kwargs): new_args = map_aggregate(args, convert_to_interception_proxy) new_kwargs = map_aggregate(kwargs, convert_to_interception_proxy) orig_module_call = torch.nn.Module.__call__ orig_nn_sequential_forward = torch.nn.Sequential.forward def _patched_module_call(self, *args, **kwargs): if enable_logging: fqn = module_id_to_fqn.get(id(self), None) logger.debug(f" fqn:{fqn} _cl_: {type(self)} start") nonlocal cur_module old_module = cur_module cur_module = self try: parent_module = module_stack[-1] if len(module_stack) else None module_stack.append(self) fqn = module_id_to_fqn.get(id(self), None) hook_type = get_module_hook_type(parent_module, cur_module) if hook_type is HookType.OP_HOOKS: parent_qstate: AutoQuantizationState = \ parent_module._auto_quant_state # type: ignore[union-attr, assignment] # before hooks if not first_call: parent_qstate.validate_cur_op(cur_module) # If we are in this hook, `cur_module` is a leaf module. # Therefore, we do not need to override any of its # children. Disabling the overrides for performance. nonlocal global_disable_torch_function_override old_global_disable_torch_function_override = \ global_disable_torch_function_override global_disable_torch_function_override = True if first_call: # mypy ignore is used instead of assert because this # runs on every forward and assert has a performance cost args, kwargs = parent_qstate.first_call_op_prepare_before_hook( cur_module, args, kwargs, qtensor_id, fqn, cur_module, # type: ignore[arg-type] OpQuantizeabilityType.QUANTIZEABLE) else: # mypy ignore is used instead of assert because this # runs on every forward and assert has a performance cost args, kwargs = parent_qstate.op_prepare_before_hook( cur_module, args, kwargs) # type: ignore[arg-type] # original forward output = orig_module_call(self, *args, **kwargs) # Re-enable the overrides. 
global_disable_torch_function_override = \ old_global_disable_torch_function_override # after hooks if first_call: output = parent_qstate.first_call_op_prepare_after_hook( cur_module, output, args, qtensor_id, OpQuantizeabilityType.QUANTIZEABLE) else: output = parent_qstate.op_prepare_after_hook( cur_module, output, args, global_op_idx) parent_qstate.mark_cur_op_complete(cur_module) elif hook_type is HookType.MODULE_IO_HOOKS: # TODO(future PR): add inputs io hook cur_qstate = cur_module._auto_quant_state cur_qstate.reset_to_new_call() # original forward output = orig_module_call(self, *args, **kwargs) # after hooks if first_call: output = cur_qstate.first_call_outputs_prepare_hook( output, qtensor_id) else: output = cur_qstate.outputs_prepare_hook(output) cur_qstate.validate_is_at_last_seen_idx() elif hook_type is HookType.ARG_DEQUANTS: if first_call and parent_module is not None: parent_qstate_fc = getattr( parent_module, '_auto_quant_state', None) if parent_qstate_fc: args, kwargs = \ parent_qstate_fc.first_call_op_prepare_before_hook( cur_module, args, kwargs, qtensor_id, fqn, cur_module, OpQuantizeabilityType.NOT_QUANTIZEABLE) output = orig_module_call(self, *args, **kwargs) # if this fp32 was inplace, make sure to set the output dtype # back to torch.float if hasattr(output, '_qtensor_info'): del output._qtensor_info if first_call and parent_module is not None: parent_qstate_fc = getattr( parent_module, '_auto_quant_state', None) if parent_qstate_fc: output = \ parent_qstate_fc.first_call_op_prepare_after_hook( cur_module, output, args, qtensor_id, OpQuantizeabilityType.NOT_QUANTIZEABLE) else: output = orig_module_call(self, *args, **kwargs) if enable_logging: fqn = module_id_to_fqn.get(id(self), None) logger.debug(f" fqn:{fqn} _cl_: {type(self)} end") return output finally: module_stack.pop() cur_module = old_module torch.nn.Module.__call__ = _patched_module_call torch.nn.Sequential.forward = _nn_sequential_patched_forward # type: ignore[assignment] nonlocal first_call try: if first_call: # Create a list before iterating because we are adding new # named modules inside the loop. named_modules = list(self.named_modules()) # Record module instances which are leaves or children of leaves leaves = set() for fqn, child in named_modules: if is_leaf(child, prepare_custom_config_dict): for _, child_child in child.named_modules(): leaves.add(child_child) for fqn, v in named_modules: # fqn is the global FQN, i.e. 
'foo.bar.baz'
                        # v is the module instance
                        #
                        # we need to associate the global FQN with SeenOp
                        # for modules, this is the module FQN
                        # for functions, this is the parent module FQN
                        module_id_to_fqn[id(v)] = fqn
                        if v in leaves:
                            continue
                        if v is self:
                            # for the top level module only, specify input
                            # and output dtypes
                            v._auto_quant_state = AutoQuantizationState(
                                qconfig_dict, fqn,
                                input_dtypes, output_dtypes)
                            pass
                        else:
                            v._auto_quant_state = AutoQuantizationState(
                                qconfig_dict, fqn)
                global_op_idx[0] = 0
                output = super().__call__(*new_args, **new_kwargs)
                if first_call:
                    for _, v in self.named_modules():
                        if hasattr(v, '_auto_quant_state'):
                            v._auto_quant_state.match_fusion_patterns()
                            v._auto_quant_state.insert_observers(v)
                return output
            finally:
                torch.nn.Module.__call__ = orig_module_call
                torch.nn.Sequential.forward = orig_nn_sequential_forward  # type: ignore[assignment]
                first_call = False

    model.__class__ = QuantizationInterceptionModule
    # create the graph
    trace_with_inputs(model, example_inputs)
    return model


def add_auto_convert(module : torch.nn.Module) -> torch.nn.Module:
    def convert_to_dispatch_proxy(x):
        if isinstance(x, torch.Tensor):
            return x.as_subclass(QuantizationConvertTensorProxy)  # type: ignore[arg-type]
        else:
            return x

    module_id_to_fqn: Dict[int, str] = {}
    # Counter for global quantizeable ops, useful for intermediate activation
    # logging.
    global_op_idx = [0]

    global_disable_torch_function_override = False

    class QuantizationConvertTensorProxy(torch.Tensor):
        """
        An override of `torch.Tensor` to enable dynamic dispatch for
        quantization inference.

        For each function with a `__torch_function__` override, this proxy does
        the following for functions which need quantization:

        1. calls `_auto_quant_state.validate_cur_op` to validate that
           the currently seen op is the same as what was recorded during tracing
        2. calls `_auto_quant_state.op_convert_before_hook`.
        3. executes the function, with target, args and kwargs possibly modified
           by (2)
        4. calls `_auto_quant_state.inference_function_after_hook`.
        5. calls `_auto_quant_state.mark_cur_op_complete` to increment the
           current op index in preparation for the next op

        Otherwise, calls the original function.
""" @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): nonlocal global_disable_torch_function_override if ( # global override means disable the override here global_disable_torch_function_override or # to prevent printing things from going into an infinite loop func == torch.Tensor.__repr__ or # we don't need to override getters in this framework func.__name__ == '__get__' ): return super().__torch_function__(func, types, args, kwargs) kwargs = kwargs if kwargs else {} # if we are in a function, the current module is always a parent parent_module = cur_module hook_type = get_torch_function_hook_type(parent_module, func) if enable_logging: fqn_for_logging = module_id_to_fqn.get( id(parent_module), 'unknown') if parent_module else None logger.debug( f" fqn:{fqn_for_logging} _tf_ {func} " + f"hook_type {hook_type} " + # f"arg_types {[type(arg) for arg in args]}) " + f"arg_dtypes {[arg.dtype if isinstance(arg, torch.Tensor) else None for arg in args]}") if hook_type is HookType.OP_HOOKS: qstate: AutoQuantizationState = parent_module._auto_quant_state # type: ignore[union-attr] # before hooks qstate.validate_cur_op(func) func, args, kwargs = qstate.op_convert_before_hook( func, args, kwargs, parent_module) # type: ignore[arg-type] # forward output = super().__torch_function__(func, types, args, kwargs) # after hooks output = qstate.op_convert_after_hook( func, output, global_op_idx) qstate.mark_cur_op_complete(func) elif hook_type is HookType.ARG_DEQUANTS: # TODO(future PR): handle more dtypes new_args = [] for arg in args: if isinstance(arg, torch.Tensor) and arg.is_quantized: new_args.append(arg.dequantize()) else: new_args.append(arg) args = tuple(new_args) output = super().__torch_function__(func, types, args, kwargs) else: # HookType.NONE output = super().__torch_function__(func, types, args, kwargs) # TODO: is this right? Don't really understand this if output is NotImplemented: with torch._C.DisableTorchFunction(): output = func(*args, **kwargs).as_subclass( QuantizationConvertTensorProxy) assert output is not NotImplemented if enable_logging: fqn_for_logging = module_id_to_fqn.get( id(parent_module), 'unknown') if parent_module else None out_dtype = None if isinstance(output, torch.Tensor): out_dtype = output.dtype logger.debug(f" fqn:{fqn_for_logging} _tf_ {func} out {out_dtype} end") return output def __repr__(self): return f'QuantizationConvertTensorProxy({super().__repr__()})' cur_module = None module_stack : List[torch.nn.Module] = [] assert len(module.__class__.__bases__) == 1 class QuantizationDispatchModule(module.__class__.__bases__[0]): # type: ignore[name-defined] """ An override of user defined subclass of `nn.Module` to enable dynamic tracing for quantization, after model conversion to quantized domain. `cur_module` keeps track of the current module in the stack. Tensor arguments are converted to `QuantizationConvertTensorProxy`. We override the `__call__` function to do the following for each module: If the module is an op which needs quantization: 1. calls `_auto_quant_state.validate_cur_op` to validate that the currently seen op is the same as what was recorded during tracing 2. calls parent module's `._auto_quant_state.op_convert_before_hook` 3. executes the original module forward 4. calls parent module's `_auto_quant_state.op_convert_after_hook` 5. calls `_auto_quant_state.mark_cur_op_complete` to increment the current op index in preparation for the next op If the module can contain children ops that need quantization: 1. 
calls `_auto_quant_state.inputs_convert_hook` (not implemented yet) 2. executes the original module forward 3. calls `_auto_quant_state.outputs_convert_hook` Otherwise, calls the original module forward. """ def __call__(self, *args, **kwargs): new_args = map_aggregate(args, convert_to_dispatch_proxy) new_kwargs = map_aggregate(kwargs, convert_to_dispatch_proxy) orig_module_call = torch.nn.Module.__call__ orig_nn_sequential_forward = torch.nn.Sequential.forward def _patched_module_call(self, *args, **kwargs): nonlocal cur_module old_module = cur_module cur_module = self nonlocal global_disable_torch_function_override try: parent_module = module_stack[-1] if len(module_stack) else None module_stack.append(self) hook_type = get_module_hook_type(parent_module, cur_module) if enable_logging: fqn_for_logging = module_id_to_fqn.get(id(self), None) logger.debug( f" fqn: {fqn_for_logging} " + f"_cl_ {type(self)} " + f"arg_dtypes {[arg.dtype if isinstance(arg, torch.Tensor) else None for arg in args]} " + f"hook_type {hook_type}") if hook_type is HookType.OP_HOOKS: # before hooks qstate: AutoQuantizationState = \ parent_module._auto_quant_state # type: ignore[union-attr, assignment] qstate.validate_cur_op(cur_module) # If we are in this hook, `cur_module` is a leaf module. # Therefore, we do not need to override any of its # children. Disabling the overrides for performance. old_global_disable_torch_function_override = \ global_disable_torch_function_override global_disable_torch_function_override = True _, args, kwargs = qstate.op_convert_before_hook( cur_module, args, kwargs, cur_module) # forward output = orig_module_call(self, *args, **kwargs) # after hooks output = qstate.op_convert_after_hook( cur_module, output, global_op_idx) # Re-enable the override. 
global_disable_torch_function_override = \ old_global_disable_torch_function_override qstate.mark_cur_op_complete(cur_module) elif hook_type is HookType.MODULE_IO_HOOKS: cur_qstate: AutoQuantizationState = cur_module._auto_quant_state cur_qstate.reset_to_new_call() # before hooks (TODO) # forward output = orig_module_call(self, *args, **kwargs) # after hooks # For the sake of performance, we assume no overrides # are needed for quantizing/dequantizing things old_global_disable_torch_function_override = \ global_disable_torch_function_override global_disable_torch_function_override = True output = cur_qstate.outputs_convert_hook(output) global_disable_torch_function_override = \ old_global_disable_torch_function_override cur_qstate.validate_is_at_last_seen_idx() elif hook_type is HookType.ARG_DEQUANTS: # TODO(future PR): handle more dtypes new_args = [] for arg in args: if isinstance(arg, torch.Tensor) and arg.is_quantized: dequant = arg.dequantize().as_subclass( QuantizationConvertTensorProxy) # type: ignore[arg-type] new_args.append(dequant) else: new_args.append(arg) args = tuple(new_args) output = orig_module_call(self, *args, **kwargs) else: output = orig_module_call(self, *args, **kwargs) if enable_logging: fqn_for_logging = module_id_to_fqn.get(id(self), None) logger.debug( f" fqn: {fqn_for_logging} " + f"_cl_ {type(self)} " + f"dtype {output.dtype if isinstance(output, torch.Tensor) else None} " + "end") return output finally: module_stack.pop() cur_module = old_module torch.nn.Module.__call__ = _patched_module_call torch.nn.Sequential.forward = _nn_sequential_patched_forward # type: ignore[assignment] try: global_op_idx[0] = 0 output = super().__call__(*new_args, **new_kwargs) def unwrap_proxy(a): if isinstance(a, QuantizationConvertTensorProxy): a.__class__ = torch.Tensor # type: ignore[assignment] return a output = map_aggregate(output, unwrap_proxy) return output finally: torch.nn.Module.__call__ = orig_module_call torch.nn.Sequential.forward = orig_nn_sequential_forward # type: ignore[assignment] def rewrite_for_scripting(self): return auto_trace_rewriter.rewrite_for_scripting(self) pack_weights_for_functionals(module) attach_scale_zp_values_to_model(module) attach_op_convert_info_to_model(module) attach_output_convert_info_to_model(module) # Since eager mode convert could have changed the IDs of some modules, # populate the FQN map again for k, v in module.named_modules(): module_id_to_fqn[id(v)] = k module.__class__ = QuantizationDispatchModule return module # AutoQuantizationState lives in parent module's _modules. # Currently, `torch.nn.Sequential`'s forward iterates over all # items in _modules. To avoid changing the meaning of the program, for # now we patch the forward to ignore our quantization state. # Note: this is a hackedy hack, before launching we should consider # checking the fix into `torch.nn.Sequential` to avoid the patch. def _nn_sequential_patched_forward(cls, input): for module in cls: if not isinstance(module, AutoQuantizationState): input = module(input) return input
[ "torch.ao.quantization.is_activation_post_process", "logging.basicConfig", "torch.fx.node.map_aggregate", "torch._C.DisableTorchFunction", "logging.getLogger" ]
[((618, 649), 'logging.getLogger', 'logging.getLogger', (['"""auto_trace"""'], {}), "('auto_trace')\n", (635, 649), False, 'import logging\n'), ((650, 690), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (669, 690), False, 'import logging\n'), ((8897, 8947), 'torch.fx.node.map_aggregate', 'map_aggregate', (['args', 'convert_to_interception_proxy'], {}), '(args, convert_to_interception_proxy)\n', (8910, 8947), False, 'from torch.fx.node import map_aggregate\n'), ((8973, 9025), 'torch.fx.node.map_aggregate', 'map_aggregate', (['kwargs', 'convert_to_interception_proxy'], {}), '(kwargs, convert_to_interception_proxy)\n', (8986, 9025), False, 'from torch.fx.node import map_aggregate\n'), ((24174, 24220), 'torch.fx.node.map_aggregate', 'map_aggregate', (['args', 'convert_to_dispatch_proxy'], {}), '(args, convert_to_dispatch_proxy)\n', (24187, 24220), False, 'from torch.fx.node import map_aggregate\n'), ((24246, 24294), 'torch.fx.node.map_aggregate', 'map_aggregate', (['kwargs', 'convert_to_dispatch_proxy'], {}), '(kwargs, convert_to_dispatch_proxy)\n', (24259, 24294), False, 'from torch.fx.node import map_aggregate\n'), ((29672, 29707), 'torch.fx.node.map_aggregate', 'map_aggregate', (['output', 'unwrap_proxy'], {}), '(output, unwrap_proxy)\n', (29685, 29707), False, 'from torch.fx.node import map_aggregate\n'), ((3874, 3915), 'torch.ao.quantization.is_activation_post_process', 'is_activation_post_process', (['parent_module'], {}), '(parent_module)\n', (3900, 3915), False, 'from torch.ao.quantization import is_activation_post_process\n'), ((7026, 7057), 'torch._C.DisableTorchFunction', 'torch._C.DisableTorchFunction', ([], {}), '()\n', (7055, 7057), False, 'import torch\n'), ((21885, 21916), 'torch._C.DisableTorchFunction', 'torch._C.DisableTorchFunction', ([], {}), '()\n', (21914, 21916), False, 'import torch\n')]
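Taken together, the two entry points above imply a calibrate-then-convert workflow roughly like the following sketch (it assumes `model`, `qconfig_dict`, `example_inputs`, and `calibration_batches` already exist; the names are illustrative, not part of the module):

import torch

# 1. Trace the model and attach observers; the first forward records the op graph.
model = add_auto_observation(model, qconfig_dict, example_inputs)

# 2. Calibrate: every forward pass now feeds the attached observers.
with torch.no_grad():
    for batch in calibration_batches:
        model(batch)

# 3. Convert: subsequent forwards dispatch through QuantizationConvertTensorProxy.
model = add_auto_convert(model)
output = model(*example_inputs)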
import datetime
import os

from sagemaker.sklearn import SKLearn
import typer


def run_sklearn_sagemaker(
    data_path,
    model_path,
    job_name_prefix="sklearn",
    instance_type="local",
    role=os.environ.get("AWS_SAGEMAKER_ROLE"),
    min_df: int = 5,
    max_ngram: int = 1,
    stopwords="english",
    loss="hinge",
    learning_rate: float = 1e-4,
):
    hyperparameters = {
        "min_df": min_df,
        "max_ngram": max_ngram,
        "stopwords": stopwords,
        "loss": loss,
        "learning_rate": learning_rate,
    }
    sk = SKLearn(
        entry_point="src/train_sklearn.py",
        git_config={"repo": "https://github.com/nsorros/sagemaker_examples.git"},
        framework_version="0.20.0",
        instance_type=instance_type,
        instance_count=1,
        role=role,
        hyperparameters=hyperparameters,
        output_path=model_path,
    )
    now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    job_name = f"{job_name_prefix}-{now}"
    print(f"Job name: {job_name}")
    sk.fit({"train": data_path}, job_name=job_name)


if __name__ == "__main__":
    typer.run(run_sklearn_sagemaker)
[ "os.environ.get", "typer.run", "sagemaker.sklearn.SKLearn", "datetime.datetime.now" ]
[((205, 241), 'os.environ.get', 'os.environ.get', (['"""AWS_SAGEMAKER_ROLE"""'], {}), "('AWS_SAGEMAKER_ROLE')\n", (219, 241), False, 'import os\n'), ((558, 831), 'sagemaker.sklearn.SKLearn', 'SKLearn', ([], {'entry_point': '"""src/train_sklearn.py"""', 'git_config': "{'repo': 'https://github.com/nsorros/sagemaker_examples.git'}", 'framework_version': '"""0.20.0"""', 'instance_type': 'instance_type', 'instance_count': '(1)', 'role': 'role', 'hyperparameters': 'hyperparameters', 'output_path': 'model_path'}), "(entry_point='src/train_sklearn.py', git_config={'repo':\n 'https://github.com/nsorros/sagemaker_examples.git'}, framework_version\n ='0.20.0', instance_type=instance_type, instance_count=1, role=role,\n hyperparameters=hyperparameters, output_path=model_path)\n", (565, 831), False, 'from sagemaker.sklearn import SKLearn\n'), ((1118, 1150), 'typer.run', 'typer.run', (['run_sklearn_sagemaker'], {}), '(run_sklearn_sagemaker)\n', (1127, 1150), False, 'import typer\n'), ((901, 924), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (922, 924), False, 'import datetime\n')]
from datetime import datetime

import numpy as NP
import scipy.optimize
import sympy as SYM
import pylab as PL


def chapman(z, Nm, Hm, H_O, exp=NP.exp):
    """
    Return Chapman function electron density at height *z*, maximum
    electron density at the F-peak *Nm*, height at the maximum *Hm*, and
    the scale height of atomic oxygen *H_O*. The exponential function
    can be overridden with *exp*.
    """
    return Nm * exp((1 - ((z - Hm) / H_O) - exp(-(z - Hm) / H_O)) / 2)


def chapman_sym(z, Nm, Hm, H_O):
    """
    Return symbolic (i.e., :module:`sympy`) Chapman electron density
    profile function.
    """
    return chapman(z, Nm, Hm, H_O, exp=SYM.exp)


def chapman_vec(z_vec, Nm_vec, Hm_vec, H_O_vec):
    """
    Vectorized implementation of the Chapman function evaluation routine
    :func:`chapman`. The input arguments must be sequences with the same
    length and the output is an :class:`NP.ndarray` with that length.
    """
    try:
        chapman_vec._chapman_sym_f
    except AttributeError:
        sym_vars = SYM.symbols('z Nm Hm H_O')
        chapman_vec._chapman_sym_f = SYM.lambdify(sym_vars, chapman_sym(*sym_vars), modules='numexpr')
    return chapman_vec._chapman_sym_f(z_vec, Nm_vec, Hm_vec, H_O_vec)


def chapman_fit(alt, ne,
                x0=[1e6, 300, 50],
                bounds=[(1, None), (150, 500), (30, 80)],
                verbose=False,
                **kwds):
    """
    Fit a Chapman profile to the electron densities *ne* sampled at heights
    *alt* using L-BFGS-B, starting from *x0* and subject to *bounds*.
    Return the fitted parameters (Nm, Hm, H_O).
    """
    # optimization setup
    z, Nm, Hm, H_O = SYM.symbols('z Nm Hm H_O')
    chapman = chapman_sym(z, Nm, Hm, H_O)
    dNm = SYM.diff(chapman, Nm)
    dHm = SYM.diff(chapman, Hm)
    dH_O = SYM.diff(chapman, H_O)
    chapman_f = SYM.lambdify((z, Nm, Hm, H_O), chapman, modules='numexpr')
    dNm_f = SYM.lambdify((z, Nm, Hm, H_O), dNm, modules='numexpr')
    dHm_f = SYM.lambdify((z, Nm, Hm, H_O), dHm, modules='numexpr')
    dH_O_f = SYM.lambdify((z, Nm, Hm, H_O), dH_O, modules='numexpr')
    # define cost function
    y = NP.asarray(ne)
    def J(x):
        Nm, Hm, H_O = x
        if verbose:
            print('-' * 80)
            print(x)
        y_hat = NP.array([chapman_f(z, Nm, Hm, H_O) for z in alt])
        diff = y - y_hat
        J1 = NP.array([dNm_f(z, Nm, Hm, H_O) for z in alt])
        J2 = NP.array([dHm_f(z, Nm, Hm, H_O) for z in alt])
        J3 = NP.array([dH_O_f(z, Nm, Hm, H_O) for z in alt])
        return (NP.dot(diff, diff),
                NP.array([-2 * NP.sum(diff * J1),
                          -2 * NP.sum(diff * J2),
                          -2 * NP.sum(diff * J3)]))
    # minimize cost function
    x_star, f, d = scipy.optimize.fmin_l_bfgs_b(J, x0, bounds=bounds, **kwds)
    assert d['warnflag'] == 0
    return x_star


if __name__ == '__main__':
    from pyglow.pyglow import Point

    N = 200
    alt = NP.linspace(100, 1500, N)
    dt = datetime(2000, 1, 1)
    lat = 0
    lon = 0
    iri_ne = []
    for alt_i in alt:
        point = Point(dt, lat, lon, alt_i)
        point.run_iri()
        iri_ne.append(point.ne)
    Nm_star, Hm_star, H_O_star = chapman_fit(alt, iri_ne, verbose=True)
    chapman_ne = [chapman(z, Nm_star, Hm_star, H_O_star) for z in alt]

    fig = PL.figure(figsize=(6,10))
    PL.plot(iri_ne, alt, color='b', label='IRI')
    PL.plot(chapman_ne, alt, color='g', label='Chapman fit')
    PL.legend()
    PL.xlabel('Electron density [cm$^{-3}$]')
    PL.ylabel('Height [km]')
    PL.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
    PL.axis('tight')
    PL.show()
[ "sympy.symbols", "pylab.show", "numpy.sum", "pylab.axis", "numpy.asarray", "sympy.diff", "pylab.ylabel", "sympy.lambdify", "datetime.datetime", "pylab.figure", "numpy.linspace", "pylab.xlabel", "numpy.dot", "pyglow.pyglow.Point", "pylab.ticklabel_format", "pylab.legend", "pylab.plot" ]
[((1769, 1795), 'sympy.symbols', 'SYM.symbols', (['"""z Nm Hm H_O"""'], {}), "('z Nm Hm H_O')\n", (1780, 1795), True, 'import sympy as SYM\n'), ((1848, 1869), 'sympy.diff', 'SYM.diff', (['chapman', 'Nm'], {}), '(chapman, Nm)\n', (1856, 1869), True, 'import sympy as SYM\n'), ((1880, 1901), 'sympy.diff', 'SYM.diff', (['chapman', 'Hm'], {}), '(chapman, Hm)\n', (1888, 1901), True, 'import sympy as SYM\n'), ((1913, 1935), 'sympy.diff', 'SYM.diff', (['chapman', 'H_O'], {}), '(chapman, H_O)\n', (1921, 1935), True, 'import sympy as SYM\n'), ((1952, 2010), 'sympy.lambdify', 'SYM.lambdify', (['(z, Nm, Hm, H_O)', 'chapman'], {'modules': '"""numexpr"""'}), "((z, Nm, Hm, H_O), chapman, modules='numexpr')\n", (1964, 2010), True, 'import sympy as SYM\n'), ((2081, 2135), 'sympy.lambdify', 'SYM.lambdify', (['(z, Nm, Hm, H_O)', 'dNm'], {'modules': '"""numexpr"""'}), "((z, Nm, Hm, H_O), dNm, modules='numexpr')\n", (2093, 2135), True, 'import sympy as SYM\n'), ((2198, 2252), 'sympy.lambdify', 'SYM.lambdify', (['(z, Nm, Hm, H_O)', 'dHm'], {'modules': '"""numexpr"""'}), "((z, Nm, Hm, H_O), dHm, modules='numexpr')\n", (2210, 2252), True, 'import sympy as SYM\n'), ((2316, 2371), 'sympy.lambdify', 'SYM.lambdify', (['(z, Nm, Hm, H_O)', 'dH_O'], {'modules': '"""numexpr"""'}), "((z, Nm, Hm, H_O), dH_O, modules='numexpr')\n", (2328, 2371), True, 'import sympy as SYM\n'), ((2459, 2473), 'numpy.asarray', 'NP.asarray', (['ne'], {}), '(ne)\n', (2469, 2473), True, 'import numpy as NP\n'), ((3429, 3454), 'numpy.linspace', 'NP.linspace', (['(100)', '(1500)', 'N'], {}), '(100, 1500, N)\n', (3440, 3454), True, 'import numpy as NP\n'), ((3465, 3485), 'datetime.datetime', 'datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (3473, 3485), False, 'from datetime import datetime\n'), ((3804, 3830), 'pylab.figure', 'PL.figure', ([], {'figsize': '(6, 10)'}), '(figsize=(6, 10))\n', (3813, 3830), True, 'import pylab as PL\n'), ((3834, 3878), 'pylab.plot', 'PL.plot', (['iri_ne', 'alt'], {'color': '"""b"""', 'label': '"""IRI"""'}), "(iri_ne, alt, color='b', label='IRI')\n", (3841, 3878), True, 'import pylab as PL\n'), ((3919, 3975), 'pylab.plot', 'PL.plot', (['chapman_ne', 'alt'], {'color': '"""g"""', 'label': '"""Chapman fit"""'}), "(chapman_ne, alt, color='g', label='Chapman fit')\n", (3926, 3975), True, 'import pylab as PL\n'), ((4016, 4027), 'pylab.legend', 'PL.legend', ([], {}), '()\n', (4025, 4027), True, 'import pylab as PL\n'), ((4032, 4073), 'pylab.xlabel', 'PL.xlabel', (['"""Electron density [cm$^{-3}$]"""'], {}), "('Electron density [cm$^{-3}$]')\n", (4041, 4073), True, 'import pylab as PL\n'), ((4078, 4102), 'pylab.ylabel', 'PL.ylabel', (['"""Height [km]"""'], {}), "('Height [km]')\n", (4087, 4102), True, 'import pylab as PL\n'), ((4107, 4167), 'pylab.ticklabel_format', 'PL.ticklabel_format', ([], {'style': '"""sci"""', 'axis': '"""x"""', 'scilimits': '(0, 0)'}), "(style='sci', axis='x', scilimits=(0, 0))\n", (4126, 4167), True, 'import pylab as PL\n'), ((4171, 4187), 'pylab.axis', 'PL.axis', (['"""tight"""'], {}), "('tight')\n", (4178, 4187), True, 'import pylab as PL\n'), ((4193, 4202), 'pylab.show', 'PL.show', ([], {}), '()\n', (4200, 4202), True, 'import pylab as PL\n'), ((3565, 3591), 'pyglow.pyglow.Point', 'Point', (['dt', 'lat', 'lon', 'alt_i'], {}), '(dt, lat, lon, alt_i)\n', (3570, 3591), False, 'from pyglow.pyglow import Point\n'), ((1053, 1079), 'sympy.symbols', 'SYM.symbols', (['"""z Nm Hm H_O"""'], {}), "('z Nm Hm H_O')\n", (1064, 1079), True, 'import sympy as SYM\n'), ((2870, 2888), 'numpy.dot', 'NP.dot', (['diff', 'diff'], {}), '(diff, diff)\n', (2876, 2888), True, 'import numpy as NP\n'), ((2921, 2938), 'numpy.sum', 'NP.sum', (['(diff * J1)'], {}), '(diff * J1)\n', (2927, 2938), True, 'import numpy as NP\n'), ((2971, 2988), 'numpy.sum', 'NP.sum', (['(diff * J2)'], {}), '(diff * J2)\n', (2977, 2988), True, 'import numpy as NP\n'), ((3021, 3038), 'numpy.sum', 'NP.sum', (['(diff * J3)'], {}), '(diff * J3)\n', (3027, 3038), True, 'import numpy as NP\n')]
import asyncio
from contextlib import nullcontext, AsyncExitStack
from typing import Optional, TYPE_CHECKING

from jina.clients.base.helper import HTTPClientlet
from jina.clients.base import BaseClient
from jina.clients.helper import callback_exec, callback_exec_on_error
from jina.excepts import BadClient
from jina.importer import ImportExtensions
from jina.logging.profile import ProgressBar
from jina.types.request import Request
from jina.serve.stream import RequestStreamer
from jina.types.request.data import DataRequest

if TYPE_CHECKING:
    from jina.clients.base import InputType, CallbackFnType


class HTTPBaseClient(BaseClient):
    """A MixIn for HTTP Client."""

    async def _get_results(
        self,
        inputs: 'InputType',
        on_done: 'CallbackFnType',
        on_error: Optional['CallbackFnType'] = None,
        on_always: Optional['CallbackFnType'] = None,
        **kwargs,
    ):
        """
        :param inputs: the callable
        :param on_done: the callback for on_done
        :param on_error: the callback for on_error
        :param on_always: the callback for on_always
        :param kwargs: kwargs for _get_task_name and _get_requests
        :yields: generator over results
        """
        with ImportExtensions(required=True):
            import aiohttp

        self.inputs = inputs
        request_iterator = self._get_requests(**kwargs)

        async with AsyncExitStack() as stack:
            try:
                cm1 = ProgressBar(
                    total_length=self._inputs_length, disable=not (self.show_progress)
                )
                p_bar = stack.enter_context(cm1)

                proto = 'https' if self.args.https else 'http'
                url = f'{proto}://{self.args.host}:{self.args.port}/post'
                iolet = await stack.enter_async_context(
                    HTTPClientlet(url=url, logger=self.logger)
                )

                def _request_handler(request: 'Request') -> 'asyncio.Future':
                    """
                    For HTTP Client, for each request in the iterator, we
                    `send_message` using http POST request and add it to the
                    list of tasks which is awaited and yielded.

                    :param request: current request in the iterator
                    :return: asyncio Task for sending message
                    """
                    return asyncio.ensure_future(iolet.send_message(request=request))

                def _result_handler(result):
                    return result

                streamer = RequestStreamer(
                    self.args,
                    request_handler=_request_handler,
                    result_handler=_result_handler,
                )

                async for response in streamer.stream(request_iterator):
                    r_status = response.status
                    r_str = await response.json()
                    if r_status == 404:
                        raise BadClient(f'no such endpoint {url}')
                    elif r_status < 200 or r_status > 300:
                        raise ValueError(r_str)

                    da = None
                    if 'data' in r_str and r_str['data'] is not None:
                        from docarray import DocumentArray

                        da = DocumentArray.from_dict(r_str['data'])
                        del r_str['data']

                    resp = DataRequest(r_str)
                    if da is not None:
                        resp.data.docs = da

                    callback_exec(
                        response=resp,
                        on_error=on_error,
                        on_done=on_done,
                        on_always=on_always,
                        continue_on_error=self.continue_on_error,
                        logger=self.logger,
                    )
                    if self.show_progress:
                        p_bar.update()
                    yield resp
            except aiohttp.ClientError as e:
                self.logger.error(
                    f'Error while fetching response from HTTP server {e!r}'
                )
                if on_error or on_always:
                    if on_error:
                        callback_exec_on_error(on_error, e, self.logger)
                    if on_always:
                        callback_exec(
                            response=None,
                            on_error=None,
                            on_done=None,
                            on_always=on_always,
                            continue_on_error=self.continue_on_error,
                            logger=self.logger,
                        )
                else:
                    raise e
[ "jina.types.request.data.DataRequest", "docarray.DocumentArray.from_dict", "jina.logging.profile.ProgressBar", "jina.importer.ImportExtensions", "jina.clients.helper.callback_exec", "jina.serve.stream.RequestStreamer", "jina.clients.base.helper.HTTPClientlet", "contextlib.AsyncExitStack", "jina.excepts.BadClient", "jina.clients.helper.callback_exec_on_error" ]
[((1250, 1281), 'jina.importer.ImportExtensions', 'ImportExtensions', ([], {'required': '(True)'}), '(required=True)\n', (1266, 1281), False, 'from jina.importer import ImportExtensions\n'), ((1416, 1432), 'contextlib.AsyncExitStack', 'AsyncExitStack', ([], {}), '()\n', (1430, 1432), False, 'from contextlib import nullcontext, AsyncExitStack\n'), ((1482, 1559), 'jina.logging.profile.ProgressBar', 'ProgressBar', ([], {'total_length': 'self._inputs_length', 'disable': '(not self.show_progress)'}), '(total_length=self._inputs_length, disable=not self.show_progress)\n', (1493, 1559), False, 'from jina.logging.profile import ProgressBar\n'), ((2571, 2668), 'jina.serve.stream.RequestStreamer', 'RequestStreamer', (['self.args'], {'request_handler': '_request_handler', 'result_handler': '_result_handler'}), '(self.args, request_handler=_request_handler, result_handler\n =_result_handler)\n', (2586, 2668), False, 'from jina.serve.stream import RequestStreamer\n'), ((3427, 3445), 'jina.types.request.data.DataRequest', 'DataRequest', (['r_str'], {}), '(r_str)\n', (3438, 3445), False, 'from jina.types.request.data import DataRequest\n'), ((3550, 3702), 'jina.clients.helper.callback_exec', 'callback_exec', ([], {'response': 'resp', 'on_error': 'on_error', 'on_done': 'on_done', 'on_always': 'on_always', 'continue_on_error': 'self.continue_on_error', 'logger': 'self.logger'}), '(response=resp, on_error=on_error, on_done=on_done, on_always=\n on_always, continue_on_error=self.continue_on_error, logger=self.logger)\n', (3563, 3702), False, 'from jina.clients.helper import callback_exec, callback_exec_on_error\n'), ((1864, 1906), 'jina.clients.base.helper.HTTPClientlet', 'HTTPClientlet', ([], {'url': 'url', 'logger': 'self.logger'}), '(url=url, logger=self.logger)\n', (1877, 1906), False, 'from jina.clients.base.helper import HTTPClientlet\n'), ((2984, 3020), 'jina.excepts.BadClient', 'BadClient', (['f"""no such endpoint {url}"""'], {}), "(f'no such endpoint {url}')\n", (2993, 3020), False, 'from jina.excepts import BadClient\n'), ((3318, 3356), 'docarray.DocumentArray.from_dict', 'DocumentArray.from_dict', (["r_str['data']"], {}), "(r_str['data'])\n", (3341, 3356), False, 'from docarray import DocumentArray\n'), ((4253, 4301), 'jina.clients.helper.callback_exec_on_error', 'callback_exec_on_error', (['on_error', 'e', 'self.logger'], {}), '(on_error, e, self.logger)\n', (4275, 4301), False, 'from jina.clients.helper import callback_exec, callback_exec_on_error\n'), ((4360, 4505), 'jina.clients.helper.callback_exec', 'callback_exec', ([], {'response': 'None', 'on_error': 'None', 'on_done': 'None', 'on_always': 'on_always', 'continue_on_error': 'self.continue_on_error', 'logger': 'self.logger'}), '(response=None, on_error=None, on_done=None, on_always=\n on_always, continue_on_error=self.continue_on_error, logger=self.logger)\n', (4373, 4505), False, 'from jina.clients.helper import callback_exec, callback_exec_on_error\n')]
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): <NAME>, <NAME>

from fontTools.ttLib.tables.DefaultTable import DefaultTable
import logging


log = logging.getLogger("fontTools.merge")


def add_method(*clazzes, **kwargs):
    """Returns a decorator function that adds a new method to one or
    more classes."""
    allowDefault = kwargs.get('allowDefaultTable', False)
    def wrapper(method):
        done = []
        for clazz in clazzes:
            if clazz in done:
                continue  # Support multiple names of a clazz
            done.append(clazz)
            assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.'
            assert method.__name__ not in clazz.__dict__, \
                "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
            setattr(clazz, method.__name__, method)
        return None
    return wrapper


def mergeObjects(lst):
    lst = [item for item in lst if item is not NotImplemented]
    if not lst:
        return NotImplemented
    lst = [item for item in lst if item is not None]
    if not lst:
        return None

    clazz = lst[0].__class__
    assert all(type(item) == clazz for item in lst), lst

    logic = clazz.mergeMap
    returnTable = clazz()
    returnDict = {}

    allKeys = set.union(set(), *(vars(table).keys() for table in lst))
    for key in allKeys:
        try:
            mergeLogic = logic[key]
        except KeyError:
            try:
                mergeLogic = logic['*']
            except KeyError:
                raise Exception("Don't know how to merge key %s of class %s" %
                                (key, clazz.__name__))
        if mergeLogic is NotImplemented:
            continue
        value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
        if value is not NotImplemented:
            returnDict[key] = value

    returnTable.__dict__ = returnDict

    return returnTable


@add_method(DefaultTable, allowDefaultTable=True)
def merge(self, m, tables):
    if not hasattr(self, 'mergeMap'):
        log.info("Don't know how to merge '%s'.", self.tableTag)
        return NotImplemented

    logic = self.mergeMap

    if isinstance(logic, dict):
        return m.mergeObjects(self, self.mergeMap, tables)
    else:
        return logic(tables)
[ "logging.getLogger" ]
[((173, 209), 'logging.getLogger', 'logging.getLogger', (['"""fontTools.merge"""'], {}), "('fontTools.merge')\n", (190, 209), False, 'import logging\n')]
### EXPERIMENT FLAGS ITERATION
flags = {'SC4', 'EPI1', '!AV'}

### EXPERIMENT EXECUTION STUB
import sys
sys.path.append('../../../../PatchSim/')
sys.path.append('../../../../school_closures/')

import patchsim as sim
import pandas as pd
import numpy as np
import multiprocessing
import sc_variants as sc
from copy import deepcopy

np.random.seed(42)

### PREP SCENARIO BY FLAGS
epidemic = [flag for flag in flags if flag.startswith('EPI')][0]
schoolClosures = [flag for flag in flags if flag.startswith('SC')]
if len(schoolClosures) != 0:
    schoolClosure = schoolClosures[0]
else:
    schoolClosure = 'null'

#### SET NUMBER OF REPS AND THREADS
n = 100
threads = 50

### INITIAL LOADS OF PARAMS
print("Loading params")
configs = sim.read_config('config.patchsim')
patch_df = sim.load_patch(configs)
params = sim.load_params(configs, patch_df)
Theta = sim.load_Theta(configs, patch_df)
seeds = sim.load_seed(configs, params, patch_df)

if schoolClosure in {'SC2', 'SC4'}:
    scMethod = sc.NetIntervention(configs)
elif schoolClosure in {'SC1', 'SC3'}:
    scMethod = sc.NetInterventionAdaptive(configs)
else:
    scMethod = None

### EXPERIMENT EXECUTION
def runPatchsimSub(args):
    """Runs experiment in parallel using modified copies of params and configs"""
    (i, betaOut) = args
    print("Starting run", i)
    configsOut = deepcopy(configs)
    paramsOut = deepcopy(params)
    configsOut['ExposureRate'] = betaOut
    paramsOut['beta'] = np.where(params['beta'] == 1337, betaOut, params['beta'])
    df = sim.run_disease_simulation(configsOut, patch_df, params=paramsOut,
                                    Theta=Theta, seeds=seeds, write_epi=False,
                                    return_epi=True, intervene_step=scMethod)
    df.loc[:, 'sample'] = i
    df.index.rename('id', inplace=True)
    return df

betaOut = {'EPI1': 1.29e-06, 'EPI2': 1.60e-06}[epidemic]
stdDev = {'EPI1': 7.08e-08, 'EPI2': 8.60e-08}[epidemic]
argsList = [(i, np.random.normal(betaOut, stdDev)) for i in range(n)]

print("Starting runs with beta %s and stddev %s" % (betaOut, stdDev))
with multiprocessing.Pool(threads) as mp_pool:
    results = mp_pool.map(runPatchsimSub, argsList)

results = pd.concat(results)
results.to_csv('MergedSamples.csv')
[ "sys.path.append", "copy.deepcopy", "numpy.random.seed", "patchsim.load_params", "patchsim.run_disease_simulation", "numpy.where", "patchsim.load_Theta", "sc_variants.NetInterventionAdaptive", "multiprocessing.Pool", "numpy.random.normal", "patchsim.read_config", "sc_variants.NetIntervention", "patchsim.load_patch", "pandas.concat", "patchsim.load_seed" ]
[((106, 146), 'sys.path.append', 'sys.path.append', (['"""../../../../PatchSim/"""'], {}), "('../../../../PatchSim/')\n", (121, 146), False, 'import sys\n'), ((147, 194), 'sys.path.append', 'sys.path.append', (['"""../../../../school_closures/"""'], {}), "('../../../../school_closures/')\n", (162, 194), False, 'import sys\n'), ((334, 352), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (348, 352), True, 'import numpy as np\n'), ((750, 784), 'patchsim.read_config', 'sim.read_config', (['"""config.patchsim"""'], {}), "('config.patchsim')\n", (765, 784), True, 'import patchsim as sim\n'), ((796, 819), 'patchsim.load_patch', 'sim.load_patch', (['configs'], {}), '(configs)\n', (810, 819), True, 'import patchsim as sim\n'), ((829, 863), 'patchsim.load_params', 'sim.load_params', (['configs', 'patch_df'], {}), '(configs, patch_df)\n', (844, 863), True, 'import patchsim as sim\n'), ((872, 905), 'patchsim.load_Theta', 'sim.load_Theta', (['configs', 'patch_df'], {}), '(configs, patch_df)\n', (886, 905), True, 'import patchsim as sim\n'), ((914, 954), 'patchsim.load_seed', 'sim.load_seed', (['configs', 'params', 'patch_df'], {}), '(configs, params, patch_df)\n', (927, 954), True, 'import patchsim as sim\n'), ((2467, 2485), 'pandas.concat', 'pd.concat', (['results'], {}), '(results)\n', (2476, 2485), True, 'import pandas as pd\n'), ((1007, 1034), 'sc_variants.NetIntervention', 'sc.NetIntervention', (['configs'], {}), '(configs)\n', (1025, 1034), True, 'import sc_variants as sc\n'), ((1354, 1371), 'copy.deepcopy', 'deepcopy', (['configs'], {}), '(configs)\n', (1362, 1371), False, 'from copy import deepcopy\n'), ((1388, 1404), 'copy.deepcopy', 'deepcopy', (['params'], {}), '(params)\n', (1396, 1404), False, 'from copy import deepcopy\n'), ((1470, 1527), 'numpy.where', 'np.where', (["(params['beta'] == 1337)", 'betaOut', "params['beta']"], {}), "(params['beta'] == 1337, betaOut, params['beta'])\n", (1478, 1527), True, 'import numpy as np\n'), ((1592, 1753), 'patchsim.run_disease_simulation', 'sim.run_disease_simulation', (['configsOut', 'patch_df'], {'params': 'paramsOut', 'Theta': 'Theta', 'seeds': 'seeds', 'write_epi': '(False)', 'return_epi': '(True)', 'intervene_step': 'scMethod'}), '(configsOut, patch_df, params=paramsOut, Theta=\n Theta, seeds=seeds, write_epi=False, return_epi=True, intervene_step=\n scMethod)\n', (1618, 1753), True, 'import patchsim as sim\n'), ((2358, 2387), 'multiprocessing.Pool', 'multiprocessing.Pool', (['threads'], {}), '(threads)\n', (2378, 2387), False, 'import multiprocessing\n'), ((1087, 1122), 'sc_variants.NetInterventionAdaptive', 'sc.NetInterventionAdaptive', (['configs'], {}), '(configs)\n', (1113, 1122), True, 'import sc_variants as sc\n'), ((2229, 2262), 'numpy.random.normal', 'np.random.normal', (['betaOut', 'stdDev'], {}), '(betaOut, stdDev)\n', (2245, 2262), True, 'import numpy as np\n')]
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2012 OpenStack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Request Body limiting middleware.
"""

from oslo.config import cfg
import webob.dec
import webob.exc

from cinder import flags
from cinder.openstack.common import log as logging
from cinder import wsgi

# default request size is 112k
max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size',
                                       default=114688,
                                       help='Max size for body of a request')

FLAGS = flags.FLAGS
FLAGS.register_opt(max_request_body_size_opt)
LOG = logging.getLogger(__name__)


class LimitingReader(object):
    """Reader to limit the size of an incoming request."""

    def __init__(self, data, limit):
        """
        :param data: Underlying data object
        :param limit: maximum number of bytes the reader should allow
        """
        self.data = data
        self.limit = limit
        self.bytes_read = 0

    def __iter__(self):
        for chunk in self.data:
            self.bytes_read += len(chunk)
            if self.bytes_read > self.limit:
                msg = _("Request is too large.")
                raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
            else:
                yield chunk

    def read(self, i=None):
        result = self.data.read(i)
        self.bytes_read += len(result)
        if self.bytes_read > self.limit:
            msg = _("Request is too large.")
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
        return result


class RequestBodySizeLimiter(wsgi.Middleware):
    """Add a 'cinder.context' to WSGI environ."""

    def __init__(self, *args, **kwargs):
        super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        if req.content_length > FLAGS.osapi_max_request_body_size:
            msg = _("Request is too large.")
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
        if req.content_length is None and req.is_body_readable:
            limiter = LimitingReader(req.body_file,
                                    FLAGS.osapi_max_request_body_size)
            req.body_file = limiter
        return self.application
[ "oslo.config.cfg.IntOpt", "cinder.openstack.common.log.getLogger" ]
[((921, 1022), 'oslo.config.cfg.IntOpt', 'cfg.IntOpt', (['"""osapi_max_request_body_size"""'], {'default': '(114688)', 'help': '"""Max size for body of a request"""'}), "('osapi_max_request_body_size', default=114688, help=\n 'Max size for body of a request')\n", (931, 1022), False, 'from oslo.config import cfg\n'), ((1169, 1196), 'cinder.openstack.common.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1186, 1196), True, 'from cinder.openstack.common import log as logging\n')]
import io
import textwrap
import traceback
from contextlib import redirect_stdout
from datetime import datetime as dt

import discord
from discord.ext import commands

from main import initial_cogs


class BotSettings(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self._last_result = None

    @commands.Cog.listener()
    async def on_ready(self):
        print(f"{self.__class__.__name__} Cog has been loaded\n-----")

    @commands.command(name="prefix")
    @commands.guild_only()
    @commands.has_permissions(administrator=True)
    async def change_prefix(self, ctx, new_prefix: str):
        if len(new_prefix) > 5:
            await ctx.send("The prefix can not be more than 5 characters in length.")
        else:
            await self.bot.db.execute("UPDATE guild SET prefix = $1 WHERE server_id = $2",
                                      new_prefix, ctx.guild.id)
            self.bot.prefix[ctx.guild.id] = new_prefix  # was `new`, an undefined name
            await ctx.send(f"Prefix set to {new_prefix}.")

    @commands.command(name="toggle", description="Enable or disable a command!")
    @commands.is_owner()
    async def toggle(self, ctx, *, command):
        command = self.bot.get_command(command)
        if command is None:
            await ctx.send("I can't find a command with that name!")
        elif ctx.command == command:
            await ctx.send("You cannot disable this command.")
        else:
            command.enabled = not command.enabled
            ternary = "enabled" if command.enabled else "disabled"
            await ctx.send(f"I have {ternary} {command.qualified_name} for you!")

    @commands.command()
    @commands.is_owner()
    async def reload(self, ctx, cog):
        """This command is used to reload all cogs."""
        if cog == 'all':
            embed = discord.Embed(title='Reloading Cogs...', description='',
                                  color=discord.Color.teal())
            for i in initial_cogs:
                self.bot.reload_extension(f"cogs.{i}")
                embed.description += f"{i} reloaded successfully.\n"
            await ctx.send(embed=embed)
            return
        self.bot.reload_extension(f"cogs.{cog}")
        await ctx.send(f"{cog} reloaded successfully.")

    @commands.command()
    @commands.is_owner()
    async def load(self, ctx, cog):
        """This command is used to load a cog."""
        self.bot.load_extension(f"cogs.{cog}")
        await ctx.send(f"{cog} loaded successfully.")

    @commands.command()
    @commands.is_owner()
    async def unload(self, ctx, cog):
        """This command is used to unload a cog."""
        self.bot.unload_extension(f"cogs.{cog}")
        await ctx.send(f"{cog} unloaded successfully.")

    @staticmethod
    def cleanup_code(content):
        """Automatically removes code blocks from the code."""
        # remove ```py\n```
        if content.startswith('```') and content.endswith('```'):
            return '\n'.join(content.split('\n')[1:-1])
        # remove `foo`
        return content.strip('` \n')

    @commands.command(hidden=True)
    @commands.is_owner()
    async def eval(self, ctx, *, code: str):
        """Evaluate a code block"""
        env = {
            'bot': self.bot,
            'ctx': ctx,
            'channel': ctx.channel,
            'author': ctx.author,
            'guild': ctx.guild,
            'message': ctx.message,
            'command': ctx.command,
            'datetime': dt,
            '_': self._last_result
        }
        env.update(globals())
        body = self.cleanup_code(code)
        stdout = io.StringIO()
        to_compile = f'async def func():\n{textwrap.indent(body, "  ")}'
        try:
            exec(to_compile, env)
        except Exception as e:
            return await ctx.reply(f'```py\n{e.__class__.__name__}: {e}\n```')
        func = env['func']
        try:
            with redirect_stdout(stdout):
                ret = await func()
        except Exception as e:
            value = stdout.getvalue()
            await ctx.reply(f'```py\n{value}{traceback.format_exc()}\n```')
        else:
            value = stdout.getvalue()
            try:
                await ctx.message.add_reaction('\u2705')
            except Exception:
                pass
            if ret is None:
                if value:
                    await ctx.reply(f'```py\n{value}\n```')
            else:
                self._last_result = ret
                await ctx.reply(f'```py\n{value}{ret}\n```')


def setup(bot):
    bot.add_cog(BotSettings(bot))
[ "io.StringIO", "discord.ext.commands.command", "discord.ext.commands.has_permissions", "textwrap.indent", "discord.Color.teal", "discord.ext.commands.Cog.listener", "contextlib.redirect_stdout", "traceback.format_exc", "discord.ext.commands.guild_only", "discord.ext.commands.is_owner" ]
[((324, 347), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (345, 347), False, 'from discord.ext import commands\n'), ((455, 486), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""prefix"""'}), "(name='prefix')\n", (471, 486), False, 'from discord.ext import commands\n'), ((492, 513), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (511, 513), False, 'from discord.ext import commands\n'), ((519, 563), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (543, 563), False, 'from discord.ext import commands\n'), ((984, 1059), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""toggle"""', 'description': '"""Enable or disable a command!"""'}), "(name='toggle', description='Enable or disable a command!')\n", (1000, 1059), False, 'from discord.ext import commands\n'), ((1065, 1084), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (1082, 1084), False, 'from discord.ext import commands\n'), ((1597, 1615), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1613, 1615), False, 'from discord.ext import commands\n'), ((1621, 1640), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (1638, 1640), False, 'from discord.ext import commands\n'), ((2246, 2264), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (2262, 2264), False, 'from discord.ext import commands\n'), ((2270, 2289), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (2287, 2289), False, 'from discord.ext import commands\n'), ((2502, 2520), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (2518, 2520), False, 'from discord.ext import commands\n'), ((2526, 2545), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (2543, 2545), False, 'from discord.ext import commands\n'), ((3088, 3117), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)'}), '(hidden=True)\n', (3104, 3117), False, 'from discord.ext import commands\n'), ((3123, 3142), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (3140, 3142), False, 'from discord.ext import commands\n'), ((3628, 3641), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3639, 3641), False, 'import io\n'), ((3686, 3713), 'textwrap.indent', 'textwrap.indent', (['body', '""" """'], {}), "(body, ' ')\n", (3701, 3713), False, 'import textwrap\n'), ((3932, 3955), 'contextlib.redirect_stdout', 'redirect_stdout', (['stdout'], {}), '(stdout)\n', (3947, 3955), False, 'from contextlib import redirect_stdout\n'), ((1893, 1913), 'discord.Color.teal', 'discord.Color.teal', ([], {}), '()\n', (1911, 1913), False, 'import discord\n'), ((4106, 4128), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4126, 4128), False, 'import traceback\n')]
from frost import FrostClient

# Store the client globally
client = FrostClient()
[ "frost.FrostClient" ]
[((68, 81), 'frost.FrostClient', 'FrostClient', ([], {}), '()\n', (79, 81), False, 'from frost import FrostClient\n')]
import pandas as pd

df_pub = pd.read_csv('tableS4.Refseq.assembly.summary.tsv',sep='\t')
df_pub['ATCC Catalog'] = df_pub['ATCC Catalog'].apply(lambda x: x.upper()) #set all catalog IDs to uppercase
#count = 577
df_port = pd.read_csv('tableS4.atcc.assembly.summary.tsv',sep='\t')
portal_catalog = list(set(list((df_port['Base Catalog Number'])))) #get list of catalog IDs on the portal. MAY BE REDUNDANT

#plan: return ratio values of length, illumina n50, contig count, read depth, gc content - gc content not currently in public dataset
#int(df_pub[df_pub['ATCC Catalog'] == catalog]['Total Length']) / int(df_port[df_port['Base Catalog Number'] == catalog]['Total Length'])
#need to remove the redundancy problem
#ideas:
#separate into assembly level data frames - still has redundancies by catalog id.
#return average ratio values when catalog ID appears more than once
#Both of the above, so values returned are collected by assembly level. Would need to prevent lower levels from repeating higher level assemblies per catalog ID
#third option will be addressed below

#set public assembly dataframes
df_pub_complete = df_pub[(df_pub['Assembly Level'] == 'Chromosome') | (df_pub['Assembly Level'] == 'Complete Genome')] #count=234
pub_complete = [x for x in portal_catalog if x in list(df_pub_complete['ATCC Catalog'])] #get "complete" catalog IDs from public assemblies
df_pub_scaffold = df_pub[df_pub['Assembly Level'] == 'Scaffold'] #get down to just scaffolds
pub_scaffold = [x for x in portal_catalog if x in list(df_pub_scaffold['ATCC Catalog'])] #get list of scaffold catalog IDs
#count = 107
df_pub_contig = df_pub[df_pub['Assembly Level'] == 'Contig'] #just contigs
#should be unnecessary at this stage
#count = 81
#totals of dataframes don't add up to the top level dataframe count because of assemblies that exist at multiple stages. This approach should only compare the highest quality public assembly to ATCC's.

df_pub_comp_mean = df_pub_complete.groupby('ATCC Catalog').mean().reset_index() #summarize values by average
df_pub_comp_min = df_pub_complete.groupby('ATCC Catalog').min().reset_index() #summarize values by minimum (appropriate for contig counts)
df_pub_comp_max = df_pub_complete.groupby('ATCC Catalog').max().reset_index() #summarize values by maximum (appropriate for N50, perhaps)

df_pub_scaffold_mean = df_pub_scaffold.groupby('ATCC Catalog').mean().reset_index() #summarize values by average
df_pub_scaffold_min = df_pub_scaffold.groupby('ATCC Catalog').min().reset_index() #summarize values by minimum (appropriate for contig counts)
df_pub_scaffold_max = df_pub_scaffold.groupby('ATCC Catalog').max().reset_index() #summarize values by maximum (appropriate for N50, perhaps)

df_pub_contig_mean = df_pub_contig.groupby('ATCC Catalog').mean().reset_index() #summarize values by average
df_pub_contig_min = df_pub_contig.groupby('ATCC Catalog').min().reset_index() #summarize values by minimum (appropriate for contig counts)
df_pub_contig_max = df_pub_contig.groupby('ATCC Catalog').max().reset_index() #summarize values by maximum (appropriate for N50, perhaps)

#set portal assembly dataframes
df_port_complete = df_port
if len(df_port_complete) == len(portal_catalog):
    print('Evaluating only top ranked assemblies')
    if len(list(set(list(df_port_complete['Base Catalog Number'])))) == len(portal_catalog):
        print('No repeated Catalog IDs will be evaluated')
    else:
        print('There are Catalog IDs present more than once in the top ranked assemblies, which will be averaged')
else:
    port_cat_2 = list(df_port[df_port['Assembly Rank'] == 2]['Base Catalog Number']) #get all second rank assemblies to check that each assembly has a top ranking
    if len([x for x in port_cat_2 if x not in list(df_port[df_port['Assembly Rank'] == 1]['Base Catalog Number'])]) == 0:
        print('No second rank assemblies without a first rank assembly in the portal assembly list')

#comparisons
#'complete' public assemblies
#genome length
#compared by average length
#comparison metric could be a command line argument, consistent across all fields.
port_length = df_port_complete[['Base Catalog Number', 'Total Length', 'Product Collection']]
pub_length = df_pub_complete[['ATCC Catalog', 'Total Length', 'Assembly Accession', 'Organism']]
port_length.columns = ['Catalog ID', 'Total Length', 'Product Collection']
pub_length.columns = ['Catalog ID', 'Total Length', 'Assembly Accession', 'Organism'] #need same column names to merge dataframes
print('public df length is: {}'.format(len(df_pub)))
print('pub_len length is: {}'.format(len(pub_length)))
print(len(port_length))
df_len = pd.merge(port_length, pub_length, on=['Catalog ID'], suffixes=('_portal','_public'))
df_len['Length Ratio'] = df_len['Total Length_portal'] / df_len['Total Length_public']
df_len['Length Difference'] = df_len['Total Length_portal'] - df_len['Total Length_public']
df_len['Public Assembly Level'] = 'Complete'
df_len.to_csv('Assembly_Complete_length_no_aggregate.comparisons.txt',sep='\t',index=False)
print(len(df_len))

#N50
#compared by max N50
port_N50 = df_port_complete[['Base Catalog Number', 'Filtered N50', 'Product Collection']]
pub_N50 = df_pub_complete[['ATCC Catalog', 'Scaffold N50', 'Assembly Accession', 'Organism']]
port_N50.columns = ['Catalog ID', 'N50', 'Product Collection']
pub_N50.columns = ['Catalog ID', 'N50', 'Assembly Accession', 'Organism']
df_N50 = pd.merge(port_N50, pub_N50, on=['Catalog ID'], suffixes=('_portal','_public'))
df_N50['N50 Ratio'] = df_N50['N50_portal'] / df_N50['N50_public']
df_N50['N50 Difference'] = df_N50['N50_portal'] - df_N50['N50_public']
df_N50['Public Assembly Level'] = 'Complete'
df_N50.to_csv('Assembly_Complete_N50_no_aggregate.comparisons.txt',sep='\t',index=False)

#contig/replicon count
#compared by min
port_contig = df_port_complete[['Base Catalog Number', 'Total Contigs', 'Product Collection']]
pub_contig = df_pub_complete[['ATCC Catalog', 'Scaffold Count', 'Assembly Accession', 'Organism']]
port_contig.columns = ['Catalog ID', 'Contig Count', 'Product Collection']
pub_contig.columns = ['Catalog ID', 'Contig Count', 'Assembly Accession', 'Organism']
df_contig = pd.merge(port_contig, pub_contig, on=['Catalog ID'], suffixes=('_portal','_public'))
df_contig['Contig Count Ratio'] = df_contig['Contig Count_portal'] / df_contig['Contig Count_public']
df_contig['Contig Count Difference'] = df_contig['Contig Count_portal'] - df_contig['Contig Count_public']
df_contig['Public Assembly Level'] = 'Complete'
df_contig.to_csv('Assembly_Complete_Contig_no_aggregate.comparisons.txt',sep='\t',index=False)

#GC Content
#compared by mean
port_gc = df_port_complete[['Base Catalog Number', 'GC Content', 'Product Collection']]
pub_gc = df_pub_complete[['ATCC Catalog', 'GC Content', 'Assembly Accession', 'Organism']]
port_gc.columns = ['Catalog ID', 'GC', 'Product Collection']
port_gc['GC'] = port_gc['GC'] * 100
pub_gc.columns = ['Catalog ID', 'GC', 'Assembly Accession', 'Organism']
df_gc = pd.merge(port_gc, pub_gc, on=['Catalog ID'], suffixes=('_portal','_public'))
df_gc['GC Ratio'] = df_gc['GC_portal'] / df_gc['GC_public']
df_gc['GC Difference'] = df_gc['GC_portal'] - df_gc['GC_public']
df_gc['Public Assembly Level'] = 'Complete'
df_gc.to_csv('Assembly_Complete_GC_no_aggregate.comparisons.txt',sep='\t',index=False)

#'scaffold' public assemblies
#note: variables are reused because we're writing to files as we go
#genome length
#compared by average length
#comparison metric could be a command line argument, consistent across all fields.
#port_length = df_port_complete[['Base Catalog Number', 'Total Length']] #already done, doesn't need to be repeated
pub_length = df_pub_scaffold[['ATCC Catalog', 'Total Length', 'Assembly Accession', 'Organism']]
#port_length.columns = ['Catalog ID', 'Total Length'] #already done, doesn't need to be repeated
pub_length.columns = ['Catalog ID', 'Total Length', 'Assembly Accession', 'Organism'] #need same column names to merge dataframes
df_len = pd.merge(port_length, pub_length, on=['Catalog ID'], suffixes=('_portal','_public'))
df_len['Length Ratio'] = df_len['Total Length_portal'] / df_len['Total Length_public']
df_len['Length Difference'] = df_len['Total Length_portal'] - df_len['Total Length_public']
df_len['Public Assembly Level'] = 'Scaffold'
df_len.to_csv('Assembly_Scaffold_length_no_aggregate.comparisons.txt',sep='\t',index=False)

#N50
#compared by max N50
#port_N50 = df_port_complete[['Base Catalog Number', 'Filtered N50']]
pub_N50 = df_pub_scaffold[['ATCC Catalog', 'Scaffold N50', 'Assembly Accession', 'Organism']]
#port_N50.columns = ['Catalog ID', 'N50']
pub_N50.columns = ['Catalog ID', 'N50', 'Assembly Accession', 'Organism']
df_N50 = pd.merge(port_N50, pub_N50, on=['Catalog ID'], suffixes=('_portal','_public'))
df_N50['N50 Ratio'] = df_N50['N50_portal'] / df_N50['N50_public']
df_N50['N50 Difference'] = df_N50['N50_portal'] - df_N50['N50_public']
df_N50['Public Assembly Level'] = 'Scaffold'
df_N50.to_csv('Assembly_Scaffold_N50_no_aggregate.comparisons.txt',sep='\t',index=False)

#contig/replicon count
#compared by min
#port_contig = df_port_complete[['Base Catalog Number', 'Total Contigs']]
pub_contig = df_pub_scaffold[['ATCC Catalog', 'Scaffold Count', 'Assembly Accession', 'Organism']]
#port_contig.columns = ['Catalog ID', 'Contig Count']
pub_contig.columns = ['Catalog ID', 'Contig Count', 'Assembly Accession', 'Organism']
df_contig = pd.merge(port_contig, pub_contig, on=['Catalog ID'], suffixes=('_portal','_public'))
df_contig['Contig Count Ratio'] = df_contig['Contig Count_portal'] / df_contig['Contig Count_public']
df_contig['Contig Count Difference'] = df_contig['Contig Count_portal'] - df_contig['Contig Count_public']
df_contig['Public Assembly Level'] = 'Scaffold'
df_contig.to_csv('Assembly_Scaffold_Contig_no_aggregate.comparisons.txt',sep='\t',index=False)

#GC Content
#compared by mean
#port_gc = df_port_complete[['Base Catalog Number', 'GC Content']]
pub_gc = df_pub_scaffold[['ATCC Catalog', 'GC Content','Assembly Accession', 'Organism']]
#port_gc.columns = ['Catalog ID', 'GC']
#port_gc['GC'] = port_gc['GC'] * 100
pub_gc.columns = ['Catalog ID', 'GC', 'Assembly Accession', 'Organism']
df_gc = pd.merge(port_gc, pub_gc, on=['Catalog ID'], suffixes=('_portal','_public'))
df_gc['GC Ratio'] = df_gc['GC_portal'] / df_gc['GC_public']
df_gc['GC Difference'] = df_gc['GC_portal'] - df_gc['GC_public']
df_gc['Public Assembly Level'] = 'Scaffold'
df_gc.to_csv('Assembly_Scaffold_GC_no_aggregate.comparisons.txt',sep='\t',index=False)

#'contig' public assemblies
#genome length
#compared by average length
#comparison metric could be a command line argument, consistent across all fields.
#port_length = df_port_complete[['Base Catalog Number', 'Total Length']]
pub_length = df_pub_contig[['ATCC Catalog', 'Total Length', 'Assembly Accession', 'Organism']]
#port_length.columns = ['Catalog ID', 'Total Length']
pub_length.columns = ['Catalog ID', 'Total Length', 'Assembly Accession', 'Organism'] #need same column names to merge dataframes
df_len = pd.merge(port_length, pub_length, on=['Catalog ID'], suffixes=('_portal','_public'))
df_len['Length Ratio'] = df_len['Total Length_portal'] / df_len['Total Length_public']
df_len['Length Difference'] = df_len['Total Length_portal'] - df_len['Total Length_public']
df_len['Public Assembly Level'] = 'Contig'
df_len.to_csv('Assembly_Contig_length_no_aggregate.comparisons.txt',sep='\t',index=False)

#N50
#compared by max N50
#port_N50 = df_port_complete[['Base Catalog Number', 'Filtered N50']]
pub_N50 = df_pub_contig[['ATCC Catalog', 'Scaffold N50', 'Assembly Accession', 'Organism']]
#port_N50.columns = ['Catalog ID', 'N50']
pub_N50.columns = ['Catalog ID', 'N50', 'Assembly Accession', 'Organism']
df_N50 = pd.merge(port_N50, pub_N50, on=['Catalog ID'], suffixes=('_portal','_public'))
df_N50['N50 Ratio'] = df_N50['N50_portal'] / df_N50['N50_public']
df_N50['N50 Difference'] = df_N50['N50_portal'] - df_N50['N50_public']
df_N50['Public Assembly Level'] = 'Contig'
df_N50.to_csv('Assembly_Contig_N50_no_aggregate.comparisons.txt',sep='\t',index=False)

#contig/replicon count
#compared by min
#port_contig = df_port_complete[['Base Catalog Number', 'Total Contigs']]
pub_contig = df_pub_contig[['ATCC Catalog', 'Scaffold Count', 'Assembly Accession','Organism']]
#port_contig.columns = ['Catalog ID', 'Contig Count']
pub_contig.columns = ['Catalog ID', 'Contig Count', 'Assembly Accession', 'Organism']
df_contig = pd.merge(port_contig, pub_contig, on=['Catalog ID'], suffixes=('_portal','_public'))
df_contig['Contig Count Ratio'] = df_contig['Contig Count_portal'] / df_contig['Contig Count_public']
df_contig['Contig Count Difference'] = df_contig['Contig Count_portal'] - df_contig['Contig Count_public']
df_contig['Public Assembly Level'] = 'Contig'
df_contig.to_csv('Assembly_Contig_Contig_no_aggregate.comparisons.txt',sep='\t',index=False)

#GC Content
#compared by mean
#port_gc = df_port_complete[['Base Catalog Number', 'GC Content']]
pub_gc = df_pub_contig[['ATCC Catalog', 'GC Content', 'Assembly Accession', 'Organism']]
#port_gc.columns = ['Catalog ID', 'GC']
#port_gc['GC'] = port_gc['GC'] * 100
pub_gc.columns = ['Catalog ID', 'GC', 'Assembly Accession', 'Organism']
df_gc = pd.merge(port_gc, pub_gc, on=['Catalog ID'], suffixes=('_portal','_public'))
df_gc['GC Ratio'] = df_gc['GC_portal'] / df_gc['GC_public']
df_gc['GC Difference'] = df_gc['GC_portal'] - df_gc['GC_public']
df_gc['Public Assembly Level'] = 'Contig'
df_gc.to_csv('Assembly_Contig_GC_no_aggregate.comparisons.txt',sep='\t',index=False)
[ "pandas.read_csv", "pandas.merge" ]
[((30, 90), 'pandas.read_csv', 'pd.read_csv', (['"""tableS4.Refseq.assembly.summary.tsv"""'], {'sep': '"""\t"""'}), "('tableS4.Refseq.assembly.summary.tsv', sep='\\t')\n", (41, 90), True, 'import pandas as pd\n'), ((221, 279), 'pandas.read_csv', 'pd.read_csv', (['"""tableS4.atcc.assembly.summary.tsv"""'], {'sep': '"""\t"""'}), "('tableS4.atcc.assembly.summary.tsv', sep='\\t')\n", (232, 279), True, 'import pandas as pd\n'), ((4638, 4727), 'pandas.merge', 'pd.merge', (['port_length', 'pub_length'], {'on': "['Catalog ID']", 'suffixes': "('_portal', '_public')"}), "(port_length, pub_length, on=['Catalog ID'], suffixes=('_portal',\n    '_public'))\n", (4646, 4727), True, 'import pandas as pd\n'), ((5415, 5494), 'pandas.merge', 'pd.merge', (['port_N50', 'pub_N50'], {'on': "['Catalog ID']", 'suffixes': "('_portal', '_public')"}), "(port_N50, pub_N50, on=['Catalog ID'], suffixes=('_portal', '_public'))\n", (5423, 5494), True, 'import pandas as pd\n'), ((6173, 6262), 'pandas.merge', 'pd.merge', (['port_contig', 'pub_contig'], {'on': "['Catalog ID']", 'suffixes': "('_portal', '_public')"}), "(port_contig, pub_contig, on=['Catalog ID'], suffixes=('_portal',\n    '_public'))\n", (6181, 6262), True, 'import pandas as pd\n'), ((6997, 7074), 'pandas.merge', 'pd.merge', (['port_gc', 'pub_gc'], {'on': "['Catalog ID']", 'suffixes': "('_portal', '_public')"}), "(port_gc, pub_gc, on=['Catalog ID'], suffixes=('_portal', '_public'))\n", (7005, 7074), True, 'import pandas as pd\n'), ((8005, 8094), 'pandas.merge', 'pd.merge', (['port_length', 'pub_length'], {'on': "['Catalog ID']", 'suffixes': "('_portal', '_public')"}), "(port_length, pub_length, on=['Catalog ID'], suffixes=('_portal',\n    '_public'))\n", (8013, 8094), True, 'import pandas as pd\n'), ((8722, 8801), 'pandas.merge', 'pd.merge', (['port_N50', 'pub_N50'], {'on': "['Catalog ID']", 'suffixes': "('_portal', '_public')"}), "(port_N50, pub_N50, on=['Catalog ID'], suffixes=('_portal', '_public'))\n", (8730, 8801), True, 'import pandas as pd\n'), ((9438, 9527), 'pandas.merge', 'pd.merge', (['port_contig', 'pub_contig'], {'on': "['Catalog ID']", 'suffixes': "('_portal', '_public')"}), "(port_contig, pub_contig, on=['Catalog ID'], suffixes=('_portal',\n    '_public'))\n", (9446, 9527), True, 'import pandas as pd\n'), ((10220, 10297), 'pandas.merge', 'pd.merge', (['port_gc', 'pub_gc'], {'on': "['Catalog ID']", 'suffixes': "('_portal', '_public')"}), "(port_gc, pub_gc, on=['Catalog ID'], suffixes=('_portal', '_public'))\n", (10228, 10297), True, 'import pandas as pd\n'), ((11070, 11159), 'pandas.merge', 'pd.merge', (['port_length', 'pub_length'], {'on': "['Catalog ID']", 'suffixes': "('_portal', '_public')"}), "(port_length, pub_length, on=['Catalog ID'], suffixes=('_portal',\n    '_public'))\n", (11078, 11159), True, 'import pandas as pd\n'), ((11781, 11860), 'pandas.merge', 'pd.merge', (['port_N50', 'pub_N50'], {'on': "['Catalog ID']", 'suffixes': "('_portal', '_public')"}), "(port_N50, pub_N50, on=['Catalog ID'], suffixes=('_portal', '_public'))\n", (11789, 11860), True, 'import pandas as pd\n'), ((12490, 12579), 'pandas.merge', 'pd.merge', (['port_contig', 'pub_contig'], {'on': "['Catalog ID']", 'suffixes': "('_portal', '_public')"}), "(port_contig, pub_contig, on=['Catalog ID'], suffixes=('_portal',\n    '_public'))\n", (12498, 12579), True, 'import pandas as pd\n'), ((13267, 13344), 'pandas.merge', 'pd.merge', (['port_gc', 'pub_gc'], {'on': "['Catalog ID']", 'suffixes': "('_portal', '_public')"}), "(port_gc, pub_gc, on=['Catalog ID'], suffixes=('_portal', '_public'))\n", (13275, 13344), True, 'import pandas as pd\n')]
import wx


class ErrorDialog(wx.MiniFrame):
    def __init__(self, parent, name, errors):
        wx.MiniFrame.__init__(self, parent, -1, name)
        self.errorbox = wx.TextCtrl(self, -1)
        for e in errors:
            self.errorbox.AppendText(e)
        self.Show()
[ "wx.TextCtrl", "wx.MiniFrame.__init__" ]
[((98, 143), 'wx.MiniFrame.__init__', 'wx.MiniFrame.__init__', (['self', 'parent', '(-1)', 'name'], {}), '(self, parent, -1, name)\n', (119, 143), False, 'import wx\n'), ((168, 189), 'wx.TextCtrl', 'wx.TextCtrl', (['self', '(-1)'], {}), '(self, -1)\n', (179, 189), False, 'import wx\n')]
import tensorflow as tf


class FeedForwardNetwork(tf.keras.models.Model):
    '''
    Position-wise feedforward neural network for the Transformer.
    '''
    def __init__(self, hidden_dim: int, dropout_rate: float, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.hidden_dim = hidden_dim
        self.dropout_rate = dropout_rate

        self.filter_dense_layer = tf.keras.layers.Dense(hidden_dim * 4, use_bias=True,
                                                      activation=tf.nn.relu, name='filter_layer')
        self.output_dense_layer = tf.keras.layers.Dense(hidden_dim, use_bias=True, name='output_layer')
        self.dropout_layer = tf.keras.layers.Dropout(dropout_rate)

    def call(self, input: tf.Tensor, training: bool) -> tf.Tensor:
        '''
        Applies the FeedForwardNetwork.
        :param input: shape = [batch_size, length, hidden_dim]
        :return: shape = [batch_size, length, hidden_dim]
        '''
        tensor = self.filter_dense_layer(input)
        tensor = self.dropout_layer(tensor, training=training)
        return self.output_dense_layer(tensor)


class ResidualNormalizationWrapper(tf.keras.models.Model):
    '''
    Applies the following normalization around the given layer (or model):
    - layer normalization
    - dropout
    - residual connection
    '''
    def __init__(self, layer: tf.keras.layers.Layer, dropout_rate: float, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.layer = layer
        self.layer_normalization = LayerNormalization()
        self.dropout_layer = tf.keras.layers.Dropout(dropout_rate)

    def call(self, input: tf.Tensor, training: bool, *args, **kwargs) -> tf.Tensor:
        tensor = self.layer_normalization(input)
        tensor = self.layer(tensor, training=training, *args, **kwargs)
        tensor = self.dropout_layer(tensor, training=training)
        return input + tensor


class LayerNormalization(tf.keras.layers.Layer):
    '''
    Layer normalization: rescales the layer output to have mean `bias`
    and standard deviation `scale`.
    '''
    def build(self, input_shape: tf.TensorShape) -> None:
        hidden_dim = input_shape[-1]
        self.scale = self.add_weight('layer_norm_scale', shape=[hidden_dim],
                                     initializer=tf.ones_initializer())
        self.bias = self.add_weight('layer_norm_bias', [hidden_dim],
                                    initializer=tf.zeros_initializer())
        super().build(input_shape)

    def call(self, x: tf.Tensor, epsilon: float = 1e-6) -> tf.Tensor:
        mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
        variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
        norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
        return norm_x * self.scale + self.bias
[ "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Dense", "tensorflow.ones_initializer", "tensorflow.reduce_mean", "tensorflow.rsqrt", "tensorflow.square", "tensorflow.zeros_initializer" ]
[((397, 497), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(hidden_dim * 4)'], {'use_bias': '(True)', 'activation': 'tf.nn.relu', 'name': '"""filter_layer"""'}), "(hidden_dim * 4, use_bias=True, activation=tf.nn.relu,\n name='filter_layer')\n", (418, 497), True, 'import tensorflow as tf\n'), ((584, 653), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['hidden_dim'], {'use_bias': '(True)', 'name': '"""output_layer"""'}), "(hidden_dim, use_bias=True, name='output_layer')\n", (605, 653), True, 'import tensorflow as tf\n'), ((683, 720), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (706, 720), True, 'import tensorflow as tf\n'), ((1571, 1608), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (1594, 1608), True, 'import tensorflow as tf\n'), ((2548, 2591), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {'axis': '[-1]', 'keepdims': '(True)'}), '(x, axis=[-1], keepdims=True)\n', (2562, 2591), True, 'import tensorflow as tf\n'), ((2626, 2645), 'tensorflow.square', 'tf.square', (['(x - mean)'], {}), '(x - mean)\n', (2635, 2645), True, 'import tensorflow as tf\n'), ((2703, 2731), 'tensorflow.rsqrt', 'tf.rsqrt', (['(variance + epsilon)'], {}), '(variance + epsilon)\n', (2711, 2731), True, 'import tensorflow as tf\n'), ((2263, 2284), 'tensorflow.ones_initializer', 'tf.ones_initializer', ([], {}), '()\n', (2282, 2284), True, 'import tensorflow as tf\n'), ((2403, 2425), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (2423, 2425), True, 'import tensorflow as tf\n')]
""" numpy.ma : a package to handle missing or invalid values. This package was initially written for numarray by <NAME> at Lawrence Livermore National Laboratory. In 2006, the package was completely rewritten by <NAME> (University of Georgia) to make the MaskedArray class a subclass of ndarray, and to improve support of structured arrays. Copyright 1999, 2000, 2001 Regents of the University of California. Released for unlimited redistribution. * Adapted for numpy_core 2005 by <NAME> and (mainly) <NAME>. * Subclassing of the base `ndarray` 2006 by <NAME> (pgmdevlist_AT_gmail_DOT_com) * Improvements suggested by <NAME> (reggie_AT_merfinllc_DOT_com) .. moduleauthor:: <NAME> """ # pylint: disable-msg=E1002 import builtins import inspect import operator import warnings import textwrap import re from functools import reduce import numpy as np import numpy.core.umath as umath import numpy.core.numerictypes as ntypes from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue from numpy import array as narray from numpy.lib.function_base import angle from numpy.compat import ( getargspec, formatargspec, long, unicode, bytes ) from numpy import expand_dims from numpy.core.numeric import normalize_axis_tuple from numpy.core._internal import recursive from numpy.compat import pickle __all__ = [ 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin', 'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray', 'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil', 'choose', 'clip', 'common_fill_value', 'compress', 'compressed', 'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh', 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal', 'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp', 'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask', 'flatten_structured_array', 'floor', 'floor_divide', 'fmod', 'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask', 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot', 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log2', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask', 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked', 'masked_array', 'masked_equal', 'masked_greater', 'masked_greater_equal', 'masked_inside', 'masked_invalid', 'masked_less', 'masked_less_equal', 'masked_not_equal', 'masked_object', 'masked_outside', 'masked_print_option', 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum', 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', 'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero', 'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod', 'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder', 'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_', 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask', 'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', 'var', 'where', 'zeros', ] MaskType = np.bool_ nomask = MaskType(0) class MaskedArrayFutureWarning(FutureWarning): pass def _deprecate_argsort_axis(arr): """ Adjust the axis passed to 
argsort, warning if necessary Parameters ---------- arr The array which argsort was called on np.ma.argsort has a long-term bug where the default of the axis argument is wrong (gh-8701), which now must be kept for backwards compatibility. Thankfully, this only makes a difference when arrays are 2- or more- dimensional, so we only need a warning then. """ if arr.ndim <= 1: # no warning needed - but switch to -1 anyway, to avoid surprising # subclasses, which are more likely to implement scalar axes. return -1 else: # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default warnings.warn( "In the future the default for argsort will be axis=-1, not the " "current None, to match its documentation and np.argsort. " "Explicitly pass -1 or None to silence this warning.", MaskedArrayFutureWarning, stacklevel=3) return None def doc_note(initialdoc, note): """ Adds a Notes section to an existing docstring. """ if initialdoc is None: return if note is None: return initialdoc notesplit = re.split(r'\n\s*?Notes\n\s*?-----', inspect.cleandoc(initialdoc)) notedoc = "\n\nNotes\n-----\n%s\n" % inspect.cleandoc(note) return ''.join(notesplit[:1] + [notedoc] + notesplit[1:]) def get_object_signature(obj): """ Get the signature from obj """ try: sig = formatargspec(*getargspec(obj)) except TypeError: sig = '' return sig ############################################################################### # Exceptions # ############################################################################### class MAError(Exception): """ Class for masked array related errors. """ pass class MaskError(MAError): """ Class for mask related errors. """ pass ############################################################################### # Filling options # ############################################################################### # b: boolean - c: complex - f: floats - i: integer - O: object - S: string default_filler = {'b': True, 'c': 1.e20 + 0.0j, 'f': 1.e20, 'i': 999999, 'O': '?', 'S': b'N/A', 'u': 999999, 'V': b'???', 'U': u'N/A' } # Add datetime64 and timedelta64 types for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"]: default_filler["M8[" + v + "]"] = np.datetime64("NaT", v) default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v) float_types_list = [np.half, np.single, np.double, np.longdouble, np.csingle, np.cdouble, np.clongdouble] max_filler = ntypes._minvals max_filler.update([(k, -np.inf) for k in float_types_list[:4]]) max_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]]) min_filler = ntypes._maxvals min_filler.update([(k, +np.inf) for k in float_types_list[:4]]) min_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]]) del float_types_list def _recursive_fill_value(dtype, f): """ Recursively produce a fill value for `dtype`, calling f on scalar dtypes """ if dtype.names is not None: vals = tuple(_recursive_fill_value(dtype[name], f) for name in dtype.names) return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d elif dtype.subdtype: subtype, shape = dtype.subdtype subval = _recursive_fill_value(subtype, f) return np.full(shape, subval) else: return f(dtype) def _get_dtype_of(obj): """ Convert the argument for *_fill_value into a dtype """ if isinstance(obj, np.dtype): return obj elif hasattr(obj, 'dtype'): return obj.dtype else: return np.asanyarray(obj).dtype def default_fill_value(obj): """ Return the default fill value for the argument object. 
    The default filling value depends on the datatype of the input
    array or the type of the input scalar:

       ========  ========
       datatype  default
       ========  ========
       bool      True
       int       999999
       float     1.e20
       complex   1.e20+0j
       object    '?'
       string    'N/A'
       ========  ========

    For structured types, a structured scalar is returned, with each field the
    default fill value for its type.

    For subarray types, the fill value is an array of the same size containing
    the default scalar fill value.

    Parameters
    ----------
    obj : ndarray, dtype or scalar
        The array data-type or scalar for which the default fill value
        is returned.

    Returns
    -------
    fill_value : scalar
        The default fill value.

    Examples
    --------
    >>> np.ma.default_fill_value(1)
    999999
    >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
    1e+20
    >>> np.ma.default_fill_value(np.dtype(complex))
    (1e+20+0j)

    """
    def _scalar_fill_value(dtype):
        if dtype.kind in 'Mm':
            return default_filler.get(dtype.str[1:], '?')
        else:
            return default_filler.get(dtype.kind, '?')

    dtype = _get_dtype_of(obj)
    return _recursive_fill_value(dtype, _scalar_fill_value)


def _extremum_fill_value(obj, extremum, extremum_name):

    def _scalar_fill_value(dtype):
        try:
            return extremum[dtype]
        except KeyError as e:
            raise TypeError(
                f"Unsuitable type {dtype} for calculating {extremum_name}."
            ) from None

    dtype = _get_dtype_of(obj)
    return _recursive_fill_value(dtype, _scalar_fill_value)


def minimum_fill_value(obj):
    """
    Return the maximum value that can be represented by the dtype of an object.

    This function is useful for calculating a fill value suitable for
    taking the minimum of an array with a given dtype.

    Parameters
    ----------
    obj : ndarray, dtype or scalar
        An object that can be queried for its numeric type.

    Returns
    -------
    val : scalar
        The maximum representable value.

    Raises
    ------
    TypeError
        If `obj` isn't a suitable numeric type.

    See Also
    --------
    maximum_fill_value : The inverse function.
    set_fill_value : Set the filling value of a masked array.
    MaskedArray.fill_value : Return current fill value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.int8()
    >>> ma.minimum_fill_value(a)
    127
    >>> a = np.int32()
    >>> ma.minimum_fill_value(a)
    2147483647

    An array of numeric data can also be passed.

    >>> a = np.array([1, 2, 3], dtype=np.int8)
    >>> ma.minimum_fill_value(a)
    127
    >>> a = np.array([1, 2, 3], dtype=np.float32)
    >>> ma.minimum_fill_value(a)
    inf

    """
    return _extremum_fill_value(obj, min_filler, "minimum")


def maximum_fill_value(obj):
    """
    Return the minimum value that can be represented by the dtype of an object.

    This function is useful for calculating a fill value suitable for
    taking the maximum of an array with a given dtype.

    Parameters
    ----------
    obj : ndarray, dtype or scalar
        An object that can be queried for its numeric type.

    Returns
    -------
    val : scalar
        The minimum representable value.

    Raises
    ------
    TypeError
        If `obj` isn't a suitable numeric type.

    See Also
    --------
    minimum_fill_value : The inverse function.
    set_fill_value : Set the filling value of a masked array.
    MaskedArray.fill_value : Return current fill value.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.int8()
    >>> ma.maximum_fill_value(a)
    -128
    >>> a = np.int32()
    >>> ma.maximum_fill_value(a)
    -2147483648

    An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8) >>> ma.maximum_fill_value(a) -128 >>> a = np.array([1, 2, 3], dtype=np.float32) >>> ma.maximum_fill_value(a) -inf """ return _extremum_fill_value(obj, max_filler, "maximum") def _recursive_set_fill_value(fillvalue, dt): """ Create a fill value for a structured dtype. Parameters ---------- fillvalue: scalar or array_like Scalar or array representing the fill value. If it is of shorter length than the number of fields in dt, it will be resized. dt: dtype The structured dtype for which to create the fill value. Returns ------- val: tuple A tuple of values corresponding to the structured fill value. """ fillvalue = np.resize(fillvalue, len(dt.names)) output_value = [] for (fval, name) in zip(fillvalue, dt.names): cdtype = dt[name] if cdtype.subdtype: cdtype = cdtype.subdtype[0] if cdtype.names is not None: output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) else: output_value.append(np.array(fval, dtype=cdtype).item()) return tuple(output_value) def _check_fill_value(fill_value, ndtype): """ Private function validating the given `fill_value` for the given dtype. If fill_value is None, it is set to the default corresponding to the dtype. If fill_value is not None, its value is forced to the given dtype. The result is always a 0d array. """ ndtype = np.dtype(ndtype) if fill_value is None: fill_value = default_fill_value(ndtype) elif ndtype.names is not None: if isinstance(fill_value, (ndarray, np.void)): try: fill_value = np.array(fill_value, copy=False, dtype=ndtype) except ValueError as e: err_msg = "Unable to transform %s to dtype %s" raise ValueError(err_msg % (fill_value, ndtype)) from e else: fill_value = np.asarray(fill_value, dtype=object) fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), dtype=ndtype) else: if isinstance(fill_value, str) and (ndtype.char not in 'OSVU'): # Note this check doesn't work if fill_value is not a scalar err_msg = "Cannot set fill value of string with array of dtype %s" raise TypeError(err_msg % ndtype) else: # In case we want to convert 1e20 to int. # Also in case of converting string arrays. try: fill_value = np.array(fill_value, copy=False, dtype=ndtype) except (OverflowError, ValueError) as e: # Raise TypeError instead of OverflowError or ValueError. # OverflowError is seldom used, and the real problem here is # that the passed fill_value is not compatible with the ndtype. err_msg = "Cannot convert fill_value %s to dtype %s" raise TypeError(err_msg % (fill_value, ndtype)) from e return np.array(fill_value) def set_fill_value(a, fill_value): """ Set the filling value of a, if a is a masked array. This function changes the fill value of the masked array `a` in place. If `a` is not a masked array, the function returns silently, without doing anything. Parameters ---------- a : array_like Input array. fill_value : dtype Filling value. A consistency test is performed to make sure the value is compatible with the dtype of `a`. Returns ------- None Nothing returned by this function. See Also -------- maximum_fill_value : Return the default fill value for a dtype. MaskedArray.fill_value : Return current fill value. MaskedArray.set_fill_value : Equivalent method. 
Examples -------- >>> import numpy.ma as ma >>> a = np.arange(5) >>> a array([0, 1, 2, 3, 4]) >>> a = ma.masked_where(a < 3, a) >>> a masked_array(data=[--, --, --, 3, 4], mask=[ True, True, True, False, False], fill_value=999999) >>> ma.set_fill_value(a, -999) >>> a masked_array(data=[--, --, --, 3, 4], mask=[ True, True, True, False, False], fill_value=-999) Nothing happens if `a` is not a masked array. >>> a = list(range(5)) >>> a [0, 1, 2, 3, 4] >>> ma.set_fill_value(a, 100) >>> a [0, 1, 2, 3, 4] >>> a = np.arange(5) >>> a array([0, 1, 2, 3, 4]) >>> ma.set_fill_value(a, 100) >>> a array([0, 1, 2, 3, 4]) """ if isinstance(a, MaskedArray): a.set_fill_value(fill_value) return def get_fill_value(a): """ Return the filling value of a, if any. Otherwise, returns the default filling value for that type. """ if isinstance(a, MaskedArray): result = a.fill_value else: result = default_fill_value(a) return result def common_fill_value(a, b): """ Return the common filling value of two masked arrays, if any. If ``a.fill_value == b.fill_value``, return the fill value, otherwise return None. Parameters ---------- a, b : MaskedArray The masked arrays for which to compare fill values. Returns ------- fill_value : scalar or None The common fill value, or None. Examples -------- >>> x = np.ma.array([0, 1.], fill_value=3) >>> y = np.ma.array([0, 1.], fill_value=3) >>> np.ma.common_fill_value(x, y) 3.0 """ t1 = get_fill_value(a) t2 = get_fill_value(b) if t1 == t2: return t1 return None def filled(a, fill_value=None): """ Return input as an array with masked data replaced by a fill value. If `a` is not a `MaskedArray`, `a` itself is returned. If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to ``a.fill_value``. Parameters ---------- a : MaskedArray or array_like An input object. fill_value : array_like, optional. Can be scalar or non-scalar. If non-scalar, the resulting filled array should be broadcastable over input array. Default is None. Returns ------- a : ndarray The filled array. See Also -------- compressed Examples -------- >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) >>> x.filled() array([[999999, 1, 2], [999999, 4, 5], [ 6, 7, 8]]) >>> x.filled(fill_value=333) array([[333, 1, 2], [333, 4, 5], [ 6, 7, 8]]) >>> x.filled(fill_value=np.arange(3)) array([[0, 1, 2], [0, 4, 5], [6, 7, 8]]) """ if hasattr(a, 'filled'): return a.filled(fill_value) elif isinstance(a, ndarray): # Should we check for contiguity ? and a.flags['CONTIGUOUS']: return a elif isinstance(a, dict): return np.array(a, 'O') else: return np.array(a) def get_masked_subclass(*arrays): """ Return the youngest subclass of MaskedArray from a list of (masked) arrays. In case of siblings, the first listed takes over. """ if len(arrays) == 1: arr = arrays[0] if isinstance(arr, MaskedArray): rcls = type(arr) else: rcls = MaskedArray else: arrcls = [type(a) for a in arrays] rcls = arrcls[0] if not issubclass(rcls, MaskedArray): rcls = MaskedArray for cls in arrcls[1:]: if issubclass(cls, rcls): rcls = cls # Don't return MaskedConstant as result: revert to MaskedArray if rcls.__name__ == 'MaskedConstant': return MaskedArray return rcls def getdata(a, subok=True): """ Return the data of a masked array as an ndarray. Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, else return `a` as a ndarray or subclass (depending on `subok`) if not. Parameters ---------- a : array_like Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. 
subok : bool Whether to force the output to be a `pure` ndarray (False) or to return a subclass of ndarray if appropriate (True, default). See Also -------- getmask : Return the mask of a masked array, or nomask. getmaskarray : Return the mask of a masked array, or full array of False. Examples -------- >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a masked_array( data=[[1, --], [3, 4]], mask=[[False, True], [False, False]], fill_value=2) >>> ma.getdata(a) array([[1, 2], [3, 4]]) Equivalently use the ``MaskedArray`` `data` attribute. >>> a.data array([[1, 2], [3, 4]]) """ try: data = a._data except AttributeError: data = np.array(a, copy=False, subok=subok) if not subok: return data.view(ndarray) return data get_data = getdata def fix_invalid(a, mask=nomask, copy=True, fill_value=None): """ Return input with invalid data masked and replaced by a fill value. Invalid data means values of `nan`, `inf`, etc. Parameters ---------- a : array_like Input array, a (subclass of) ndarray. mask : sequence, optional Mask. Must be convertible to an array of booleans with the same shape as `data`. True indicates a masked (i.e. invalid) data. copy : bool, optional Whether to use a copy of `a` (True) or to fix `a` in place (False). Default is True. fill_value : scalar, optional Value used for fixing invalid data. Default is None, in which case the ``a.fill_value`` is used. Returns ------- b : MaskedArray The input array with invalid entries fixed. Notes ----- A copy is performed by default. Examples -------- >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) >>> x masked_array(data=[--, -1.0, nan, inf], mask=[ True, False, False, False], fill_value=1e+20) >>> np.ma.fix_invalid(x) masked_array(data=[--, -1.0, --, --], mask=[ True, False, True, True], fill_value=1e+20) >>> fixed = np.ma.fix_invalid(x) >>> fixed.data array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20]) >>> x.data array([ 1., -1., nan, inf]) """ a = masked_array(a, copy=copy, mask=mask, subok=True) invalid = np.logical_not(np.isfinite(a._data)) if not invalid.any(): return a a._mask |= invalid if fill_value is None: fill_value = a.fill_value a._data[invalid] = fill_value return a def is_string_or_list_of_strings(val): return (isinstance(val, str) or (isinstance(val, list) and val and builtins.all(isinstance(s, str) for s in val))) ############################################################################### # Ufuncs # ############################################################################### ufunc_domain = {} ufunc_fills = {} class _DomainCheckInterval: """ Define a valid interval, so that : ``domain_check_interval(a,b)(x) == True`` where ``x < a`` or ``x > b``. """ def __init__(self, a, b): "domain_check_interval(a,b)(x) = true where x < a or y > b" if a > b: (a, b) = (b, a) self.a = a self.b = b def __call__(self, x): "Execute the call behavior." # nans at masked positions cause RuntimeWarnings, even though # they are masked. To avoid this we suppress warnings. with np.errstate(invalid='ignore'): return umath.logical_or(umath.greater(x, self.b), umath.less(x, self.a)) class _DomainTan: """ Define a valid interval for the `tan` function, so that: ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps`` """ def __init__(self, eps): "domain_tan(eps) = true where abs(cos(x)) < eps)" self.eps = eps def __call__(self, x): "Executes the call behavior." with np.errstate(invalid='ignore'): return umath.less(umath.absolute(umath.cos(x)), self.eps) class _DomainSafeDivide: """ Define a domain for safe division. 
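
    Division is considered unsafe, and is masked, where
    ``abs(a) * tolerance >= abs(b)``, with the tolerance defaulting to
    ``np.finfo(float).tiny``; in other words, where `b` is so small
    relative to `a` that the quotient could overflow.  A rough sketch of
    the behaviour, calling the (private) class directly for illustration:

    >>> dom = _DomainSafeDivide()
    >>> dom(np.array([1.0, 1.0]), np.array([0.0, 1.0]))
    array([ True, False])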
""" def __init__(self, tolerance=None): self.tolerance = tolerance def __call__(self, a, b): # Delay the selection of the tolerance to here in order to reduce numpy # import times. The calculation of these parameters is a substantial # component of numpy's import time. if self.tolerance is None: self.tolerance = np.finfo(float).tiny # don't call ma ufuncs from __array_wrap__ which would fail for scalars a, b = np.asarray(a), np.asarray(b) with np.errstate(invalid='ignore'): return umath.absolute(a) * self.tolerance >= umath.absolute(b) class _DomainGreater: """ DomainGreater(v)(x) is True where x <= v. """ def __init__(self, critical_value): "DomainGreater(v)(x) = true where x <= v" self.critical_value = critical_value def __call__(self, x): "Executes the call behavior." with np.errstate(invalid='ignore'): return umath.less_equal(x, self.critical_value) class _DomainGreaterEqual: """ DomainGreaterEqual(v)(x) is True where x < v. """ def __init__(self, critical_value): "DomainGreaterEqual(v)(x) = true where x < v" self.critical_value = critical_value def __call__(self, x): "Executes the call behavior." with np.errstate(invalid='ignore'): return umath.less(x, self.critical_value) class _MaskedUFunc: def __init__(self, ufunc): self.f = ufunc self.__doc__ = ufunc.__doc__ self.__name__ = ufunc.__name__ def __str__(self): return f"Masked version of {self.f}" class _MaskedUnaryOperation(_MaskedUFunc): """ Defines masked version of unary operations, where invalid values are pre-masked. Parameters ---------- mufunc : callable The function for which to define a masked version. Made available as ``_MaskedUnaryOperation.f``. fill : scalar, optional Filling value, default is 0. domain : class instance Domain for the function. Should be one of the ``_Domain*`` classes. Default is None. """ def __init__(self, mufunc, fill=0, domain=None): super(_MaskedUnaryOperation, self).__init__(mufunc) self.fill = fill self.domain = domain ufunc_domain[mufunc] = domain ufunc_fills[mufunc] = fill def __call__(self, a, *args, **kwargs): """ Execute the call behavior. """ d = getdata(a) # Deal with domain if self.domain is not None: # Case 1.1. : Domained function # nans at masked positions cause RuntimeWarnings, even though # they are masked. To avoid this we suppress warnings. with np.errstate(divide='ignore', invalid='ignore'): result = self.f(d, *args, **kwargs) # Make a mask m = ~umath.isfinite(result) m |= self.domain(d) m |= getmask(a) else: # Case 1.2. : Function without a domain # Get the result and the mask with np.errstate(divide='ignore', invalid='ignore'): result = self.f(d, *args, **kwargs) m = getmask(a) if not result.ndim: # Case 2.1. : The result is scalarscalar if m: return masked return result if m is not nomask: # Case 2.2. The result is an array # We need to fill the invalid data back w/ the input Now, # that's plain silly: in C, we would just skip the element and # keep the original, but we do have to do it that way in Python # In case result has a lower dtype than the inputs (as in # equal) try: np.copyto(result, d, where=m) except TypeError: pass # Transform to masked_result = result.view(get_masked_subclass(a)) masked_result._mask = m masked_result._update_from(a) return masked_result class _MaskedBinaryOperation(_MaskedUFunc): """ Define masked version of binary operations, where invalid values are pre-masked. Parameters ---------- mbfunc : function The function for which to define a masked version. Made available as ``_MaskedBinaryOperation.f``. 
domain : class instance Default domain for the function. Should be one of the ``_Domain*`` classes. Default is None. fillx : scalar, optional Filling value for the first argument, default is 0. filly : scalar, optional Filling value for the second argument, default is 0. """ def __init__(self, mbfunc, fillx=0, filly=0): """ abfunc(fillx, filly) must be defined. abfunc(x, filly) = x for all x to enable reduce. """ super(_MaskedBinaryOperation, self).__init__(mbfunc) self.fillx = fillx self.filly = filly ufunc_domain[mbfunc] = None ufunc_fills[mbfunc] = (fillx, filly) def __call__(self, a, b, *args, **kwargs): """ Execute the call behavior. """ # Get the data, as ndarray (da, db) = (getdata(a), getdata(b)) # Get the result with np.errstate(): np.seterr(divide='ignore', invalid='ignore') result = self.f(da, db, *args, **kwargs) # Get the mask for the result (ma, mb) = (getmask(a), getmask(b)) if ma is nomask: if mb is nomask: m = nomask else: m = umath.logical_or(getmaskarray(a), mb) elif mb is nomask: m = umath.logical_or(ma, getmaskarray(b)) else: m = umath.logical_or(ma, mb) # Case 1. : scalar if not result.ndim: if m: return masked return result # Case 2. : array # Revert result to da where masked if m is not nomask and m.any(): # any errors, just abort; impossible to guarantee masked values try: np.copyto(result, da, casting='unsafe', where=m) except Exception: pass # Transforms to a (subclass of) MaskedArray masked_result = result.view(get_masked_subclass(a, b)) masked_result._mask = m if isinstance(a, MaskedArray): masked_result._update_from(a) elif isinstance(b, MaskedArray): masked_result._update_from(b) return masked_result def reduce(self, target, axis=0, dtype=None): """ Reduce `target` along the given `axis`. """ tclass = get_masked_subclass(target) m = getmask(target) t = filled(target, self.filly) if t.shape == (): t = t.reshape(1) if m is not nomask: m = make_mask(m, copy=True) m.shape = (1,) if m is nomask: tr = self.f.reduce(t, axis) mr = nomask else: tr = self.f.reduce(t, axis, dtype=dtype or t.dtype) mr = umath.logical_and.reduce(m, axis) if not tr.shape: if mr: return masked else: return tr masked_tr = tr.view(tclass) masked_tr._mask = mr return masked_tr def outer(self, a, b): """ Return the function applied to the outer product of a and b. """ (da, db) = (getdata(a), getdata(b)) d = self.f.outer(da, db) ma = getmask(a) mb = getmask(b) if ma is nomask and mb is nomask: m = nomask else: ma = getmaskarray(a) mb = getmaskarray(b) m = umath.logical_or.outer(ma, mb) if (not m.ndim) and m: return masked if m is not nomask: np.copyto(d, da, where=m) if not d.shape: return d masked_d = d.view(get_masked_subclass(a, b)) masked_d._mask = m return masked_d def accumulate(self, target, axis=0): """Accumulate `target` along `axis` after filling with y fill value. """ tclass = get_masked_subclass(target) t = filled(target, self.filly) result = self.f.accumulate(t, axis) masked_result = result.view(tclass) return masked_result class _DomainedBinaryOperation(_MaskedUFunc): """ Define binary operations that have a domain, like divide. They have no reduce, outer or accumulate. Parameters ---------- mbfunc : function The function for which to define a masked version. Made available as ``_DomainedBinaryOperation.f``. domain : class instance Default domain for the function. Should be one of the ``_Domain*`` classes. fillx : scalar, optional Filling value for the first argument, default is 0. filly : scalar, optional Filling value for the second argument, default is 0. 
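
    Notes
    -----
    Module-level functions such as ``divide`` and ``remainder`` are
    instances of this class.  A minimal illustration of the resulting
    behaviour (out-of-domain operations are masked rather than raising):

    >>> np.ma.divide(np.ma.array([1.0, 2.0]), [1.0, 0.0])
    masked_array(data=[1.0, --],
                 mask=[False,  True],
           fill_value=1e+20)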
""" def __init__(self, dbfunc, domain, fillx=0, filly=0): """abfunc(fillx, filly) must be defined. abfunc(x, filly) = x for all x to enable reduce. """ super(_DomainedBinaryOperation, self).__init__(dbfunc) self.domain = domain self.fillx = fillx self.filly = filly ufunc_domain[dbfunc] = domain ufunc_fills[dbfunc] = (fillx, filly) def __call__(self, a, b, *args, **kwargs): "Execute the call behavior." # Get the data (da, db) = (getdata(a), getdata(b)) # Get the result with np.errstate(divide='ignore', invalid='ignore'): result = self.f(da, db, *args, **kwargs) # Get the mask as a combination of the source masks and invalid m = ~umath.isfinite(result) m |= getmask(a) m |= getmask(b) # Apply the domain domain = ufunc_domain.get(self.f, None) if domain is not None: m |= domain(da, db) # Take care of the scalar case first if not m.ndim: if m: return masked else: return result # When the mask is True, put back da if possible # any errors, just abort; impossible to guarantee masked values try: np.copyto(result, 0, casting='unsafe', where=m) # avoid using "*" since this may be overlaid masked_da = umath.multiply(m, da) # only add back if it can be cast safely if np.can_cast(masked_da.dtype, result.dtype, casting='safe'): result += masked_da except Exception: pass # Transforms to a (subclass of) MaskedArray masked_result = result.view(get_masked_subclass(a, b)) masked_result._mask = m if isinstance(a, MaskedArray): masked_result._update_from(a) elif isinstance(b, MaskedArray): masked_result._update_from(b) return masked_result # Unary ufuncs exp = _MaskedUnaryOperation(umath.exp) conjugate = _MaskedUnaryOperation(umath.conjugate) sin = _MaskedUnaryOperation(umath.sin) cos = _MaskedUnaryOperation(umath.cos) arctan = _MaskedUnaryOperation(umath.arctan) arcsinh = _MaskedUnaryOperation(umath.arcsinh) sinh = _MaskedUnaryOperation(umath.sinh) cosh = _MaskedUnaryOperation(umath.cosh) tanh = _MaskedUnaryOperation(umath.tanh) abs = absolute = _MaskedUnaryOperation(umath.absolute) angle = _MaskedUnaryOperation(angle) # from numpy.lib.function_base fabs = _MaskedUnaryOperation(umath.fabs) negative = _MaskedUnaryOperation(umath.negative) floor = _MaskedUnaryOperation(umath.floor) ceil = _MaskedUnaryOperation(umath.ceil) around = _MaskedUnaryOperation(np.round_) logical_not = _MaskedUnaryOperation(umath.logical_not) # Domained unary ufuncs sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0, _DomainGreaterEqual(0.0)) log = _MaskedUnaryOperation(umath.log, 1.0, _DomainGreater(0.0)) log2 = _MaskedUnaryOperation(umath.log2, 1.0, _DomainGreater(0.0)) log10 = _MaskedUnaryOperation(umath.log10, 1.0, _DomainGreater(0.0)) tan = _MaskedUnaryOperation(umath.tan, 0.0, _DomainTan(1e-35)) arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0, _DomainCheckInterval(-1.0, 1.0)) arccos = _MaskedUnaryOperation(umath.arccos, 0.0, _DomainCheckInterval(-1.0, 1.0)) arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0, _DomainGreaterEqual(1.0)) arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0, _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15)) # Binary ufuncs add = _MaskedBinaryOperation(umath.add) subtract = _MaskedBinaryOperation(umath.subtract) multiply = _MaskedBinaryOperation(umath.multiply, 1, 1) arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0) equal = _MaskedBinaryOperation(umath.equal) equal.reduce = None not_equal = _MaskedBinaryOperation(umath.not_equal) not_equal.reduce = None less_equal = _MaskedBinaryOperation(umath.less_equal) less_equal.reduce = None greater_equal = _MaskedBinaryOperation(umath.greater_equal) 
greater_equal.reduce = None less = _MaskedBinaryOperation(umath.less) less.reduce = None greater = _MaskedBinaryOperation(umath.greater) greater.reduce = None logical_and = _MaskedBinaryOperation(umath.logical_and) alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce logical_or = _MaskedBinaryOperation(umath.logical_or) sometrue = logical_or.reduce logical_xor = _MaskedBinaryOperation(umath.logical_xor) bitwise_and = _MaskedBinaryOperation(umath.bitwise_and) bitwise_or = _MaskedBinaryOperation(umath.bitwise_or) bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor) hypot = _MaskedBinaryOperation(umath.hypot) # Domained binary ufuncs divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1) true_divide = _DomainedBinaryOperation(umath.true_divide, _DomainSafeDivide(), 0, 1) floor_divide = _DomainedBinaryOperation(umath.floor_divide, _DomainSafeDivide(), 0, 1) remainder = _DomainedBinaryOperation(umath.remainder, _DomainSafeDivide(), 0, 1) fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1) mod = _DomainedBinaryOperation(umath.mod, _DomainSafeDivide(), 0, 1) ############################################################################### # Mask creation functions # ############################################################################### def _replace_dtype_fields_recursive(dtype, primitive_dtype): "Private function allowing recursion in _replace_dtype_fields." _recurse = _replace_dtype_fields_recursive # Do we have some name fields ? if dtype.names is not None: descr = [] for name in dtype.names: field = dtype.fields[name] if len(field) == 3: # Prepend the title to the name name = (field[-1], name) descr.append((name, _recurse(field[0], primitive_dtype))) new_dtype = np.dtype(descr) # Is this some kind of composite a la (float,2) elif dtype.subdtype: descr = list(dtype.subdtype) descr[0] = _recurse(dtype.subdtype[0], primitive_dtype) new_dtype = np.dtype(tuple(descr)) # this is a primitive type, so do a direct replacement else: new_dtype = primitive_dtype # preserve identity of dtypes if new_dtype == dtype: new_dtype = dtype return new_dtype def _replace_dtype_fields(dtype, primitive_dtype): """ Construct a dtype description list from a given dtype. Returns a new dtype object, with all fields and subtypes in the given type recursively replaced with `primitive_dtype`. Arguments are coerced to dtypes first. """ dtype = np.dtype(dtype) primitive_dtype = np.dtype(primitive_dtype) return _replace_dtype_fields_recursive(dtype, primitive_dtype) def make_mask_descr(ndtype): """ Construct a dtype description list from a given dtype. Returns a new dtype object, with the type of all fields in `ndtype` to a boolean type. Field names are not altered. Parameters ---------- ndtype : dtype The dtype to convert. Returns ------- result : dtype A dtype that looks like `ndtype`, the type of all fields is boolean. Examples -------- >>> import numpy.ma as ma >>> dtype = np.dtype({'names':['foo', 'bar'], ... 'formats':[np.float32, np.int64]}) >>> dtype dtype([('foo', '<f4'), ('bar', '<i8')]) >>> ma.make_mask_descr(dtype) dtype([('foo', '|b1'), ('bar', '|b1')]) >>> ma.make_mask_descr(np.float32) dtype('bool') """ return _replace_dtype_fields(ndtype, MaskType) def getmask(a): """ Return the mask of a masked array, or nomask. Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the mask is not `nomask`, else return `nomask`. To guarantee a full array of booleans of the same shape as a, use `getmaskarray`. 
Parameters ---------- a : array_like Input `MaskedArray` for which the mask is required. See Also -------- getdata : Return the data of a masked array as an ndarray. getmaskarray : Return the mask of a masked array, or full array of False. Examples -------- >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a masked_array( data=[[1, --], [3, 4]], mask=[[False, True], [False, False]], fill_value=2) >>> ma.getmask(a) array([[False, True], [False, False]]) Equivalently use the `MaskedArray` `mask` attribute. >>> a.mask array([[False, True], [False, False]]) Result when mask == `nomask` >>> b = ma.masked_array([[1,2],[3,4]]) >>> b masked_array( data=[[1, 2], [3, 4]], mask=False, fill_value=999999) >>> ma.nomask False >>> ma.getmask(b) == ma.nomask True >>> b.mask == ma.nomask True """ return getattr(a, '_mask', nomask) get_mask = getmask def getmaskarray(arr): """ Return the mask of a masked array, or full boolean array of False. Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and the mask is not `nomask`, else return a full boolean array of False of the same shape as `arr`. Parameters ---------- arr : array_like Input `MaskedArray` for which the mask is required. See Also -------- getmask : Return the mask of a masked array, or nomask. getdata : Return the data of a masked array as an ndarray. Examples -------- >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a masked_array( data=[[1, --], [3, 4]], mask=[[False, True], [False, False]], fill_value=2) >>> ma.getmaskarray(a) array([[False, True], [False, False]]) Result when mask == ``nomask`` >>> b = ma.masked_array([[1,2],[3,4]]) >>> b masked_array( data=[[1, 2], [3, 4]], mask=False, fill_value=999999) >>> ma.getmaskarray(b) array([[False, False], [False, False]]) """ mask = getmask(arr) if mask is nomask: mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None)) return mask def is_mask(m): """ Return True if m is a valid, standard mask. This function does not check the contents of the input, only that the type is MaskType. In particular, this function returns False if the mask has a flexible dtype. Parameters ---------- m : array_like Array to test. Returns ------- result : bool True if `m.dtype.type` is MaskType, False otherwise. See Also -------- ma.isMaskedArray : Test whether input is an instance of MaskedArray. Examples -------- >>> import numpy.ma as ma >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> m masked_array(data=[--, 1, --, 2, 3], mask=[ True, False, True, False, False], fill_value=0) >>> ma.is_mask(m) False >>> ma.is_mask(m.mask) True Input must be an ndarray (or have similar attributes) for it to be considered a valid mask. >>> m = [False, True, False] >>> ma.is_mask(m) False >>> m = np.array([False, True, False]) >>> m array([False, True, False]) >>> ma.is_mask(m) True Arrays with complex dtypes don't return True. >>> dtype = np.dtype({'names':['monty', 'pithon'], ... 'formats':[bool, bool]}) >>> dtype dtype([('monty', '|b1'), ('pithon', '|b1')]) >>> m = np.array([(True, False), (False, True), (True, False)], ... dtype=dtype) >>> m array([( True, False), (False, True), ( True, False)], dtype=[('monty', '?'), ('pithon', '?')]) >>> ma.is_mask(m) False """ try: return m.dtype.type is MaskType except AttributeError: return False def _shrink_mask(m): """ Shrink a mask to nomask if possible """ if m.dtype.names is None and not m.any(): return nomask else: return m def make_mask(m, copy=False, shrink=True, dtype=MaskType): """ Create a boolean mask from an array. 
Return `m` as a boolean mask, creating a copy if necessary or requested. The function can accept any sequence that is convertible to integers, or ``nomask``. Does not require that contents must be 0s and 1s, values of 0 are interpreted as False, everything else as True. Parameters ---------- m : array_like Potential mask. copy : bool, optional Whether to return a copy of `m` (True) or `m` itself (False). shrink : bool, optional Whether to shrink `m` to ``nomask`` if all its values are False. dtype : dtype, optional Data-type of the output mask. By default, the output mask has a dtype of MaskType (bool). If the dtype is flexible, each field has a boolean dtype. This is ignored when `m` is ``nomask``, in which case ``nomask`` is always returned. Returns ------- result : ndarray A boolean mask derived from `m`. Examples -------- >>> import numpy.ma as ma >>> m = [True, False, True, True] >>> ma.make_mask(m) array([ True, False, True, True]) >>> m = [1, 0, 1, 1] >>> ma.make_mask(m) array([ True, False, True, True]) >>> m = [1, 0, 2, -3] >>> ma.make_mask(m) array([ True, False, True, True]) Effect of the `shrink` parameter. >>> m = np.zeros(4) >>> m array([0., 0., 0., 0.]) >>> ma.make_mask(m) False >>> ma.make_mask(m, shrink=False) array([False, False, False, False]) Using a flexible `dtype`. >>> m = [1, 0, 1, 1] >>> n = [0, 1, 0, 0] >>> arr = [] >>> for man, mouse in zip(m, n): ... arr.append((man, mouse)) >>> arr [(1, 0), (0, 1), (1, 0), (1, 0)] >>> dtype = np.dtype({'names':['man', 'mouse'], ... 'formats':[np.int64, np.int64]}) >>> arr = np.array(arr, dtype=dtype) >>> arr array([(1, 0), (0, 1), (1, 0), (1, 0)], dtype=[('man', '<i8'), ('mouse', '<i8')]) >>> ma.make_mask(arr, dtype=dtype) array([(True, False), (False, True), (True, False), (True, False)], dtype=[('man', '|b1'), ('mouse', '|b1')]) """ if m is nomask: return nomask # Make sure the input dtype is valid. dtype = make_mask_descr(dtype) # legacy boolean special case: "existence of fields implies true" if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_: return np.ones(m.shape, dtype=dtype) # Fill the mask in case there are missing data; turn it into an ndarray. result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True) # Bas les masques ! if shrink: result = _shrink_mask(result) return result def make_mask_none(newshape, dtype=None): """ Return a boolean mask of the given shape, filled with False. This function returns a boolean ndarray with all entries False, that can be used in common mask manipulations. If a complex dtype is specified, the type of each field is converted to a boolean type. Parameters ---------- newshape : tuple A tuple indicating the shape of the mask. dtype : {None, dtype}, optional If None, use a MaskType instance. Otherwise, use a new datatype with the same fields as `dtype`, converted to boolean types. Returns ------- result : ndarray An ndarray of appropriate shape and dtype, filled with False. See Also -------- make_mask : Create a boolean mask from an array. make_mask_descr : Construct a dtype description list from a given dtype. Examples -------- >>> import numpy.ma as ma >>> ma.make_mask_none((3,)) array([False, False, False]) Defining a more complex dtype. >>> dtype = np.dtype({'names':['foo', 'bar'], ... 
'formats':[np.float32, np.int64]}) >>> dtype dtype([('foo', '<f4'), ('bar', '<i8')]) >>> ma.make_mask_none((3,), dtype=dtype) array([(False, False), (False, False), (False, False)], dtype=[('foo', '|b1'), ('bar', '|b1')]) """ if dtype is None: result = np.zeros(newshape, dtype=MaskType) else: result = np.zeros(newshape, dtype=make_mask_descr(dtype)) return result def mask_or(m1, m2, copy=False, shrink=True): """ Combine two masks with the ``logical_or`` operator. The result may be a view on `m1` or `m2` if the other is `nomask` (i.e. False). Parameters ---------- m1, m2 : array_like Input masks. copy : bool, optional If copy is False and one of the inputs is `nomask`, return a view of the other input mask. Defaults to False. shrink : bool, optional Whether to shrink the output to `nomask` if all its values are False. Defaults to True. Returns ------- mask : output mask The result masks values that are masked in either `m1` or `m2`. Raises ------ ValueError If `m1` and `m2` have different flexible dtypes. Examples -------- >>> m1 = np.ma.make_mask([0, 1, 1, 0]) >>> m2 = np.ma.make_mask([1, 0, 0, 0]) >>> np.ma.mask_or(m1, m2) array([ True, True, True, False]) """ @recursive def _recursive_mask_or(self, m1, m2, newmask): names = m1.dtype.names for name in names: current1 = m1[name] if current1.dtype.names is not None: self(current1, m2[name], newmask[name]) else: umath.logical_or(current1, m2[name], newmask[name]) return if (m1 is nomask) or (m1 is False): dtype = getattr(m2, 'dtype', MaskType) return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype) if (m2 is nomask) or (m2 is False): dtype = getattr(m1, 'dtype', MaskType) return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype) if m1 is m2 and is_mask(m1): return m1 (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) if dtype1 != dtype2: raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) if dtype1.names is not None: # Allocate an output mask array with the properly broadcast shape. newmask = np.empty(np.broadcast(m1, m2).shape, dtype1) _recursive_mask_or(m1, m2, newmask) return newmask return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) def flatten_mask(mask): """ Returns a completely flattened version of the mask, where nested fields are collapsed. Parameters ---------- mask : array_like Input array, which will be interpreted as booleans. Returns ------- flattened_mask : ndarray of bools The flattened input. Examples -------- >>> mask = np.array([0, 0, 1]) >>> np.ma.flatten_mask(mask) array([False, False, True]) >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) >>> np.ma.flatten_mask(mask) array([False, False, False, True]) >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) >>> np.ma.flatten_mask(mask) array([False, False, False, False, False, True]) """ def _flatmask(mask): "Flatten the mask and returns a (maybe nested) sequence of booleans." mnames = mask.dtype.names if mnames is not None: return [flatten_mask(mask[name]) for name in mnames] else: return mask def _flatsequence(sequence): "Generates a flattened version of the sequence." 
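        # Recurse into nested iterables; a non-iterable argument makes the
        # ``for`` loop raise TypeError, in which case the argument itself is
        # yielded as the sole element.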
try: for element in sequence: if hasattr(element, '__iter__'): yield from _flatsequence(element) else: yield element except TypeError: yield sequence mask = np.asarray(mask) flattened = _flatsequence(_flatmask(mask)) return np.array([_ for _ in flattened], dtype=bool) def _check_mask_axis(mask, axis, keepdims=np._NoValue): "Check whether there are masked values along the given axis" kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} if mask is not nomask: return mask.all(axis=axis, **kwargs) return nomask ############################################################################### # Masking functions # ############################################################################### def masked_where(condition, a, copy=True): """ Mask an array where a condition is met. Return `a` as an array masked where `condition` is True. Any masked values of `a` or `condition` are also masked in the output. Parameters ---------- condition : array_like Masking condition. When `condition` tests floating point values for equality, consider using ``masked_values`` instead. a : array_like Array to mask. copy : bool If True (default) make a copy of `a` in the result. If False modify `a` in place and return a view. Returns ------- result : MaskedArray The result of masking `a` where `condition` is True. See Also -------- masked_values : Mask using floating point equality. masked_equal : Mask where equal to a given value. masked_not_equal : Mask where `not` equal to a given value. masked_less_equal : Mask where less than or equal to a given value. masked_greater_equal : Mask where greater than or equal to a given value. masked_less : Mask where less than a given value. masked_greater : Mask where greater than a given value. masked_inside : Mask inside a given interval. masked_outside : Mask outside a given interval. masked_invalid : Mask invalid values (NaNs or infs). Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_where(a <= 2, a) masked_array(data=[--, --, --, 3], mask=[ True, True, True, False], fill_value=999999) Mask array `b` conditional on `a`. >>> b = ['a', 'b', 'c', 'd'] >>> ma.masked_where(a == 2, b) masked_array(data=['a', 'b', --, 'd'], mask=[False, False, True, False], fill_value='N/A', dtype='<U1') Effect of the `copy` argument. >>> c = ma.masked_where(a <= 2, a) >>> c masked_array(data=[--, --, --, 3], mask=[ True, True, True, False], fill_value=999999) >>> c[0] = 99 >>> c masked_array(data=[99, --, --, 3], mask=[False, True, True, False], fill_value=999999) >>> a array([0, 1, 2, 3]) >>> c = ma.masked_where(a <= 2, a, copy=False) >>> c[0] = 99 >>> c masked_array(data=[99, --, --, 3], mask=[False, True, True, False], fill_value=999999) >>> a array([99, 1, 2, 3]) When `condition` or `a` contain masked values. >>> a = np.arange(4) >>> a = ma.masked_where(a == 2, a) >>> a masked_array(data=[0, 1, --, 3], mask=[False, False, True, False], fill_value=999999) >>> b = np.arange(4) >>> b = ma.masked_where(b == 0, b) >>> b masked_array(data=[--, 1, 2, 3], mask=[ True, False, False, False], fill_value=999999) >>> ma.masked_where(a == 3, b) masked_array(data=[--, 1, --, --], mask=[ True, False, True, True], fill_value=999999) """ # Make sure that condition is a valid standard-type mask. 
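    # shrink=False keeps the full boolean array (an all-False mask is not
    # collapsed to ``nomask``), so that its shape can be compared with the
    # shape of `a` below.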
cond = make_mask(condition, shrink=False) a = np.array(a, copy=copy, subok=True) (cshape, ashape) = (cond.shape, a.shape) if cshape and cshape != ashape: raise IndexError("Inconsistent shape between the condition and the input" " (got %s and %s)" % (cshape, ashape)) if hasattr(a, '_mask'): cond = mask_or(cond, a._mask) cls = type(a) else: cls = MaskedArray result = a.view(cls) # Assign to *.mask so that structured masks are handled correctly. result.mask = _shrink_mask(cond) return result def masked_greater(x, value, copy=True): """ Mask an array where greater than a given value. This function is a shortcut to ``masked_where``, with `condition` = (x > value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_greater(a, 2) masked_array(data=[0, 1, 2, --], mask=[False, False, False, True], fill_value=999999) """ return masked_where(greater(x, value), x, copy=copy) def masked_greater_equal(x, value, copy=True): """ Mask an array where greater than or equal to a given value. This function is a shortcut to ``masked_where``, with `condition` = (x >= value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_greater_equal(a, 2) masked_array(data=[0, 1, --, --], mask=[False, False, True, True], fill_value=999999) """ return masked_where(greater_equal(x, value), x, copy=copy) def masked_less(x, value, copy=True): """ Mask an array where less than a given value. This function is a shortcut to ``masked_where``, with `condition` = (x < value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_less(a, 2) masked_array(data=[--, --, 2, 3], mask=[ True, True, False, False], fill_value=999999) """ return masked_where(less(x, value), x, copy=copy) def masked_less_equal(x, value, copy=True): """ Mask an array where less than or equal to a given value. This function is a shortcut to ``masked_where``, with `condition` = (x <= value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_less_equal(a, 2) masked_array(data=[--, --, --, 3], mask=[ True, True, True, False], fill_value=999999) """ return masked_where(less_equal(x, value), x, copy=copy) def masked_not_equal(x, value, copy=True): """ Mask an array where `not` equal to a given value. This function is a shortcut to ``masked_where``, with `condition` = (x != value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_not_equal(a, 2) masked_array(data=[--, --, 2, --], mask=[ True, True, False, True], fill_value=999999) """ return masked_where(not_equal(x, value), x, copy=copy) def masked_equal(x, value, copy=True): """ Mask an array where equal to a given value. This function is a shortcut to ``masked_where``, with `condition` = (x == value). For floating point arrays, consider using ``masked_values(x, value)``. See Also -------- masked_where : Mask where a condition is met. masked_values : Mask using floating point equality. 
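
    Notes
    -----
    The fill value of the result is set to `value`; this is the main
    difference from a bare ``masked_where(x == value, x)``.  For example
    (illustrative):

    >>> np.ma.masked_equal([0, 1, 2], 1).fill_value
    1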
Examples -------- >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_equal(a, 2) masked_array(data=[0, 1, --, 3], mask=[False, False, True, False], fill_value=2) """ output = masked_where(equal(x, value), x, copy=copy) output.fill_value = value return output def masked_inside(x, v1, v2, copy=True): """ Mask an array inside a given interval. Shortcut to ``masked_where``, where `condition` is True for `x` inside the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` can be given in either order. See Also -------- masked_where : Mask where a condition is met. Notes ----- The array `x` is prefilled with its filling value. Examples -------- >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_inside(x, -0.3, 0.3) masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], mask=[False, False, True, True, False, False], fill_value=1e+20) The order of `v1` and `v2` doesn't matter. >>> ma.masked_inside(x, 0.3, -0.3) masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], mask=[False, False, True, True, False, False], fill_value=1e+20) """ if v2 < v1: (v1, v2) = (v2, v1) xf = filled(x) condition = (xf >= v1) & (xf <= v2) return masked_where(condition, x, copy=copy) def masked_outside(x, v1, v2, copy=True): """ Mask an array outside a given interval. Shortcut to ``masked_where``, where `condition` is True for `x` outside the interval [v1,v2] (x < v1)|(x > v2). The boundaries `v1` and `v2` can be given in either order. See Also -------- masked_where : Mask where a condition is met. Notes ----- The array `x` is prefilled with its filling value. Examples -------- >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_outside(x, -0.3, 0.3) masked_array(data=[--, --, 0.01, 0.2, --, --], mask=[ True, True, False, False, True, True], fill_value=1e+20) The order of `v1` and `v2` doesn't matter. >>> ma.masked_outside(x, 0.3, -0.3) masked_array(data=[--, --, 0.01, 0.2, --, --], mask=[ True, True, False, False, True, True], fill_value=1e+20) """ if v2 < v1: (v1, v2) = (v2, v1) xf = filled(x) condition = (xf < v1) | (xf > v2) return masked_where(condition, x, copy=copy) def masked_object(x, value, copy=True, shrink=True): """ Mask the array `x` where the data are exactly equal to value. This function is similar to `masked_values`, but only suitable for object arrays: for floating point, use `masked_values` instead. Parameters ---------- x : array_like Array to mask value : object Comparison value copy : {True, False}, optional Whether to return a copy of `x`. shrink : {True, False}, optional Whether to collapse a mask full of False to nomask Returns ------- result : MaskedArray The result of masking `x` where equal to `value`. See Also -------- masked_where : Mask where a condition is met. masked_equal : Mask where equal to a given value (integers). masked_values : Mask using floating point equality. Examples -------- >>> import numpy.ma as ma >>> food = np.array(['green_eggs', 'ham'], dtype=object) >>> # don't eat spoiled food >>> eat = ma.masked_object(food, 'green_eggs') >>> eat masked_array(data=[--, 'ham'], mask=[ True, False], fill_value='green_eggs', dtype=object) >>> # plain ol` ham is boring >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) >>> eat = ma.masked_object(fresh_food, 'green_eggs') >>> eat masked_array(data=['cheese', 'ham', 'pineapple'], mask=False, fill_value='green_eggs', dtype=object) Note that `mask` is set to ``nomask`` if possible. 
>>> eat masked_array(data=['cheese', 'ham', 'pineapple'], mask=False, fill_value='green_eggs', dtype=object) """ if isMaskedArray(x): condition = umath.equal(x._data, value) mask = x._mask else: condition = umath.equal(np.asarray(x), value) mask = nomask mask = mask_or(mask, make_mask(condition, shrink=shrink)) return masked_array(x, mask=mask, copy=copy, fill_value=value) def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): """ Mask using floating point equality. Return a MaskedArray, masked where the data in array `x` are approximately equal to `value`, determined using `isclose`. The default tolerances for `masked_values` are the same as those for `isclose`. For integer types, exact equality is used, in the same way as `masked_equal`. The fill_value is set to `value` and the mask is set to ``nomask`` if possible. Parameters ---------- x : array_like Array to mask. value : float Masking value. rtol, atol : float, optional Tolerance parameters passed on to `isclose` copy : bool, optional Whether to return a copy of `x`. shrink : bool, optional Whether to collapse a mask full of False to ``nomask``. Returns ------- result : MaskedArray The result of masking `x` where approximately equal to `value`. See Also -------- masked_where : Mask where a condition is met. masked_equal : Mask where equal to a given value (integers). Examples -------- >>> import numpy.ma as ma >>> x = np.array([1, 1.1, 2, 1.1, 3]) >>> ma.masked_values(x, 1.1) masked_array(data=[1.0, --, 2.0, --, 3.0], mask=[False, True, False, True, False], fill_value=1.1) Note that `mask` is set to ``nomask`` if possible. >>> ma.masked_values(x, 1.5) masked_array(data=[1. , 1.1, 2. , 1.1, 3. ], mask=False, fill_value=1.5) For integers, the fill value will be different in general to the result of ``masked_equal``. >>> x = np.arange(5) >>> x array([0, 1, 2, 3, 4]) >>> ma.masked_values(x, 2) masked_array(data=[0, 1, --, 3, 4], mask=[False, False, True, False, False], fill_value=2) >>> ma.masked_equal(x, 2) masked_array(data=[0, 1, --, 3, 4], mask=[False, False, True, False, False], fill_value=2) """ xnew = filled(x, value) if np.issubdtype(xnew.dtype, np.floating): mask = np.isclose(xnew, value, atol=atol, rtol=rtol) else: mask = umath.equal(xnew, value) ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value) if shrink: ret.shrink_mask() return ret def masked_invalid(a, copy=True): """ Mask an array where invalid values occur (NaNs or infs). This function is a shortcut to ``masked_where``, with `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. Only applies to arrays with a dtype where NaNs or infs make sense (i.e. floating point types), but accepts any array_like object. See Also -------- masked_where : Mask where a condition is met. 
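
    Notes
    -----
    For an unmasked input this is roughly ``masked_where(~np.isfinite(a), a)``;
    a sketch of the equivalence:

    >>> a = np.array([0., np.nan, np.inf])
    >>> np.ma.masked_where(~np.isfinite(a), a)
    masked_array(data=[0.0, --, --],
                 mask=[False,  True,  True],
           fill_value=1e+20)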
Examples -------- >>> import numpy.ma as ma >>> a = np.arange(5, dtype=float) >>> a[2] = np.NaN >>> a[3] = np.PINF >>> a array([ 0., 1., nan, inf, 4.]) >>> ma.masked_invalid(a) masked_array(data=[0.0, 1.0, --, --, 4.0], mask=[False, False, True, True, False], fill_value=1e+20) """ a = np.array(a, copy=copy, subok=True) mask = getattr(a, '_mask', None) if mask is not None: condition = ~(np.isfinite(getdata(a))) if mask is not nomask: condition |= mask cls = type(a) else: condition = ~(np.isfinite(a)) cls = MaskedArray result = a.view(cls) result._mask = condition return result ############################################################################### # Printing options # ############################################################################### class _MaskedPrintOption: """ Handle the string used to represent missing data in a masked array. """ def __init__(self, display): """ Create the masked_print_option object. """ self._display = display self._enabled = True def display(self): """ Display the string to print for masked values. """ return self._display def set_display(self, s): """ Set the string to print for masked values. """ self._display = s def enabled(self): """ Is the use of the display value enabled? """ return self._enabled def enable(self, shrink=1): """ Set the enabling shrink to `shrink`. """ self._enabled = shrink def __str__(self): return str(self._display) __repr__ = __str__ # if you single index into a masked location you get this object. masked_print_option = _MaskedPrintOption('--') def _recursive_printoption(result, mask, printopt): """ Puts printoptions in result where mask is True. Private function allowing for recursion """ names = result.dtype.names if names is not None: for name in names: curdata = result[name] curmask = mask[name] _recursive_printoption(curdata, curmask, printopt) else: np.copyto(result, printopt, where=mask) return # For better or worse, these end in a newline _legacy_print_templates = dict( long_std=textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s) """), long_flx=textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s, %(nlen)s dtype = %(dtype)s) """), short_std=textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s) """), short_flx=textwrap.dedent("""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s, %(nlen)s dtype = %(dtype)s) """) ) ############################################################################### # MaskedArray class # ############################################################################### def _recursive_filled(a, mask, fill_value): """ Recursively fill `a` with `fill_value`. """ names = a.dtype.names for name in names: current = a[name] if current.dtype.names is not None: _recursive_filled(current, mask[name], fill_value[name]) else: np.copyto(current, fill_value[name], where=mask[name]) def flatten_structured_array(a): """ Flatten a structured array. The data type of the output is chosen such that it can represent all of the (nested) fields. Parameters ---------- a : structured array Returns ------- output : masked array or ndarray A flattened masked array if the input is a masked array, otherwise a standard ndarray. 
Examples -------- >>> ndtype = [('a', int), ('b', float)] >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) >>> np.ma.flatten_structured_array(a) array([[1., 1.], [2., 2.]]) """ def flatten_sequence(iterable): """ Flattens a compound of nested iterables. """ for elm in iter(iterable): if hasattr(elm, '__iter__'): yield from flatten_sequence(elm) else: yield elm a = np.asanyarray(a) inishape = a.shape a = a.ravel() if isinstance(a, MaskedArray): out = np.array([tuple(flatten_sequence(d.item())) for d in a._data]) out = out.view(MaskedArray) out._mask = np.array([tuple(flatten_sequence(d.item())) for d in getmaskarray(a)]) else: out = np.array([tuple(flatten_sequence(d.item())) for d in a]) if len(inishape) > 1: newshape = list(out.shape) newshape[0] = inishape out.shape = tuple(flatten_sequence(newshape)) return out def _arraymethod(funcname, onmask=True): """ Return a class method wrapper around a basic array method. Creates a class method which returns a masked array, where the new ``_data`` array is the output of the corresponding basic method called on the original ``_data``. If `onmask` is True, the new mask is the output of the method called on the initial mask. Otherwise, the new mask is just a reference to the initial mask. Parameters ---------- funcname : str Name of the function to apply on data. onmask : bool Whether the mask must be processed also (True) or left alone (False). Default is True. Make available as `_onmask` attribute. Returns ------- method : instancemethod Class method wrapper of the specified basic array method. """ def wrapped_method(self, *args, **params): result = getattr(self._data, funcname)(*args, **params) result = result.view(type(self)) result._update_from(self) mask = self._mask if not onmask: result.__setmask__(mask) elif mask is not nomask: # __setmask__ makes a copy, which we don't want result._mask = getattr(mask, funcname)(*args, **params) return result methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None) if methdoc is not None: wrapped_method.__doc__ = methdoc.__doc__ wrapped_method.__name__ = funcname return wrapped_method class MaskedIterator: """ Flat iterator object to iterate over masked arrays. A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array `x`. It allows iterating over the array as if it were a 1-D array, either in a for-loop or by calling its `next` method. Iteration is done in C-contiguous style, with the last index varying the fastest. The iterator can also be indexed using basic slicing or advanced indexing. See Also -------- MaskedArray.flat : Return a flat iterator over an array. MaskedArray.flatten : Returns a flattened copy of an array. Notes ----- `MaskedIterator` is not exported by the `ma` module. Instead of instantiating a `MaskedIterator` directly, use `MaskedArray.flat`. Examples -------- >>> x = np.ma.array(np.arange(6).reshape(2, 3)) >>> fl = x.flat >>> type(fl) <class 'numpy.ma.core.MaskedIterator'> >>> for item in fl: ... print(item) ...
0 1 2 3 4 5 Extracting more than a single element by indexing the `MaskedIterator` returns a masked array: >>> fl[2:4] masked_array(data=[2, 3], mask=False, fill_value=999999) """ def __init__(self, ma): self.ma = ma self.dataiter = ma._data.flat if ma._mask is nomask: self.maskiter = None else: self.maskiter = ma._mask.flat def __iter__(self): return self def __getitem__(self, indx): result = self.dataiter.__getitem__(indx).view(type(self.ma)) if self.maskiter is not None: _mask = self.maskiter.__getitem__(indx) if isinstance(_mask, ndarray): # set shape to match that of data; this is needed for matrices _mask.shape = result.shape result._mask = _mask elif isinstance(_mask, np.void): return mvoid(result, mask=_mask, hardmask=self.ma._hardmask) elif _mask: # Just a scalar, masked return masked return result # This won't work if ravel makes a copy def __setitem__(self, index, value): self.dataiter[index] = getdata(value) if self.maskiter is not None: self.maskiter[index] = getmaskarray(value) def __next__(self): """ Return the next value, or raise StopIteration. Examples -------- >>> x = np.ma.array([3, 2], mask=[0, 1]) >>> fl = x.flat >>> next(fl) 3 >>> next(fl) masked >>> next(fl) Traceback (most recent call last): ... StopIteration """ d = next(self.dataiter) if self.maskiter is not None: m = next(self.maskiter) if isinstance(m, np.void): return mvoid(d, mask=m, hardmask=self.ma._hardmask) elif m: # Just a scalar, masked return masked return d class MaskedArray(ndarray): """ An array class with possibly masked values. Masked values of True exclude the corresponding element from any computation. Construction:: x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True, ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, shrink=True, order=None) Parameters ---------- data : array_like Input data. mask : sequence, optional Mask. Must be convertible to an array of booleans with the same shape as `data`. True indicates masked (i.e. invalid) data. dtype : dtype, optional Data type of the output. If `dtype` is None, the type of the data argument (``data.dtype``) is used. If `dtype` is not None and different from ``data.dtype``, a copy is performed. copy : bool, optional Whether to copy the input data (True), or to use a reference instead. Default is False. subok : bool, optional Whether to return a subclass of `MaskedArray` if possible (True) or a plain `MaskedArray`. Default is True. ndmin : int, optional Minimum number of dimensions. Default is 0. fill_value : scalar, optional Value used to fill in the masked values when necessary. If None, a default based on the data-type is used. keep_mask : bool, optional Whether to combine `mask` with the mask of the input data, if any (True), or to use only `mask` for the output (False). Default is True. hard_mask : bool, optional Whether to use a hard mask or not. With a hard mask, masked values cannot be unmasked. Default is False. shrink : bool, optional Whether to force compression of an empty mask. Default is True. order : {'C', 'F', 'A'}, optional Specify the order of the array. If order is 'C', then the array will be in C-contiguous order (last-index varies the fastest). If order is 'F', then the returned array will be in Fortran-contiguous order (first-index varies the fastest). If order is 'A' (default), then the returned array may be in any order (either C-, Fortran-contiguous, or even discontiguous), unless a copy is required, in which case it will be C-contiguous.
Examples -------- The ``mask`` can be initialized with an array of boolean values with the same shape as ``data``. >>> data = np.arange(6).reshape((2, 3)) >>> np.ma.MaskedArray(data, mask=[[False, True, False], ... [False, False, True]]) masked_array( data=[[0, --, 2], [3, 4, --]], mask=[[False, True, False], [False, False, True]], fill_value=999999) Alternatively, the ``mask`` can be initialized to homogeneous boolean array with the same shape as ``data`` by passing in a scalar boolean value: >>> np.ma.MaskedArray(data, mask=False) masked_array( data=[[0, 1, 2], [3, 4, 5]], mask=[[False, False, False], [False, False, False]], fill_value=999999) >>> np.ma.MaskedArray(data, mask=True) masked_array( data=[[--, --, --], [--, --, --]], mask=[[ True, True, True], [ True, True, True]], fill_value=999999, dtype=int64) .. note:: The recommended practice for initializing ``mask`` with a scalar boolean value is to use ``True``/``False`` rather than ``np.True_``/``np.False_``. The reason is :attr:`nomask` is represented internally as ``np.False_``. >>> np.False_ is np.ma.nomask True """ __array_priority__ = 15 _defaultmask = nomask _defaulthardmask = False _baseclass = ndarray # Maximum number of elements per axis used when printing an array. The # 1d case is handled separately because we need more values in this case. _print_width = 100 _print_width_1d = 1500 def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, subok=True, ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, shrink=True, order=None): """ Create a new masked array from scratch. Notes ----- A masked array can also be created by taking a .view(MaskedArray). """ # Process data. _data = np.array(data, dtype=dtype, copy=copy, order=order, subok=True, ndmin=ndmin) _baseclass = getattr(data, '_baseclass', type(_data)) # Check that we're not erasing the mask. if isinstance(data, MaskedArray) and (data.shape != _data.shape): copy = True # Here, we copy the _view_, so that we can attach new properties to it # we must never do .view(MaskedConstant), as that would create a new # instance of np.ma.masked, which make identity comparison fail if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant): _data = ndarray.view(_data, type(data)) else: _data = ndarray.view(_data, cls) # Backwards compatibility w/ numpy.core.ma. if hasattr(data, '_mask') and not isinstance(data, ndarray): _data._mask = data._mask # FIXME _sharedmask is never used. _sharedmask = True # Process mask. # Type of the mask mdtype = make_mask_descr(_data.dtype) if mask is nomask: # Case 1. : no mask in input. # Erase the current mask ? if not keep_mask: # With a reduced version if shrink: _data._mask = nomask # With full version else: _data._mask = np.zeros(_data.shape, dtype=mdtype) # Check whether we missed something elif isinstance(data, (tuple, list)): try: # If data is a sequence of masked array mask = np.array([getmaskarray(np.asanyarray(m, dtype=mdtype)) for m in data], dtype=mdtype) except ValueError: # If data is nested mask = nomask # Force shrinking of the mask if needed (and possible) if (mdtype == MaskType) and mask.any(): _data._mask = mask _data._sharedmask = False else: _data._sharedmask = not copy if copy: _data._mask = _data._mask.copy() # Reset the shape of the original mask if getmask(data) is not nomask: data._mask.shape = data.shape else: # Case 2. : With a mask in input. 
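# (Illustrative: MaskedArray([1, 2, 3], mask=[0, 1, 0]) lands here; the # sequence mask is converted to a boolean ndarray, reshaped to the data # if needed, and, when `keep_mask` is set and the data already carries # a mask, combined with it by a logical OR below.)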
# If mask is boolean, create an array of True or False if mask is True and mdtype == MaskType: mask = np.ones(_data.shape, dtype=mdtype) elif mask is False and mdtype == MaskType: mask = np.zeros(_data.shape, dtype=mdtype) else: # Read the mask with the current mdtype try: mask = np.array(mask, copy=copy, dtype=mdtype) # Or assume it's a sequence of bool/int except TypeError: mask = np.array([tuple([m] * len(mdtype)) for m in mask], dtype=mdtype) # Make sure the mask and the data have the same shape if mask.shape != _data.shape: (nd, nm) = (_data.size, mask.size) if nm == 1: mask = np.resize(mask, _data.shape) elif nm == nd: mask = np.reshape(mask, _data.shape) else: msg = "Mask and data not compatible: data size is %i, " + \ "mask size is %i." raise MaskError(msg % (nd, nm)) copy = True # Set the mask to the new value if _data._mask is nomask: _data._mask = mask _data._sharedmask = not copy else: if not keep_mask: _data._mask = mask _data._sharedmask = not copy else: if _data.dtype.names is not None: def _recursive_or(a, b): "do a|=b on each field of a, recursively" for name in a.dtype.names: (af, bf) = (a[name], b[name]) if af.dtype.names is not None: _recursive_or(af, bf) else: af |= bf _recursive_or(_data._mask, mask) else: _data._mask = np.logical_or(mask, _data._mask) _data._sharedmask = False # Update fill_value. if fill_value is None: fill_value = getattr(data, '_fill_value', None) # But don't run the check unless we have something to check. if fill_value is not None: _data._fill_value = _check_fill_value(fill_value, _data.dtype) # Process extra options .. if hard_mask is None: _data._hardmask = getattr(data, '_hardmask', False) else: _data._hardmask = hard_mask _data._baseclass = _baseclass return _data def _update_from(self, obj): """ Copies some attributes of obj to self. """ if isinstance(obj, ndarray): _baseclass = type(obj) else: _baseclass = ndarray # We need to copy the _basedict to avoid backward propagation _optinfo = {} _optinfo.update(getattr(obj, '_optinfo', {})) _optinfo.update(getattr(obj, '_basedict', {})) if not isinstance(obj, MaskedArray): _optinfo.update(getattr(obj, '__dict__', {})) _dict = dict(_fill_value=getattr(obj, '_fill_value', None), _hardmask=getattr(obj, '_hardmask', False), _sharedmask=getattr(obj, '_sharedmask', False), _isfield=getattr(obj, '_isfield', False), _baseclass=getattr(obj, '_baseclass', _baseclass), _optinfo=_optinfo, _basedict=_optinfo) self.__dict__.update(_dict) self.__dict__.update(_optinfo) return def __array_finalize__(self, obj): """ Finalizes the masked array. """ # Get main attributes. self._update_from(obj) # We have to decide how to initialize self.mask, based on # obj.mask. This is very difficult. There might be some # correspondence between the elements in the array we are being # created from (= obj) and us. Or there might not. This method can # be called in all kinds of places for all kinds of reasons -- could # be empty_like, could be slicing, could be a ufunc, could be a view. # The numpy subclassing interface simply doesn't give us any way # to know, which means that at best this method will be based on # guesswork and heuristics. To make things worse, there isn't even any # clear consensus about what the desired behavior is. 
For instance, # most users think that np.empty_like(marr) -- which goes via this # method -- should return a masked array with an empty mask (see # gh-3404 and linked discussions), but others disagree, and they have # existing code which depends on empty_like returning an array that # matches the input mask. # # Historically our algorithm was: if the template object mask had the # same *number of elements* as us, then we used *its mask object # itself* as our mask, so that writes to us would also write to the # original array. This is horribly broken in multiple ways. # # Now what we do instead is, if the template object mask has the same # number of elements as us, and we do not have the same base pointer # as the template object (b/c views like arr[...] should keep the same # mask), then we make a copy of the template object mask and use # that. This is also horribly broken but somewhat less so. Maybe. if isinstance(obj, ndarray): # XX: This looks like a bug -- shouldn't it check self.dtype # instead? if obj.dtype.names is not None: _mask = getmaskarray(obj) else: _mask = getmask(obj) # If self and obj point to exactly the same data, then probably # self is a simple view of obj (e.g., self = obj[...]), so they # should share the same mask. (This isn't 100% reliable, e.g. self # could be the first row of obj, or have strange strides, but as a # heuristic it's not bad.) In all other cases, we make a copy of # the mask, so that future modifications to 'self' do not end up # side-effecting 'obj' as well. if (_mask is not nomask and obj.__array_interface__["data"][0] != self.__array_interface__["data"][0]): # We should make a copy. But we could get here via astype, # in which case the mask might need a new dtype as well # (e.g., changing to or from a structured dtype), and the # order could have changed. So, change the mask type if # needed and use astype instead of copy. if self.dtype == obj.dtype: _mask_dtype = _mask.dtype else: _mask_dtype = make_mask_descr(self.dtype) if self.flags.c_contiguous: order = "C" elif self.flags.f_contiguous: order = "F" else: order = "K" _mask = _mask.astype(_mask_dtype, order) else: # Take a view so shape changes, etc., do not propagate back. _mask = _mask.view() else: _mask = nomask self._mask = _mask # Finalize the mask if self._mask is not nomask: try: self._mask.shape = self.shape except ValueError: self._mask = nomask except (TypeError, AttributeError): # When _mask.shape is not writable (because it's a void) pass # Finalize the fill_value if self._fill_value is not None: self._fill_value = _check_fill_value(self._fill_value, self.dtype) elif self.dtype.names is not None: # Finalize the default fill_value for structured arrays self._fill_value = _check_fill_value(None, self.dtype) def __array_wrap__(self, obj, context=None): """ Special hook for ufuncs. Wraps the numpy array and sets the mask according to context.
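For ufuncs with a restricted domain (e.g. division), entries where the domain test fails are filled with a safe value and added to the output mask.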
""" if obj is self: # for in-place operations result = obj else: result = obj.view(type(self)) result._update_from(self) if context is not None: result._mask = result._mask.copy() func, args, out_i = context # args sometimes contains outputs (gh-10459), which we don't want input_args = args[:func.nin] m = reduce(mask_or, [getmaskarray(arg) for arg in input_args]) # Get the domain mask domain = ufunc_domain.get(func, None) if domain is not None: # Take the domain, and make sure it's a ndarray with np.errstate(divide='ignore', invalid='ignore'): d = filled(domain(*input_args), True) if d.any(): # Fill the result where the domain is wrong try: # Binary domain: take the last value fill_value = ufunc_fills[func][-1] except TypeError: # Unary domain: just use this one fill_value = ufunc_fills[func] except KeyError: # Domain not recognized, use fill_value instead fill_value = self.fill_value np.copyto(result, fill_value, where=d) # Update the mask if m is nomask: m = d else: # Don't modify inplace, we risk back-propagation m = (m | d) # Make sure the mask has the proper size if result is not self and result.shape == () and m: return masked else: result._mask = m result._sharedmask = False return result def view(self, dtype=None, type=None, fill_value=None): """ Return a view of the MaskedArray data. Parameters ---------- dtype : data-type or ndarray sub-class, optional Data-type descriptor of the returned view, e.g., float32 or int16. The default, None, results in the view having the same data-type as `a`. As with ``ndarray.view``, dtype can also be specified as an ndarray sub-class, which then specifies the type of the returned object (this is equivalent to setting the ``type`` parameter). type : Python type, optional Type of the returned view, either ndarray or a subclass. The default None results in type preservation. fill_value : scalar, optional The value to use for invalid entries (None by default). If None, then this argument is inferred from the passed `dtype`, or in its absence the original array, as discussed in the notes below. See Also -------- numpy.ndarray.view : Equivalent method on ndarray object. Notes ----- ``a.view()`` is used two different ways: ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view of the array's memory with a different data-type. This can cause a reinterpretation of the bytes of memory. ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just returns an instance of `ndarray_subclass` that looks at the same array (same shape, dtype, etc.) This does not cause a reinterpretation of the memory. If `fill_value` is not specified, but `dtype` is specified (and is not an ndarray sub-class), the `fill_value` of the MaskedArray will be reset. If neither `fill_value` nor `dtype` are specified (or if `dtype` is an ndarray sub-class), then the fill value is preserved. Finally, if `fill_value` is specified, but `dtype` is not, the fill value is set to the specified value. For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of bytes per entry than the previous dtype (for example, converting a regular array to a structured array), then the behavior of the view cannot be predicted just from the superficial appearance of ``a`` (shown by ``print(a)``). It also depends on exactly how ``a`` is stored in memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus defined as a slice or transpose, etc., the view may give different results. 
""" if dtype is None: if type is None: output = ndarray.view(self) else: output = ndarray.view(self, type) elif type is None: try: if issubclass(dtype, ndarray): output = ndarray.view(self, dtype) dtype = None else: output = ndarray.view(self, dtype) except TypeError: output = ndarray.view(self, dtype) else: output = ndarray.view(self, dtype, type) # also make the mask be a view (so attr changes to the view's # mask do no affect original object's mask) # (especially important to avoid affecting np.masked singleton) if getmask(output) is not nomask: output._mask = output._mask.view() # Make sure to reset the _fill_value if needed if getattr(output, '_fill_value', None) is not None: if fill_value is None: if dtype is None: pass # leave _fill_value as is else: output._fill_value = None else: output.fill_value = fill_value return output def __getitem__(self, indx): """ x.__getitem__(y) <==> x[y] Return the item described by i, as a masked array. """ # We could directly use ndarray.__getitem__ on self. # But then we would have to modify __array_finalize__ to prevent the # mask of being reshaped if it hasn't been set up properly yet # So it's easier to stick to the current version dout = self.data[indx] _mask = self._mask def _is_scalar(m): return not isinstance(m, np.ndarray) def _scalar_heuristic(arr, elem): """ Return whether `elem` is a scalar result of indexing `arr`, or None if undecidable without promoting nomask to a full mask """ # obviously a scalar if not isinstance(elem, np.ndarray): return True # object array scalar indexing can return anything elif arr.dtype.type is np.object_: if arr.dtype is not elem.dtype: # elem is an array, but dtypes do not match, so must be # an element return True # well-behaved subclass that only returns 0d arrays when # expected - this is not a scalar elif type(arr).__getitem__ == ndarray.__getitem__: return False return None if _mask is not nomask: # _mask cannot be a subclass, so it tells us whether we should # expect a scalar. It also cannot be of dtype object. mout = _mask[indx] scalar_expected = _is_scalar(mout) else: # attempt to apply the heuristic to avoid constructing a full mask mout = nomask scalar_expected = _scalar_heuristic(self.data, dout) if scalar_expected is None: # heuristics have failed # construct a full array, so we can be certain. This is costly. # we could also fall back on ndarray.__getitem__(self.data, indx) scalar_expected = _is_scalar(getmaskarray(self)[indx]) # Did we extract a single item? if scalar_expected: # A record if isinstance(dout, np.void): # We should always re-cast to mvoid, otherwise users can # change masks on rows that already have masked values, but not # on rows that have no masked values, which is inconsistent. return mvoid(dout, mask=mout, hardmask=self._hardmask) # special case introduced in gh-5962 elif (self.dtype.type is np.object_ and isinstance(dout, np.ndarray) and dout is not masked): # If masked, turn into a MaskedArray, with everything masked. if mout: return MaskedArray(dout, mask=True) else: return dout # Just a scalar else: if mout: return masked else: return dout else: # Force dout to MA dout = dout.view(type(self)) # Inherit attributes from self dout._update_from(self) # Check the fill_value if is_string_or_list_of_strings(indx): if self._fill_value is not None: dout._fill_value = self._fill_value[indx] # Something like gh-15895 has happened if this check fails. # _fill_value should always be an ndarray. 
if not isinstance(dout._fill_value, np.ndarray): raise RuntimeError('Internal NumPy error.') # If we're indexing a multidimensional field in a # structured array (such as dtype("(2,)i2,(2,)i1")), # dimensionality goes up (M[field].ndim == M.ndim + # M.dtype[field].ndim). That's fine for # M[field] but problematic for M[field].fill_value # which should have shape () to avoid breaking several # methods. There is no great way out, so set to # first element. See issue #6723. if dout._fill_value.ndim > 0: if not (dout._fill_value == dout._fill_value.flat[0]).all(): warnings.warn( "Upon accessing multidimensional field " f"{indx!s}, need to keep dimensionality " "of fill_value at 0. Discarding " "heterogeneous fill_value and setting " f"all to {dout._fill_value[0]!s}.", stacklevel=2) # Need to use `.flat[0:1].squeeze(...)` instead of just # `.flat[0]` to ensure the result is a 0d array and not # a scalar. dout._fill_value = dout._fill_value.flat[0:1].squeeze(axis=0) dout._isfield = True # Update the mask if needed if mout is not nomask: # set shape to match that of data; this is needed for matrices dout._mask = reshape(mout, dout.shape) dout._sharedmask = True # Note: Don't try to check for m.any(), that'll take too long return dout def __setitem__(self, indx, value): """ x.__setitem__(i, y) <==> x[i]=y Set item described by index. If value is masked, masks those locations. """ if self is masked: raise MaskError('Cannot alter the masked element.') _data = self._data _mask = self._mask if isinstance(indx, str): _data[indx] = value if _mask is nomask: self._mask = _mask = make_mask_none(self.shape, self.dtype) _mask[indx] = getmask(value) return _dtype = _data.dtype if value is masked: # The mask wasn't set: create a full version. if _mask is nomask: _mask = self._mask = make_mask_none(self.shape, _dtype) # Now, set the mask to its value. if _dtype.names is not None: _mask[indx] = tuple([True] * len(_dtype.names)) else: _mask[indx] = True return # Get the _data part of the new value dval = getattr(value, '_data', value) # Get the _mask part of the new value mval = getmask(value) if _dtype.names is not None and mval is nomask: mval = tuple([False] * len(_dtype.names)) if _mask is nomask: # Set the data, then the mask _data[indx] = dval if mval is not nomask: _mask = self._mask = make_mask_none(self.shape, _dtype) _mask[indx] = mval elif not self._hardmask: # Set the data, then the mask _data[indx] = dval _mask[indx] = mval elif hasattr(indx, 'dtype') and (indx.dtype == MaskType): indx = indx * umath.logical_not(_mask) _data[indx] = dval else: if _dtype.names is not None: err_msg = "Flexible 'hard' masks are not yet supported." raise NotImplementedError(err_msg) mindx = mask_or(_mask[indx], mval, copy=True) dindx = self._data[indx] if dindx.size > 1: np.copyto(dindx, dval, where=~mindx) elif mindx is nomask: dindx = dval _data[indx] = dindx _mask[indx] = mindx return # Define so that we can overwrite the setter. @property def dtype(self): return super(MaskedArray, self).dtype @dtype.setter def dtype(self, dtype): super(MaskedArray, type(self)).dtype.__set__(self, dtype) if self._mask is not nomask: self._mask = self._mask.view(make_mask_descr(dtype), ndarray) # Try to reset the shape of the mask (if we don't have a void). # This raises a ValueError if the dtype change won't work. 
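# (Illustrative: assigning an int32 dtype to a 3-element int64 array # reinterprets the data as 6 elements, so the 3-element boolean mask # can no longer be reshaped to match and a ValueError is raised.)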
try: self._mask.shape = self.shape except (AttributeError, TypeError): pass @property def shape(self): return super(MaskedArray, self).shape @shape.setter def shape(self, shape): super(MaskedArray, type(self)).shape.__set__(self, shape) # Cannot use self._mask, since it may not (yet) exist when a # masked matrix sets the shape. if getmask(self) is not nomask: self._mask.shape = self.shape def __setmask__(self, mask, copy=False): """ Set the mask. """ idtype = self.dtype current_mask = self._mask if mask is masked: mask = True if current_mask is nomask: # Make sure the mask is set # Just don't do anything if there's nothing to do. if mask is nomask: return current_mask = self._mask = make_mask_none(self.shape, idtype) if idtype.names is None: # No named fields. # Hardmask: don't unmask the data if self._hardmask: current_mask |= mask # Softmask: set everything to False # If it's obviously a compatible scalar, use a quick update # method. elif isinstance(mask, (int, float, np.bool_, np.number)): current_mask[...] = mask # Otherwise fall back to the slower, general purpose way. else: current_mask.flat = mask else: # Named fields: the mask is itself a structured array, with one # boolean entry per field mdtype = current_mask.dtype mask = np.array(mask, copy=False) # Mask is a singleton if not mask.ndim: # It's a boolean : make a record if mask.dtype.kind == 'b': mask = np.array(tuple([mask.item()] * len(mdtype)), dtype=mdtype) # It's a record: make sure the dtype is correct else: mask = mask.astype(mdtype) # Mask is a sequence else: # Make sure the new mask is a ndarray with the proper dtype try: mask = np.array(mask, copy=copy, dtype=mdtype) # Or assume it's a sequence of bool/int except TypeError: mask = np.array([tuple([m] * len(mdtype)) for m in mask], dtype=mdtype) # Hardmask: don't unmask the data if self._hardmask: for n in idtype.names: current_mask[n] |= mask[n] # Softmask: set everything to False # If it's obviously a compatible scalar, use a quick update # method. elif isinstance(mask, (int, float, np.bool_, np.number)): current_mask[...] = mask # Otherwise fall back to the slower, general purpose way. else: current_mask.flat = mask # Reshape if needed if current_mask.shape: current_mask.shape = self.shape return _set_mask = __setmask__ @property def mask(self): """ Current mask. """ # We could try to force a reshape, but that wouldn't work in some # cases. # Return a view so that the dtype and shape cannot be changed in place # This still preserves nomask by identity return self._mask.view() @mask.setter def mask(self, value): self.__setmask__(value) @property def recordmask(self): """ Get or set the mask of the array if it has no named fields. For structured arrays, returns a ndarray of booleans where entries are ``True`` if **all** the fields are masked, ``False`` otherwise: >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], ... dtype=[('a', int), ('b', int)]) >>> x.recordmask array([False, False, True, False, False]) """ _mask = self._mask.view(ndarray) if _mask.dtype.names is None: return _mask return np.all(flatten_structured_array(_mask), axis=-1) @recordmask.setter def recordmask(self, mask): raise NotImplementedError("Coming soon: setting the mask per records!") def harden_mask(self): """ Force the mask to hard. Whether the mask of a masked array is hard or soft is determined by its `~ma.MaskedArray.hardmask` property. `harden_mask` sets `~ma.MaskedArray.hardmask` to ``True``.
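With a hard mask, entries that are already masked cannot be overwritten by assignment or unmasked.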
See Also -------- ma.MaskedArray.hardmask """ self._hardmask = True return self def soften_mask(self): """ Force the mask to soft. Whether the mask of a masked array is hard or soft is determined by its `~ma.MaskedArray.hardmask` property. `soften_mask` sets `~ma.MaskedArray.hardmask` to ``False``. See Also -------- ma.MaskedArray.hardmask """ self._hardmask = False return self @property def hardmask(self): """ Hardness of the mask. """ return self._hardmask def unshare_mask(self): """ Copy the mask and set the sharedmask flag to False. Whether the mask is shared between masked arrays can be seen from the `sharedmask` property. `unshare_mask` ensures the mask is not shared. A copy of the mask is only made if it was shared. See Also -------- sharedmask """ if self._sharedmask: self._mask = self._mask.copy() self._sharedmask = False return self @property def sharedmask(self): """ Share status of the mask (read-only). """ return self._sharedmask def shrink_mask(self): """ Reduce a mask to nomask when possible. Parameters ---------- None Returns ------- result : MaskedArray `self`, with its mask reduced to ``nomask`` when it contains no masked entries. Examples -------- >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) >>> x.mask array([[False, False], [False, False]]) >>> x.shrink_mask() masked_array( data=[[1, 2], [3, 4]], mask=False, fill_value=999999) >>> x.mask False """ self._mask = _shrink_mask(self._mask) return self @property def baseclass(self): """ Class of the underlying data (read-only). """ return self._baseclass def _get_data(self): """ Returns the underlying data, as a view of the masked array. If the underlying data is a subclass of :class:`numpy.ndarray`, it is returned as such. >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) >>> x.data matrix([[1, 2], [3, 4]]) The type of the data can be accessed through the :attr:`baseclass` attribute. """ return ndarray.view(self, self._baseclass) _data = property(fget=_get_data) data = property(fget=_get_data) @property def flat(self): """ Return a flat iterator, or set a flattened version of self to value. """ return MaskedIterator(self) @flat.setter def flat(self, value): y = self.ravel() y[:] = value @property def fill_value(self): """ The filling value of the masked array is a scalar. When setting, None will set to a default based on the data type. Examples -------- >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: ... np.ma.array([0, 1], dtype=dt).get_fill_value() ... 999999 999999 1e+20 (1e+20+0j) >>> x = np.ma.array([0, 1.], fill_value=-np.inf) >>> x.fill_value -inf >>> x.fill_value = np.pi >>> x.fill_value 3.1415926535897931 # may vary Reset to default: >>> x.fill_value = None >>> x.fill_value 1e+20 """ if self._fill_value is None: self._fill_value = _check_fill_value(None, self.dtype) # Temporary workaround to account for the fact that str and bytes # scalars cannot be indexed with (), whereas all other numpy # scalars can. See issues #7259 and #7267. # The if-block can be removed after #7267 has been fixed. if isinstance(self._fill_value, ndarray): return self._fill_value[()] return self._fill_value @fill_value.setter def fill_value(self, value=None): target = _check_fill_value(value, self.dtype) if not target.ndim == 0: # 2019-11-12, 1.18.0 warnings.warn( "Non-scalar arrays for the fill value are deprecated. Use " "arrays with scalar values instead.
The filled function " "still supports any array as `fill_value`.", DeprecationWarning, stacklevel=2) _fill_value = self._fill_value if _fill_value is None: # Create the attribute if it was undefined self._fill_value = target else: # Don't overwrite the attribute, just fill it (for propagation) _fill_value[()] = target # kept for compatibility get_fill_value = fill_value.fget set_fill_value = fill_value.fset def filled(self, fill_value=None): """ Return a copy of self, with masked values filled with a given value. **However**, if there are no masked values to fill, self will be returned instead as an ndarray. Parameters ---------- fill_value : array_like, optional The value to use for invalid entries. Can be scalar or non-scalar. If non-scalar, the resulting ndarray must be broadcastable over input array. Default is None, in which case, the `fill_value` attribute of the array is used instead. Returns ------- filled_array : ndarray A copy of ``self`` with invalid entries replaced by *fill_value* (be it the function argument or the attribute of ``self``), or ``self`` itself as an ndarray if there are no invalid entries to be replaced. Notes ----- The result is **not** a MaskedArray! Examples -------- >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) >>> x.filled() array([ 1, 2, -999, 4, -999]) >>> x.filled(fill_value=1000) array([ 1, 2, 1000, 4, 1000]) >>> type(x.filled()) <class 'numpy.ndarray'> Subclassing is preserved. This means that if, e.g., the data part of the masked array is a recarray, `filled` returns a recarray: >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray) >>> m = np.ma.array(x, mask=[(True, False), (False, True)]) >>> m.filled() rec.array([(999999, 2), ( -3, 999999)], dtype=[('f0', '<i8'), ('f1', '<i8')]) """ m = self._mask if m is nomask: return self._data if fill_value is None: fill_value = self.fill_value else: fill_value = _check_fill_value(fill_value, self.dtype) if self is masked_singleton: return np.asanyarray(fill_value) if m.dtype.names is not None: result = self._data.copy('K') _recursive_filled(result, self._mask, fill_value) elif not m.any(): return self._data else: result = self._data.copy('K') try: np.copyto(result, fill_value, where=m) except (TypeError, AttributeError): fill_value = narray(fill_value, dtype=object) d = result.astype(object) result = np.choose(m, (d, fill_value)) except IndexError: # ok, if scalar if self._data.shape: raise elif m: result = np.array(fill_value, dtype=self.dtype) else: result = self._data return result def compressed(self): """ Return all the non-masked data as a 1-D array. Returns ------- data : ndarray A new `ndarray` holding the non-masked data is returned. Notes ----- The result is **not** a MaskedArray! Examples -------- >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) >>> x.compressed() array([0, 1]) >>> type(x.compressed()) <class 'numpy.ndarray'> """ data = ndarray.ravel(self._data) if self._mask is not nomask: data = data.compress(np.logical_not(ndarray.ravel(self._mask))) return data def compress(self, condition, axis=None, out=None): """ Return `a` where condition is ``True``. If condition is a `~ma.MaskedArray`, missing values are considered as ``False``. Parameters ---------- condition : var Boolean 1-d array selecting which entries to return. If len(condition) is less than the size of a along the axis, then output is truncated to length of condition array. axis : {None, int}, optional Axis along which the operation must be performed. 
out : {None, ndarray}, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type will be cast if necessary. Returns ------- result : MaskedArray A :class:`~ma.MaskedArray` object. Notes ----- Please note the difference with :meth:`compressed` ! The output of :meth:`compress` has a mask, the output of :meth:`compressed` does not. Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( data=[[1, --, 3], [--, 5, --], [7, --, 9]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.compress([1, 0, 1]) masked_array(data=[1, 3], mask=[False, False], fill_value=999999) >>> x.compress([1, 0, 1], axis=1) masked_array( data=[[1, 3], [--, --], [7, 9]], mask=[[False, False], [ True, True], [False, False]], fill_value=999999) """ # Get the basic components (_data, _mask) = (self._data, self._mask) # Force the condition to a regular ndarray and forget the missing # values. condition = np.array(condition, copy=False, subok=False) _new = _data.compress(condition, axis=axis, out=out).view(type(self)) _new._update_from(self) if _mask is not nomask: _new._mask = _mask.compress(condition, axis=axis) return _new def _insert_masked_print(self): """ Replace masked values with masked_print_option, casting all innermost dtypes to object. """ if masked_print_option.enabled(): mask = self._mask if mask is nomask: res = self._data else: # convert to object array to make filled work data = self._data # For big arrays, to avoid a costly conversion to the # object dtype, extract the corners before the conversion. print_width = (self._print_width if self.ndim > 1 else self._print_width_1d) for axis in range(self.ndim): if data.shape[axis] > print_width: ind = print_width // 2 arr = np.split(data, (ind, -ind), axis=axis) data = np.concatenate((arr[0], arr[2]), axis=axis) arr = np.split(mask, (ind, -ind), axis=axis) mask = np.concatenate((arr[0], arr[2]), axis=axis) rdtype = _replace_dtype_fields(self.dtype, "O") res = data.astype(rdtype) _recursive_printoption(res, mask, masked_print_option) else: res = self.filled(self.fill_value) return res def __str__(self): return str(self._insert_masked_print()) def __repr__(self): """ Literal string representation. 
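Falls back to the legacy print templates defined at module level when the 1.13 legacy printing mode is active; otherwise builds the keyword-style repr.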
""" if self._baseclass is np.ndarray: name = 'array' else: name = self._baseclass.__name__ # 2016-11-19: Demoted to legacy format if np.get_printoptions()['legacy'] == '1.13': is_long = self.ndim > 1 parameters = dict( name=name, nlen=" " * len(name), data=str(self), mask=str(self._mask), fill=str(self.fill_value), dtype=str(self.dtype) ) is_structured = bool(self.dtype.names) key = '{}_{}'.format( 'long' if is_long else 'short', 'flx' if is_structured else 'std' ) return _legacy_print_templates[key] % parameters prefix = f"masked_{name}(" dtype_needed = ( not np.core.arrayprint.dtype_is_implied(self.dtype) or np.all(self.mask) or self.size == 0 ) # determine which keyword args need to be shown keys = ['data', 'mask', 'fill_value'] if dtype_needed: keys.append('dtype') # array has only one row (non-column) is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1]) # choose what to indent each keyword with min_indent = 2 if is_one_row: # first key on the same line as the type, remaining keys # aligned by equals indents = {} indents[keys[0]] = prefix for k in keys[1:]: n = builtins.max(min_indent, len(prefix + keys[0]) - len(k)) indents[k] = ' ' * n prefix = '' # absorbed into the first indent else: # each key on its own line, indented by two spaces indents = {k: ' ' * min_indent for k in keys} prefix = prefix + '\n' # first key on the next line # format the field values reprs = {} reprs['data'] = np.array2string( self._insert_masked_print(), separator=", ", prefix=indents['data'] + 'data=', suffix=',') reprs['mask'] = np.array2string( self._mask, separator=", ", prefix=indents['mask'] + 'mask=', suffix=',') reprs['fill_value'] = repr(self.fill_value) if dtype_needed: reprs['dtype'] = np.core.arrayprint.dtype_short_repr(self.dtype) # join keys with values and indentations result = ',\n'.join( '{}{}={}'.format(indents[k], k, reprs[k]) for k in keys ) return prefix + result + ')' def _delegate_binop(self, other): # This emulates the logic in # private/binop_override.h:forward_binop_should_defer if isinstance(other, type(self)): return False array_ufunc = getattr(other, "__array_ufunc__", False) if array_ufunc is False: other_priority = getattr(other, "__array_priority__", -1000000) return self.__array_priority__ < other_priority else: # If array_ufunc is not None, it will be called inside the ufunc; # None explicitly tells us to not call the ufunc, i.e., defer. return array_ufunc is None def _comparison(self, other, compare): """Compare self with other using operator.eq or operator.ne. When either of the elements is masked, the result is masked as well, but the underlying boolean data are still set, with self and other considered equal if both are masked, and unequal otherwise. For structured arrays, all fields are combined, with masked values ignored. The result is masked if all fields were masked, with self and other considered equal only if both were fully masked. """ omask = getmask(other) smask = self.mask mask = mask_or(smask, omask, copy=True) odata = getdata(other) if mask.dtype.names is not None: # For possibly masked structured arrays we need to be careful, # since the standard structured array comparison will use all # fields, masked or not. To avoid masked fields influencing the # outcome, we set all masked fields in self to other, so they'll # count as equal. To prepare, we ensure we have the right shape. 
broadcast_shape = np.broadcast(self, odata).shape sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True) sbroadcast._mask = mask sdata = sbroadcast.filled(odata) # Now take care of the mask; the merged mask should have an item # masked if all fields were masked (in one and/or other). mask = (mask == np.ones((), mask.dtype)) else: # For regular arrays, just use the data as they come. sdata = self.data check = compare(sdata, odata) if isinstance(check, (np.bool_, bool)): return masked if mask else check if mask is not nomask: # Adjust elements that were masked, which should be treated # as equal if masked in both, unequal if masked in one. # Note that this works automatically for structured arrays too. check = np.where(mask, compare(smask, omask), check) if mask.shape != check.shape: # Guarantee consistency of the shape, making a copy since the # mask may need to get written to later. mask = np.broadcast_to(mask, check.shape).copy() check = check.view(type(self)) check._update_from(self) check._mask = mask # Cast fill value to bool_ if needed. If it cannot be cast, the # default boolean fill value is used. if check._fill_value is not None: try: fill = _check_fill_value(check._fill_value, np.bool_) except (TypeError, ValueError): fill = _check_fill_value(None, np.bool_) check._fill_value = fill return check def __eq__(self, other): """Check whether other equals self elementwise. When either of the elements is masked, the result is masked as well, but the underlying boolean data are still set, with self and other considered equal if both are masked, and unequal otherwise. For structured arrays, all fields are combined, with masked values ignored. The result is masked if all fields were masked, with self and other considered equal only if both were fully masked. """ return self._comparison(other, operator.eq) def __ne__(self, other): """Check whether other does not equal self elementwise. When either of the elements is masked, the result is masked as well, but the underlying boolean data are still set, with self and other considered equal if both are masked, and unequal otherwise. For structured arrays, all fields are combined, with masked values ignored. The result is masked if all fields were masked, with self and other considered equal only if both were fully masked. """ return self._comparison(other, operator.ne) def __add__(self, other): """ Add self to other, and return a new masked array. """ if self._delegate_binop(other): return NotImplemented return add(self, other) def __radd__(self, other): """ Add other to self, and return a new masked array. """ # In analogy with __rsub__ and __rdiv__, use original order: # we get here from `other + self`. return add(other, self) def __sub__(self, other): """ Subtract other from self, and return a new masked array. """ if self._delegate_binop(other): return NotImplemented return subtract(self, other) def __rsub__(self, other): """ Subtract self from other, and return a new masked array. """ return subtract(other, self) def __mul__(self, other): "Multiply self by other, and return a new masked array." if self._delegate_binop(other): return NotImplemented return multiply(self, other) def __rmul__(self, other): """ Multiply other by self, and return a new masked array. """ # In analogy with __rsub__ and __rdiv__, use original order: # we get here from `other * self`. return multiply(other, self) def __div__(self, other): """ Divide other into self, and return a new masked array.
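(Python 2's division slot; under Python 3 the ``/`` operator uses ``__truediv__`` instead, so this is kept for backwards compatibility.)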
""" if self._delegate_binop(other): return NotImplemented return divide(self, other) def __truediv__(self, other): """ Divide other into self, and return a new masked array. """ if self._delegate_binop(other): return NotImplemented return true_divide(self, other) def __rtruediv__(self, other): """ Divide self into other, and return a new masked array. """ return true_divide(other, self) def __floordiv__(self, other): """ Divide other into self, and return a new masked array. """ if self._delegate_binop(other): return NotImplemented return floor_divide(self, other) def __rfloordiv__(self, other): """ Divide self into other, and return a new masked array. """ return floor_divide(other, self) def __pow__(self, other): """ Raise self to the power other, masking the potential NaNs/Infs """ if self._delegate_binop(other): return NotImplemented return power(self, other) def __rpow__(self, other): """ Raise other to the power self, masking the potential NaNs/Infs """ return power(other, self) def __iadd__(self, other): """ Add other to self in-place. """ m = getmask(other) if self._mask is nomask: if m is not nomask and m.any(): self._mask = make_mask_none(self.shape, self.dtype) self._mask += m else: if m is not nomask: self._mask += m self._data.__iadd__(np.where(self._mask, self.dtype.type(0), getdata(other))) return self def __isub__(self, other): """ Subtract other from self in-place. """ m = getmask(other) if self._mask is nomask: if m is not nomask and m.any(): self._mask = make_mask_none(self.shape, self.dtype) self._mask += m elif m is not nomask: self._mask += m self._data.__isub__(np.where(self._mask, self.dtype.type(0), getdata(other))) return self def __imul__(self, other): """ Multiply self by other in-place. """ m = getmask(other) if self._mask is nomask: if m is not nomask and m.any(): self._mask = make_mask_none(self.shape, self.dtype) self._mask += m elif m is not nomask: self._mask += m self._data.__imul__(np.where(self._mask, self.dtype.type(1), getdata(other))) return self def __idiv__(self, other): """ Divide self by other in-place. """ other_data = getdata(other) dom_mask = _DomainSafeDivide().__call__(self._data, other_data) other_mask = getmask(other) new_mask = mask_or(other_mask, dom_mask) # The following 3 lines control the domain filling if dom_mask.any(): (_, fval) = ufunc_fills[np.divide] other_data = np.where(dom_mask, fval, other_data) self._mask |= new_mask self._data.__idiv__(np.where(self._mask, self.dtype.type(1), other_data)) return self def __ifloordiv__(self, other): """ Floor divide self by other in-place. """ other_data = getdata(other) dom_mask = _DomainSafeDivide().__call__(self._data, other_data) other_mask = getmask(other) new_mask = mask_or(other_mask, dom_mask) # The following 3 lines control the domain filling if dom_mask.any(): (_, fval) = ufunc_fills[np.floor_divide] other_data = np.where(dom_mask, fval, other_data) self._mask |= new_mask self._data.__ifloordiv__(np.where(self._mask, self.dtype.type(1), other_data)) return self def __itruediv__(self, other): """ True divide self by other in-place. 
""" other_data = getdata(other) dom_mask = _DomainSafeDivide().__call__(self._data, other_data) other_mask = getmask(other) new_mask = mask_or(other_mask, dom_mask) # The following 3 lines control the domain filling if dom_mask.any(): (_, fval) = ufunc_fills[np.true_divide] other_data = np.where(dom_mask, fval, other_data) self._mask |= new_mask self._data.__itruediv__(np.where(self._mask, self.dtype.type(1), other_data)) return self def __ipow__(self, other): """ Raise self to the power other, in place. """ other_data = getdata(other) other_mask = getmask(other) with np.errstate(divide='ignore', invalid='ignore'): self._data.__ipow__(np.where(self._mask, self.dtype.type(1), other_data)) invalid = np.logical_not(np.isfinite(self._data)) if invalid.any(): if self._mask is not nomask: self._mask |= invalid else: self._mask = invalid np.copyto(self._data, self.fill_value, where=invalid) new_mask = mask_or(other_mask, invalid) self._mask = mask_or(self._mask, new_mask) return self def __float__(self): """ Convert to float. """ if self.size > 1: raise TypeError("Only length-1 arrays can be converted " "to Python scalars") elif self._mask: warnings.warn("Warning: converting a masked element to nan.", stacklevel=2) return np.nan return float(self.item()) def __int__(self): """ Convert to int. """ if self.size > 1: raise TypeError("Only length-1 arrays can be converted " "to Python scalars") elif self._mask: raise MaskError('Cannot convert masked element to a Python int.') return int(self.item()) @property def imag(self): """ The imaginary part of the masked array. This property is a view on the imaginary part of this `MaskedArray`. See Also -------- real Examples -------- >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.imag masked_array(data=[1.0, --, 1.6], mask=[False, True, False], fill_value=1e+20) """ result = self._data.imag.view(type(self)) result.__setmask__(self._mask) return result # kept for compatibility get_imag = imag.fget @property def real(self): """ The real part of the masked array. This property is a view on the real part of this `MaskedArray`. See Also -------- imag Examples -------- >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.real masked_array(data=[1.0, --, 3.45], mask=[False, True, False], fill_value=1e+20) """ result = self._data.real.view(type(self)) result.__setmask__(self._mask) return result # kept for compatibility get_real = real.fget def count(self, axis=None, keepdims=np._NoValue): """ Count the non-masked elements of the array along the given axis. Parameters ---------- axis : None or int or tuple of ints, optional Axis or axes along which the count is performed. The default, None, performs the count over all the dimensions of the input array. `axis` may be negative, in which case it counts from the last to the first axis. .. versionadded:: 1.10.0 If this is a tuple of ints, the count is performed on multiple axes, instead of a single axis or all the axes as before. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the array. Returns ------- result : ndarray or scalar An array with the same shape as the input array, with the specified axis removed. If the array is a 0-d array, or if `axis` is None, a scalar is returned. See Also -------- ma.count_masked : Count masked elements in array or along a given axis. 
Examples -------- >>> import numpy.ma as ma >>> a = ma.arange(6).reshape((2, 3)) >>> a[1, :] = ma.masked >>> a masked_array( data=[[0, 1, 2], [--, --, --]], mask=[[False, False, False], [ True, True, True]], fill_value=999999) >>> a.count() 3 When the `axis` keyword is specified an array of appropriate size is returned. >>> a.count(axis=0) array([1, 1, 1]) >>> a.count(axis=1) array([3, 0]) """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} m = self._mask # special case for matrices (we assume no other subclasses modify # their dimensions) if isinstance(self.data, np.matrix): if m is nomask: m = np.zeros(self.shape, dtype=np.bool_) m = m.view(type(self.data)) if m is nomask: # compare to _count_reduce_items in _methods.py if self.shape == (): if axis not in (None, 0): raise np.AxisError(axis=axis, ndim=self.ndim) return 1 elif axis is None: if kwargs.get('keepdims', False): return np.array(self.size, dtype=np.intp, ndmin=self.ndim) return self.size axes = normalize_axis_tuple(axis, self.ndim) items = 1 for ax in axes: items *= self.shape[ax] if kwargs.get('keepdims', False): out_dims = list(self.shape) for a in axes: out_dims[a] = 1 else: out_dims = [d for n, d in enumerate(self.shape) if n not in axes] # make sure to return a 0-d array if axis is supplied return np.full(out_dims, items, dtype=np.intp) # take care of the masked singleton if self is masked: return 0 return (~m).sum(axis=axis, dtype=np.intp, **kwargs) def ravel(self, order='C'): """ Returns a 1D version of self, as a view. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional The elements of `a` are read using this index order. 'C' means to index the elements in C-like order, with the last axis index changing fastest, back to the first axis index changing slowest. 'F' means to index the elements in Fortran-like index order, with the first index changing fastest, and the last index changing slowest. Note that the 'C' and 'F' options take no account of the memory layout of the underlying array, and only refer to the order of axis indexing. 'A' means to read the elements in Fortran-like index order if `m` is Fortran *contiguous* in memory, C-like order otherwise. 'K' means to read the elements in the order they occur in memory, except for reversing the data when strides are negative. By default, 'C' index order is used. Returns ------- MaskedArray Output view is of shape ``(self.size,)`` (or ``(np.ma.product(self.shape),)``). Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( data=[[1, --, 3], [--, 5, --], [7, --, 9]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.ravel() masked_array(data=[1, --, 3, --, 5, --, 7, --, 9], mask=[False, True, False, True, False, True, False, True, False], fill_value=999999) """ r = ndarray.ravel(self._data, order=order).view(type(self)) r._update_from(self) if self._mask is not nomask: r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape) else: r._mask = nomask return r def reshape(self, *s, **kwargs): """ Give a new shape to the array without changing its data. Returns a masked array containing the same data, but with a new shape. The result is a view on the original array; if this is not possible, a ValueError is raised. Parameters ---------- shape : int or tuple of ints The new shape should be compatible with the original shape. If an integer is supplied, then the result will be a 1-D array of that length. 
order : {'C', 'F'}, optional Determines whether the array data should be viewed as in C (row-major) or FORTRAN (column-major) order. Returns ------- reshaped_array : array A new view on the array. See Also -------- reshape : Equivalent function in the masked array module. numpy.ndarray.reshape : Equivalent method on ndarray object. numpy.reshape : Equivalent function in the NumPy module. Notes ----- The reshaping operation cannot guarantee that a copy will not be made, to modify the shape in place, use ``a.shape = s`` Examples -------- >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) >>> x masked_array( data=[[--, 2], [3, --]], mask=[[ True, False], [False, True]], fill_value=999999) >>> x = x.reshape((4,1)) >>> x masked_array( data=[[--], [2], [3], [--]], mask=[[ True], [False], [False], [ True]], fill_value=999999) """ kwargs.update(order=kwargs.get('order', 'C')) result = self._data.reshape(*s, **kwargs).view(type(self)) result._update_from(self) mask = self._mask if mask is not nomask: result._mask = mask.reshape(*s, **kwargs) return result def resize(self, newshape, refcheck=True, order=False): """ .. warning:: This method does nothing, except raise a ValueError exception. A masked array does not own its data and therefore cannot safely be resized in place. Use the `numpy.ma.resize` function instead. This method is difficult to implement safely and may be deprecated in future releases of NumPy. """ # Note : the 'order' keyword looks broken, let's just drop it errmsg = "A masked array does not own its data "\ "and therefore cannot be resized.\n" \ "Use the numpy.ma.resize function instead." raise ValueError(errmsg) def put(self, indices, values, mode='raise'): """ Set storage-indexed locations to corresponding values. Sets self._data.flat[n] = values[n] for each n in indices. If `values` is shorter than `indices` then it will repeat. If `values` has some masked values, the initial mask is updated in consequence, else the corresponding values are unmasked. Parameters ---------- indices : 1-D array_like Target indices, interpreted as integers. values : array_like Values to place in self._data copy at target indices. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. 'raise' : raise an error. 'wrap' : wrap around. 'clip' : clip to the range. Notes ----- `values` can be a scalar or length 1 array. 
Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( data=[[1, --, 3], [--, 5, --], [7, --, 9]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.put([0,4,8],[10,20,30]) >>> x masked_array( data=[[10, --, 3], [--, 20, --], [7, --, 30]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.put(4,999) >>> x masked_array( data=[[10, --, 3], [--, 999, --], [7, --, 30]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) """ # Hard mask: Get rid of the values/indices that fall on masked data if self._hardmask and self._mask is not nomask: mask = self._mask[indices] indices = narray(indices, copy=False) values = narray(values, copy=False, subok=True) values.resize(indices.shape) indices = indices[~mask] values = values[~mask] self._data.put(indices, values, mode=mode) # short circuit if neither self nor values are masked if self._mask is nomask and getmask(values) is nomask: return m = getmaskarray(self) if getmask(values) is nomask: m.put(indices, False, mode=mode) else: m.put(indices, values._mask, mode=mode) m = make_mask(m, copy=False, shrink=True) self._mask = m return def ids(self): """ Return the addresses of the data and mask areas. Parameters ---------- None Examples -------- >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) >>> x.ids() (166670640, 166659832) # may vary If the array has no mask, the address of `nomask` is returned. This address is typically not close to the data in memory: >>> x = np.ma.array([1, 2, 3]) >>> x.ids() (166691080, 3083169284) # may vary """ if self._mask is nomask: return (self.ctypes.data, id(nomask)) return (self.ctypes.data, self._mask.ctypes.data) def iscontiguous(self): """ Return a boolean indicating whether the data is contiguous. Parameters ---------- None Examples -------- >>> x = np.ma.array([1, 2, 3]) >>> x.iscontiguous() True `iscontiguous` returns one of the flags of the masked array: >>> x.flags C_CONTIGUOUS : True F_CONTIGUOUS : True OWNDATA : False WRITEABLE : True ALIGNED : True WRITEBACKIFCOPY : False UPDATEIFCOPY : False """ return self.flags['CONTIGUOUS'] def all(self, axis=None, out=None, keepdims=np._NoValue): """ Returns True if all elements evaluate to True. The output array is masked where all the values along the given axis are masked: if the output would have been a scalar and that all the values are masked, then the output is `masked`. Refer to `numpy.all` for full documentation. See Also -------- numpy.ndarray.all : corresponding function for ndarrays numpy.all : equivalent function Examples -------- >>> np.ma.array([1,2,3]).all() True >>> a = np.ma.array([1,2,3], mask=True) >>> (a.all() is np.ma.masked) True """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} mask = _check_mask_axis(self._mask, axis, **kwargs) if out is None: d = self.filled(True).all(axis=axis, **kwargs).view(type(self)) if d.ndim: d.__setmask__(mask) elif mask: return masked return d self.filled(True).all(axis=axis, out=out, **kwargs) if isinstance(out, MaskedArray): if out.ndim or mask: out.__setmask__(mask) return out def any(self, axis=None, out=None, keepdims=np._NoValue): """ Returns True if any of the elements of `a` evaluate to True. Masked values are considered as False during computation. Refer to `numpy.any` for full documentation. 
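        If the output would have been a scalar and all the values are
        masked, then the output is `masked` (a sketch of the implementation
        below, mirroring `all`).
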
See Also -------- numpy.ndarray.any : corresponding function for ndarrays numpy.any : equivalent function """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} mask = _check_mask_axis(self._mask, axis, **kwargs) if out is None: d = self.filled(False).any(axis=axis, **kwargs).view(type(self)) if d.ndim: d.__setmask__(mask) elif mask: d = masked return d self.filled(False).any(axis=axis, out=out, **kwargs) if isinstance(out, MaskedArray): if out.ndim or mask: out.__setmask__(mask) return out def nonzero(self): """ Return the indices of unmasked elements that are not zero. Returns a tuple of arrays, one for each dimension, containing the indices of the non-zero elements in that dimension. The corresponding non-zero values can be obtained with:: a[a.nonzero()] To group the indices by element, rather than dimension, use instead:: np.transpose(a.nonzero()) The result of this is always a 2d array, with a row for each non-zero element. Parameters ---------- None Returns ------- tuple_of_arrays : tuple Indices of elements that are non-zero. See Also -------- numpy.nonzero : Function operating on ndarrays. flatnonzero : Return indices that are non-zero in the flattened version of the input array. numpy.ndarray.nonzero : Equivalent ndarray method. count_nonzero : Counts the number of non-zero elements in the input array. Examples -------- >>> import numpy.ma as ma >>> x = ma.array(np.eye(3)) >>> x masked_array( data=[[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], mask=False, fill_value=1e+20) >>> x.nonzero() (array([0, 1, 2]), array([0, 1, 2])) Masked elements are ignored. >>> x[1, 1] = ma.masked >>> x masked_array( data=[[1.0, 0.0, 0.0], [0.0, --, 0.0], [0.0, 0.0, 1.0]], mask=[[False, False, False], [False, True, False], [False, False, False]], fill_value=1e+20) >>> x.nonzero() (array([0, 2]), array([0, 2])) Indices can also be grouped by element. >>> np.transpose(x.nonzero()) array([[0, 0], [2, 2]]) A common use for ``nonzero`` is to find the indices of an array, where a condition is True. Given an array `a`, the condition `a` > 3 is a boolean array and since False is interpreted as 0, ma.nonzero(a > 3) yields the indices of the `a` where the condition is true. >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) >>> a > 3 masked_array( data=[[False, False, False], [ True, True, True], [ True, True, True]], mask=False, fill_value=True) >>> ma.nonzero(a > 3) (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) The ``nonzero`` method of the condition array can also be called. >>> (a > 3).nonzero() (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) """ return narray(self.filled(0), copy=False).nonzero() def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): """ (this docstring should be overwritten) """ #!!!: implement out + test! m = self._mask if m is nomask: result = super(MaskedArray, self).trace(offset=offset, axis1=axis1, axis2=axis2, out=out) return result.astype(dtype) else: D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) return D.astype(dtype).filled(0).sum(axis=-1, out=out) trace.__doc__ = ndarray.trace.__doc__ def dot(self, b, out=None, strict=False): """ a.dot(b, out=None) Masked dot product of two arrays. Note that `out` and `strict` are located in different positions than in `ma.dot`. In order to maintain compatibility with the functional version, it is recommended that the optional arguments be treated as keyword only. At some point that may be mandatory. .. versionadded:: 1.10.0 Parameters ---------- b : masked_array_like Inputs array. 
out : masked_array, optional Output argument. This must have the exact kind that would be returned if it was not used. In particular, it must have the right type, must be C-contiguous, and its dtype must be the dtype that would be returned for `ma.dot(a,b)`. This is a performance feature. Therefore, if these conditions are not met, an exception is raised, instead of attempting to be flexible. strict : bool, optional Whether masked data are propagated (True) or set to 0 (False) for the computation. Default is False. Propagating the mask means that if a masked value appears in a row or column, the whole row or column is considered masked. .. versionadded:: 1.10.2 See Also -------- numpy.ma.dot : equivalent function """ return dot(self, b, out=out, strict=strict) def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Return the sum of the array elements over the given axis. Masked elements are set to 0 internally. Refer to `numpy.sum` for full documentation. See Also -------- numpy.ndarray.sum : corresponding function for ndarrays numpy.sum : equivalent function Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( data=[[1, --, 3], [--, 5, --], [7, --, 9]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.sum() 25 >>> x.sum(axis=1) masked_array(data=[4, 5, 16], mask=[False, False, False], fill_value=999999) >>> x.sum(axis=0) masked_array(data=[8, 5, 12], mask=[False, False, False], fill_value=999999) >>> print(type(x.sum(axis=0, dtype=np.int64)[0])) <class 'numpy.int64'> """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} _mask = self._mask newmask = _check_mask_axis(_mask, axis, **kwargs) # No explicit output if out is None: result = self.filled(0).sum(axis, dtype=dtype, **kwargs) rndim = getattr(result, 'ndim', 0) if rndim: result = result.view(type(self)) result.__setmask__(newmask) elif newmask: result = masked return result # Explicit output result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if outmask is nomask: outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask return out def cumsum(self, axis=None, dtype=None, out=None): """ Return the cumulative sum of the array elements over the given axis. Masked values are set to 0 internally during the computation. However, their position is saved, and the result will be masked at the same locations. Refer to `numpy.cumsum` for full documentation. Notes ----- The mask is lost if `out` is not a valid :class:`ma.MaskedArray` ! Arithmetic is modular when using integer types, and no error is raised on overflow. See Also -------- numpy.ndarray.cumsum : corresponding function for ndarrays numpy.cumsum : equivalent function Examples -------- >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) >>> marr.cumsum() masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33], mask=[False, False, False, True, True, True, False, False, False, False], fill_value=999999) """ result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out) if out is not None: if isinstance(out, MaskedArray): out.__setmask__(self.mask) return out result = result.view(type(self)) result.__setmask__(self._mask) return result def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Return the product of the array elements over the given axis. Masked elements are set to 1 internally for computation. 
Refer to `numpy.prod` for full documentation. Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. See Also -------- numpy.ndarray.prod : corresponding function for ndarrays numpy.prod : equivalent function """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} _mask = self._mask newmask = _check_mask_axis(_mask, axis, **kwargs) # No explicit output if out is None: result = self.filled(1).prod(axis, dtype=dtype, **kwargs) rndim = getattr(result, 'ndim', 0) if rndim: result = result.view(type(self)) result.__setmask__(newmask) elif newmask: result = masked return result # Explicit output result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if outmask is nomask: outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask return out product = prod def cumprod(self, axis=None, dtype=None, out=None): """ Return the cumulative product of the array elements over the given axis. Masked values are set to 1 internally during the computation. However, their position is saved, and the result will be masked at the same locations. Refer to `numpy.cumprod` for full documentation. Notes ----- The mask is lost if `out` is not a valid MaskedArray ! Arithmetic is modular when using integer types, and no error is raised on overflow. See Also -------- numpy.ndarray.cumprod : corresponding function for ndarrays numpy.cumprod : equivalent function """ result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) if out is not None: if isinstance(out, MaskedArray): out.__setmask__(self._mask) return out result = result.view(type(self)) result.__setmask__(self._mask) return result def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): """ Returns the average of the array elements along given axis. Masked entries are ignored, and result elements which are not finite will be masked. Refer to `numpy.mean` for full documentation. See Also -------- numpy.ndarray.mean : corresponding function for ndarrays numpy.mean : Equivalent function numpy.ma.average: Weighted average. Examples -------- >>> a = np.ma.array([1,2,3], mask=[False, False, True]) >>> a masked_array(data=[1, 2, --], mask=[False, False, True], fill_value=999999) >>> a.mean() 1.5 """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} if self._mask is nomask: result = super(MaskedArray, self).mean(axis=axis, dtype=dtype, **kwargs)[()] else: dsum = self.sum(axis=axis, dtype=dtype, **kwargs) cnt = self.count(axis=axis, **kwargs) if cnt.shape == () and (cnt == 0): result = masked else: result = dsum * 1. / cnt if out is not None: out.flat = result if isinstance(out, MaskedArray): outmask = getmask(out) if outmask is nomask: outmask = out._mask = make_mask_none(out.shape) outmask.flat = getmask(result) return out return result def anom(self, axis=None, dtype=None): """ Compute the anomalies (deviations from the arithmetic mean) along the given axis. Returns an array of anomalies, with the same shape as the input and where the arithmetic mean is computed along the given axis. Parameters ---------- axis : int, optional Axis over which the anomalies are taken. The default is to use the mean of the flattened array as reference. dtype : dtype, optional Type to use in computing the variance. For arrays of integer type the default is float32; for arrays of float types it is the same as the array type. See Also -------- mean : Compute the mean of the array. 
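        Notes
        -----
        As a sketch of the implementation below, ``a.anom(axis)`` computes
        ``a - a.mean(axis, dtype)``, expanding the mean back with
        `expand_dims` when an axis is given so the subtraction broadcasts.
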
Examples -------- >>> a = np.ma.array([1,2,3]) >>> a.anom() masked_array(data=[-1., 0., 1.], mask=False, fill_value=1e+20) """ m = self.mean(axis, dtype) if m is masked: return m if not axis: return self - m else: return self - expand_dims(m, axis) def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): """ Returns the variance of the array elements along given axis. Masked entries are ignored, and result elements which are not finite will be masked. Refer to `numpy.var` for full documentation. See Also -------- numpy.ndarray.var : corresponding function for ndarrays numpy.var : Equivalent function """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} # Easy case: nomask, business as usual if self._mask is nomask: ret = super(MaskedArray, self).var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)[()] if out is not None: if isinstance(out, MaskedArray): out.__setmask__(nomask) return out return ret # Some data are masked, yay! cnt = self.count(axis=axis, **kwargs) - ddof danom = self - self.mean(axis, dtype, keepdims=True) if iscomplexobj(self): danom = umath.absolute(danom) ** 2 else: danom *= danom dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self)) # Apply the mask if it's not a scalar if dvar.ndim: dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0)) dvar._update_from(self) elif getmask(dvar): # Make sure that masked is returned when the scalar is masked. dvar = masked if out is not None: if isinstance(out, MaskedArray): out.flat = 0 out.__setmask__(True) elif out.dtype.kind in 'biu': errmsg = "Masked data information would be lost in one or "\ "more location." raise MaskError(errmsg) else: out.flat = np.nan return out # In case with have an explicit output if out is not None: # Set the data out.flat = dvar # Set the mask if needed if isinstance(out, MaskedArray): out.__setmask__(dvar.mask) return out return dvar var.__doc__ = np.var.__doc__ def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): """ Returns the standard deviation of the array elements along given axis. Masked entries are ignored. Refer to `numpy.std` for full documentation. See Also -------- numpy.ndarray.std : corresponding function for ndarrays numpy.std : Equivalent function """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} dvar = self.var(axis, dtype, out, ddof, **kwargs) if dvar is not masked: if out is not None: np.power(out, 0.5, out=out, casting='unsafe') return out dvar = sqrt(dvar) return dvar def round(self, decimals=0, out=None): """ Return each element rounded to the given number of decimals. Refer to `numpy.around` for full documentation. See Also -------- numpy.ndarray.round : corresponding function for ndarrays numpy.around : equivalent function """ result = self._data.round(decimals=decimals, out=out).view(type(self)) if result.ndim > 0: result._mask = self._mask result._update_from(self) elif self._mask: # Return masked when the scalar is masked result = masked # No explicit output: we're done if out is None: return result if isinstance(out, MaskedArray): out.__setmask__(self._mask) return out def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None): """ Return an ndarray of indices that sort the array along the specified axis. Masked values are filled beforehand to `fill_value`. Parameters ---------- axis : int, optional Axis along which to sort. If None, the default, the flattened array is used. .. 
versionchanged:: 1.13.0 Previously, the default was documented to be -1, but that was in error. At some future date, the default will change to -1, as originally intended. Until then, the axis should be given explicitly when ``arr.ndim > 1``, to avoid a FutureWarning. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional The sorting algorithm used. order : list, optional When `a` is an array with fields defined, this argument specifies which fields to compare first, second, etc. Not all fields need be specified. endwith : {True, False}, optional Whether missing values (if any) should be treated as the largest values (True) or the smallest values (False) When the array contains unmasked values at the same extremes of the datatype, the ordering of these values and the masked values is undefined. fill_value : {var}, optional Value used internally for the masked values. If ``fill_value`` is not None, it supersedes ``endwith``. Returns ------- index_array : ndarray, int Array of indices that sort `a` along the specified axis. In other words, ``a[index_array]`` yields a sorted `a`. See Also -------- ma.MaskedArray.sort : Describes sorting algorithms used. lexsort : Indirect stable sort with multiple keys. numpy.ndarray.sort : Inplace sort. Notes ----- See `sort` for notes on the different sorting algorithms. Examples -------- >>> a = np.ma.array([3,2,1], mask=[False, False, True]) >>> a masked_array(data=[3, 2, --], mask=[False, False, True], fill_value=999999) >>> a.argsort() array([1, 0, 2]) """ # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default if axis is np._NoValue: axis = _deprecate_argsort_axis(self) if fill_value is None: if endwith: # nan > inf if np.issubdtype(self.dtype, np.floating): fill_value = np.nan else: fill_value = minimum_fill_value(self) else: fill_value = maximum_fill_value(self) filled = self.filled(fill_value) return filled.argsort(axis=axis, kind=kind, order=order) def argmin(self, axis=None, fill_value=None, out=None): """ Return array of indices to the minimum values along the given axis. Parameters ---------- axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis fill_value : {var}, optional Value used to fill in the masked values. If None, the output of minimum_fill_value(self._data) is used instead. out : {None, array}, optional Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output. Returns ------- ndarray or scalar If multi-dimension input, returns a new ndarray of indices to the minimum values along the given axis. Otherwise, returns a scalar of index to the minimum values along the given axis. Examples -------- >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) >>> x.shape = (2,2) >>> x masked_array( data=[[--, --], [2, 3]], mask=[[ True, True], [False, False]], fill_value=999999) >>> x.argmin(axis=0, fill_value=-1) array([0, 0]) >>> x.argmin(axis=0, fill_value=9) array([1, 1]) """ if fill_value is None: fill_value = minimum_fill_value(self) d = self.filled(fill_value).view(ndarray) return d.argmin(axis, out=out) def argmax(self, axis=None, fill_value=None, out=None): """ Returns array of indices of the maximum values along the given axis. Masked values are treated as if they had the value fill_value. Parameters ---------- axis : {None, integer} If None, the index is into the flattened array, otherwise along the specified axis fill_value : {var}, optional Value used to fill in the masked values. 
If None, the output of maximum_fill_value(self._data) is used instead. out : {None, array}, optional Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output. Returns ------- index_array : {integer_array} Examples -------- >>> a = np.arange(6).reshape(2,3) >>> a.argmax() 5 >>> a.argmax(0) array([1, 1, 1]) >>> a.argmax(1) array([2, 2]) """ if fill_value is None: fill_value = maximum_fill_value(self._data) d = self.filled(fill_value).view(ndarray) return d.argmax(axis, out=out) def sort(self, axis=-1, kind=None, order=None, endwith=True, fill_value=None): """ Sort the array, in-place Parameters ---------- a : array_like Array to be sorted. axis : int, optional Axis along which to sort. If None, the array is flattened before sorting. The default is -1, which sorts along the last axis. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional The sorting algorithm used. order : list, optional When `a` is a structured array, this argument specifies which fields to compare first, second, and so on. This list does not need to include all of the fields. endwith : {True, False}, optional Whether missing values (if any) should be treated as the largest values (True) or the smallest values (False) When the array contains unmasked values sorting at the same extremes of the datatype, the ordering of these values and the masked values is undefined. fill_value : {var}, optional Value used internally for the masked values. If ``fill_value`` is not None, it supersedes ``endwith``. Returns ------- sorted_array : ndarray Array of the same type and shape as `a`. See Also -------- numpy.ndarray.sort : Method to sort an array in-place. argsort : Indirect sort. lexsort : Indirect stable sort on multiple keys. searchsorted : Find elements in a sorted array. Notes ----- See ``sort`` for notes on the different sorting algorithms. Examples -------- >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Default >>> a.sort() >>> a masked_array(data=[1, 3, 5, --, --], mask=[False, False, False, True, True], fill_value=999999) >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Put missing values in the front >>> a.sort(endwith=False) >>> a masked_array(data=[--, --, 1, 3, 5], mask=[ True, True, False, False, False], fill_value=999999) >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # fill_value takes over endwith >>> a.sort(endwith=False, fill_value=3) >>> a masked_array(data=[1, --, --, 3, 5], mask=[False, True, True, False, False], fill_value=999999) """ if self._mask is nomask: ndarray.sort(self, axis=axis, kind=kind, order=order) return if self is masked: return sidx = self.argsort(axis=axis, kind=kind, order=order, fill_value=fill_value, endwith=endwith) self[...] = np.take_along_axis(self, sidx, axis=axis) def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): """ Return the minimum along a given axis. Parameters ---------- axis : {None, int}, optional Axis along which to operate. By default, ``axis`` is None and the flattened input is used. out : array_like, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. fill_value : {var}, optional Value used to fill in the masked values. If None, use the output of `minimum_fill_value`. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. 
With this option, the result will broadcast correctly against the array. Returns ------- amin : array_like New array holding the result. If ``out`` was specified, ``out`` is returned. See Also -------- ma.minimum_fill_value Returns the minimum filling value for a given datatype. """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} _mask = self._mask newmask = _check_mask_axis(_mask, axis, **kwargs) if fill_value is None: fill_value = minimum_fill_value(self) # No explicit output if out is None: result = self.filled(fill_value).min( axis=axis, out=out, **kwargs).view(type(self)) if result.ndim: # Set the mask result.__setmask__(newmask) # Get rid of Infs if newmask.ndim: np.copyto(result, result.fill_value, where=newmask) elif newmask: result = masked return result # Explicit output result = self.filled(fill_value).min(axis=axis, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if outmask is nomask: outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask else: if out.dtype.kind in 'biu': errmsg = "Masked data information would be lost in one or more"\ " location." raise MaskError(errmsg) np.copyto(out, np.nan, where=newmask) return out # unique to masked arrays def mini(self, axis=None): """ Return the array minimum along the specified axis. .. deprecated:: 1.13.0 This function is identical to both: * ``self.min(keepdims=True, axis=axis).squeeze(axis=axis)`` * ``np.ma.minimum.reduce(self, axis=axis)`` Typically though, ``self.min(axis=axis)`` is sufficient. Parameters ---------- axis : int, optional The axis along which to find the minima. Default is None, in which case the minimum value in the whole array is returned. Returns ------- min : scalar or MaskedArray If `axis` is None, the result is a scalar. Otherwise, if `axis` is given and the array is at least 2-D, the result is a masked array with dimension one smaller than the array on which `mini` is called. Examples -------- >>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2) >>> x masked_array( data=[[0, --], [2, 3], [4, --]], mask=[[False, True], [False, False], [False, True]], fill_value=999999) >>> x.mini() masked_array(data=0, mask=False, fill_value=999999) >>> x.mini(axis=0) masked_array(data=[0, 3], mask=[False, False], fill_value=999999) >>> x.mini(axis=1) masked_array(data=[0, 2, 4], mask=[False, False, False], fill_value=999999) There is a small difference between `mini` and `min`: >>> x[:,1].mini(axis=0) masked_array(data=3, mask=False, fill_value=999999) >>> x[:,1].min(axis=0) 3 """ # 2016-04-13, 1.13.0, gh-8764 warnings.warn( "`mini` is deprecated; use the `min` method or " "`np.ma.minimum.reduce instead.", DeprecationWarning, stacklevel=2) return minimum.reduce(self, axis) def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): """ Return the maximum along a given axis. Parameters ---------- axis : {None, int}, optional Axis along which to operate. By default, ``axis`` is None and the flattened input is used. out : array_like, optional Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. fill_value : {var}, optional Value used to fill in the masked values. If None, use the output of maximum_fill_value(). keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the array. 
Returns ------- amax : array_like New array holding the result. If ``out`` was specified, ``out`` is returned. See Also -------- ma.maximum_fill_value Returns the maximum filling value for a given datatype. """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} _mask = self._mask newmask = _check_mask_axis(_mask, axis, **kwargs) if fill_value is None: fill_value = maximum_fill_value(self) # No explicit output if out is None: result = self.filled(fill_value).max( axis=axis, out=out, **kwargs).view(type(self)) if result.ndim: # Set the mask result.__setmask__(newmask) # Get rid of Infs if newmask.ndim: np.copyto(result, result.fill_value, where=newmask) elif newmask: result = masked return result # Explicit output result = self.filled(fill_value).max(axis=axis, out=out, **kwargs) if isinstance(out, MaskedArray): outmask = getmask(out) if outmask is nomask: outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask else: if out.dtype.kind in 'biu': errmsg = "Masked data information would be lost in one or more"\ " location." raise MaskError(errmsg) np.copyto(out, np.nan, where=newmask) return out def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): """ Return (maximum - minimum) along the given dimension (i.e. peak-to-peak value). .. warning:: `ptp` preserves the data type of the array. This means the return value for an input of signed integers with n bits (e.g. `np.int8`, `np.int16`, etc) is also a signed integer with n bits. In that case, peak-to-peak values greater than ``2**(n-1)-1`` will be returned as negative values. An example with a work-around is shown below. Parameters ---------- axis : {None, int}, optional Axis along which to find the peaks. If None (default) the flattened array is used. out : {None, array_like}, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. fill_value : {var}, optional Value used to fill in the masked values. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the array. Returns ------- ptp : ndarray. A new array holding the result, unless ``out`` was specified, in which case a reference to ``out`` is returned. Examples -------- >>> x = np.ma.MaskedArray([[4, 9, 2, 10], ... [6, 9, 7, 12]]) >>> x.ptp(axis=1) masked_array(data=[8, 6], mask=False, fill_value=999999) >>> x.ptp(axis=0) masked_array(data=[2, 0, 5, 2], mask=False, fill_value=999999) >>> x.ptp() 10 This example shows that a negative value can be returned when the input is an array of signed integers. >>> y = np.ma.MaskedArray([[1, 127], ... [0, 127], ... [-1, 127], ... 
[-2, 127]], dtype=np.int8) >>> y.ptp(axis=1) masked_array(data=[ 126, 127, -128, -127], mask=False, fill_value=999999, dtype=int8) A work-around is to use the `view()` method to view the result as unsigned integers with the same bit width: >>> y.ptp(axis=1).view(np.uint8) masked_array(data=[126, 127, 128, 129], mask=False, fill_value=999999, dtype=uint8) """ if out is None: result = self.max(axis=axis, fill_value=fill_value, keepdims=keepdims) result -= self.min(axis=axis, fill_value=fill_value, keepdims=keepdims) return result out.flat = self.max(axis=axis, out=out, fill_value=fill_value, keepdims=keepdims) min_value = self.min(axis=axis, fill_value=fill_value, keepdims=keepdims) np.subtract(out, min_value, out=out, casting='unsafe') return out def partition(self, *args, **kwargs): warnings.warn("Warning: 'partition' will ignore the 'mask' " f"of the {self.__class__.__name__}.", stacklevel=2) return super(MaskedArray, self).partition(*args, **kwargs) def argpartition(self, *args, **kwargs): warnings.warn("Warning: 'argpartition' will ignore the 'mask' " f"of the {self.__class__.__name__}.", stacklevel=2) return super(MaskedArray, self).argpartition(*args, **kwargs) def take(self, indices, axis=None, out=None, mode='raise'): """ """ (_data, _mask) = (self._data, self._mask) cls = type(self) # Make sure the indices are not masked maskindices = getmask(indices) if maskindices is not nomask: indices = indices.filled(0) # Get the data, promoting scalars to 0d arrays with [...] so that # .view works correctly if out is None: out = _data.take(indices, axis=axis, mode=mode)[...].view(cls) else: np.take(_data, indices, axis=axis, mode=mode, out=out) # Get the mask if isinstance(out, MaskedArray): if _mask is nomask: outmask = maskindices else: outmask = _mask.take(indices, axis=axis, mode=mode) outmask |= maskindices out.__setmask__(outmask) # demote 0d arrays back to scalars, for consistency with ndarray.take return out[()] # Array methods copy = _arraymethod('copy') diagonal = _arraymethod('diagonal') flatten = _arraymethod('flatten') repeat = _arraymethod('repeat') squeeze = _arraymethod('squeeze') swapaxes = _arraymethod('swapaxes') T = property(fget=lambda self: self.transpose()) transpose = _arraymethod('transpose') def tolist(self, fill_value=None): """ Return the data portion of the masked array as a hierarchical Python list. Data items are converted to the nearest compatible Python type. Masked values are converted to `fill_value`. If `fill_value` is None, the corresponding entries in the output list will be ``None``. Parameters ---------- fill_value : scalar, optional The value to use for invalid entries. Default is None. Returns ------- result : list The Python list representation of the masked array. Examples -------- >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) >>> x.tolist() [[1, None, 3], [None, 5, None], [7, None, 9]] >>> x.tolist(-999) [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] """ _mask = self._mask # No mask ? Just return .data.tolist ? if _mask is nomask: return self._data.tolist() # Explicit fill_value: fill the array and get the list if fill_value is not None: return self.filled(fill_value).tolist() # Structured array. names = self.dtype.names if names: result = self._data.astype([(_, object) for _ in names]) for n in names: result[n][_mask[n]] = None return result.tolist() # Standard arrays. if _mask is nomask: return [None] # Set temps to save time when dealing w/ marrays. 
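        # Sketch of the standard (non-structured) path below: flatten the
        # data into an object-dtype array, overwrite masked positions with
        # None, restore the shape, and let ndarray.tolist do the nesting.
        # Illustrative:
        #
        #     >>> np.ma.array([1, 2, 3], mask=[0, 1, 0]).tolist()
        #     [1, None, 3]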
inishape = self.shape result = np.array(self._data.ravel(), dtype=object) result[_mask.ravel()] = None result.shape = inishape return result.tolist() def tostring(self, fill_value=None, order='C'): r""" A compatibility alias for `tobytes`, with exactly the same behavior. Despite its name, it returns `bytes` not `str`\ s. .. deprecated:: 1.19.0 """ # 2020-03-30, Numpy 1.19.0 warnings.warn( "tostring() is deprecated. Use tobytes() instead.", DeprecationWarning, stacklevel=2) return self.tobytes(fill_value, order=order) def tobytes(self, fill_value=None, order='C'): """ Return the array data as a string containing the raw bytes in the array. The array is filled with a fill value before the string conversion. .. versionadded:: 1.9.0 Parameters ---------- fill_value : scalar, optional Value used to fill in the masked values. Default is None, in which case `MaskedArray.fill_value` is used. order : {'C','F','A'}, optional Order of the data item in the copy. Default is 'C'. - 'C' -- C order (row major). - 'F' -- Fortran order (column major). - 'A' -- Any, current order of array. - None -- Same as 'A'. See Also -------- numpy.ndarray.tobytes tolist, tofile Notes ----- As for `ndarray.tobytes`, information about the shape, dtype, etc., but also about `fill_value`, will be lost. Examples -------- >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) >>> x.tobytes() b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00' """ return self.filled(fill_value).tobytes(order=order) def tofile(self, fid, sep="", format="%s"): """ Save a masked array to a file in binary format. .. warning:: This function is not implemented yet. Raises ------ NotImplementedError When `tofile` is called. """ raise NotImplementedError("MaskedArray.tofile() not implemented yet.") def toflex(self): """ Transforms a masked array into a flexible-type array. The flexible type array that is returned will have two fields: * the ``_data`` field stores the ``_data`` part of the array. * the ``_mask`` field stores the ``_mask`` part of the array. Parameters ---------- None Returns ------- record : ndarray A new flexible-type `ndarray` with two fields: the first element containing a value, the second element containing the corresponding mask boolean. The returned record shape matches self.shape. Notes ----- A side-effect of transforming a masked array into a flexible `ndarray` is that meta information (``fill_value``, ...) will be lost. Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( data=[[1, --, 3], [--, 5, --], [7, --, 9]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.toflex() array([[(1, False), (2, True), (3, False)], [(4, True), (5, False), (6, True)], [(7, False), (8, True), (9, False)]], dtype=[('_data', '<i8'), ('_mask', '?')]) """ # Get the basic dtype. ddtype = self.dtype # Make sure we have a mask _mask = self._mask if _mask is None: _mask = make_mask_none(self.shape, ddtype) # And get its dtype mdtype = self._mask.dtype record = np.ndarray(shape=self.shape, dtype=[('_data', ddtype), ('_mask', mdtype)]) record['_data'] = self._data record['_mask'] = self._mask return record torecords = toflex # Pickling def __getstate__(self): """Return the internal state of the masked array, for pickling purposes. 
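
        As a sketch of the implementation below, the state is the
        base-class ``__reduce__`` data state extended with the raw mask
        bytes (in the array's own C/Fortran order) and the fill value.
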
""" cf = 'CF'[self.flags.fnc] data_state = super(MaskedArray, self).__reduce__()[2] return data_state + (getmaskarray(self).tobytes(cf), self._fill_value) def __setstate__(self, state): """Restore the internal state of the masked array, for pickling purposes. ``state`` is typically the output of the ``__getstate__`` output, and is a 5-tuple: - class name - a tuple giving the shape of the data - a typecode for the data - a binary string for the data - a binary string for the mask. """ (_, shp, typ, isf, raw, msk, flv) = state super(MaskedArray, self).__setstate__((shp, typ, isf, raw)) self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk)) self.fill_value = flv def __reduce__(self): """Return a 3-tuple for pickling a MaskedArray. """ return (_mareconstruct, (self.__class__, self._baseclass, (0,), 'b',), self.__getstate__()) def __deepcopy__(self, memo=None): from copy import deepcopy copied = MaskedArray.__new__(type(self), self, copy=True) if memo is None: memo = {} memo[id(self)] = copied for (k, v) in self.__dict__.items(): copied.__dict__[k] = deepcopy(v, memo) return copied def _mareconstruct(subtype, baseclass, baseshape, basetype,): """Internal function that builds a new MaskedArray from the information stored in a pickle. """ _data = ndarray.__new__(baseclass, baseshape, basetype) _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype)) return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) class mvoid(MaskedArray): """ Fake a 'void' object to use for masked array with structured dtypes. """ def __new__(self, data, mask=nomask, dtype=None, fill_value=None, hardmask=False, copy=False, subok=True): _data = np.array(data, copy=copy, subok=subok, dtype=dtype) _data = _data.view(self) _data._hardmask = hardmask if mask is not nomask: if isinstance(mask, np.void): _data._mask = mask else: try: # Mask is already a 0D array _data._mask = np.void(mask) except TypeError: # Transform the mask to a void mdtype = make_mask_descr(dtype) _data._mask = np.array(mask, dtype=mdtype)[()] if fill_value is not None: _data.fill_value = fill_value return _data @property def _data(self): # Make sure that the _data part is a np.void return super(mvoid, self)._data[()] def __getitem__(self, indx): """ Get the index. """ m = self._mask if isinstance(m[indx], ndarray): # Can happen when indx is a multi-dimensional field: # A = ma.masked_array(data=[([0,1],)], mask=[([True, # False],)], dtype=[("A", ">i2", (2,))]) # x = A[0]; y = x["A"]; then y.mask["A"].size==2 # and we can not say masked/unmasked. # The result is no longer mvoid! # See also issue #6724. 
return masked_array( data=self._data[indx], mask=m[indx], fill_value=self._fill_value[indx], hard_mask=self._hardmask) if m is not nomask and m[indx]: return masked return self._data[indx] def __setitem__(self, indx, value): self._data[indx] = value if self._hardmask: self._mask[indx] |= getattr(value, "_mask", False) else: self._mask[indx] = getattr(value, "_mask", False) def __str__(self): m = self._mask if m is nomask: return str(self._data) rdtype = _replace_dtype_fields(self._data.dtype, "O") data_arr = super(mvoid, self)._data res = data_arr.astype(rdtype) _recursive_printoption(res, self._mask, masked_print_option) return str(res) __repr__ = __str__ def __iter__(self): "Defines an iterator for mvoid" (_data, _mask) = (self._data, self._mask) if _mask is nomask: yield from _data else: for (d, m) in zip(_data, _mask): if m: yield masked else: yield d def __len__(self): return self._data.__len__() def filled(self, fill_value=None): """ Return a copy with masked fields filled with a given value. Parameters ---------- fill_value : array_like, optional The value to use for invalid entries. Can be scalar or non-scalar. If latter is the case, the filled array should be broadcastable over input array. Default is None, in which case the `fill_value` attribute is used instead. Returns ------- filled_void A `np.void` object See Also -------- MaskedArray.filled """ return asarray(self).filled(fill_value)[()] def tolist(self): """ Transforms the mvoid object into a tuple. Masked fields are replaced by None. Returns ------- returned_tuple Tuple of fields """ _mask = self._mask if _mask is nomask: return self._data.tolist() result = [] for (d, m) in zip(self._data, self._mask): if m: result.append(None) else: # .item() makes sure we return a standard Python object result.append(d.item()) return tuple(result) ############################################################################## # Shortcuts # ############################################################################## def isMaskedArray(x): """ Test whether input is an instance of MaskedArray. This function returns True if `x` is an instance of MaskedArray and returns False otherwise. Any object is accepted as input. Parameters ---------- x : object Object to test. Returns ------- result : bool True if `x` is a MaskedArray. See Also -------- isMA : Alias to isMaskedArray. isarray : Alias to isMaskedArray. Examples -------- >>> import numpy.ma as ma >>> a = np.eye(3, 3) >>> a array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]]) >>> m = ma.masked_values(a, 0) >>> m masked_array( data=[[1.0, --, --], [--, 1.0, --], [--, --, 1.0]], mask=[[False, True, True], [ True, False, True], [ True, True, False]], fill_value=0.0) >>> ma.isMaskedArray(a) False >>> ma.isMaskedArray(m) True >>> ma.isMaskedArray([0, 1, 2]) False """ return isinstance(x, MaskedArray) isarray = isMaskedArray isMA = isMaskedArray # backward compatibility class MaskedConstant(MaskedArray): # the lone np.ma.masked instance __singleton = None @classmethod def __has_singleton(cls): # second case ensures `cls.__singleton` is not just a view on the # superclass singleton return cls.__singleton is not None and type(cls.__singleton) is cls def __new__(cls): if not cls.__has_singleton(): # We define the masked singleton as a float for higher precedence. # Note that it can be tricky sometimes w/ type comparison data = np.array(0.) 
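            # (One observable consequence, as an illustration:
            # np.ma.masked.dtype is float64.)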
mask = np.array(True) # prevent any modifications data.flags.writeable = False mask.flags.writeable = False # don't fall back on MaskedArray.__new__(MaskedConstant), since # that might confuse it - this way, the construction is entirely # within our control cls.__singleton = MaskedArray(data, mask=mask).view(cls) return cls.__singleton def __array_finalize__(self, obj): if not self.__has_singleton(): # this handles the `.view` in __new__, which we want to copy across # properties normally return super(MaskedConstant, self).__array_finalize__(obj) elif self is self.__singleton: # not clear how this can happen, play it safe pass else: # everywhere else, we want to downcast to MaskedArray, to prevent a # duplicate maskedconstant. self.__class__ = MaskedArray MaskedArray.__array_finalize__(self, obj) def __array_prepare__(self, obj, context=None): return self.view(MaskedArray).__array_prepare__(obj, context) def __array_wrap__(self, obj, context=None): return self.view(MaskedArray).__array_wrap__(obj, context) def __str__(self): return str(masked_print_option._display) def __repr__(self): if self is MaskedConstant.__singleton: return 'masked' else: # it's a subclass, or something is wrong, make it obvious return object.__repr__(self) def __format__(self, format_spec): # Replace ndarray.__format__ with the default, which supports no format characters. # Supporting format characters is unwise here, because we do not know what type # the user was expecting - better to not guess. try: return object.__format__(self, format_spec) except TypeError: # 2020-03-23, NumPy 1.19.0 warnings.warn( "Format strings passed to MaskedConstant are ignored, but in future may " "error or produce different behavior", FutureWarning, stacklevel=2 ) return object.__format__(self, "") def __reduce__(self): """Override of MaskedArray's __reduce__. """ return (self.__class__, ()) # inplace operations have no effect. We have to override them to avoid # trying to modify the readonly data and mask arrays def __iop__(self, other): return self __iadd__ = \ __isub__ = \ __imul__ = \ __ifloordiv__ = \ __itruediv__ = \ __ipow__ = \ __iop__ del __iop__ # don't leave this around def copy(self, *args, **kwargs): """ Copy is a no-op on the maskedconstant, as it is a scalar """ # maskedconstant is a scalar, so copy doesn't need to copy. There's # precedent for this with `np.bool_` scalars. return self def __copy__(self): return self def __deepcopy__(self, memo): return self def __setattr__(self, attr, value): if not self.__has_singleton(): # allow the singleton to be initialized return super(MaskedConstant, self).__setattr__(attr, value) elif self is self.__singleton: raise AttributeError( f"attributes of {self!r} are not writeable") else: # duplicate instance - we can end up here from __array_finalize__, # where we set the __class__ attribute return super(MaskedConstant, self).__setattr__(attr, value) masked = masked_singleton = MaskedConstant() masked_array = MaskedArray def array(data, dtype=None, copy=False, order=None, mask=nomask, fill_value=None, keep_mask=True, hard_mask=False, shrink=True, subok=True, ndmin=0): """ Shortcut to MaskedArray. The options are in a different order for convenience and backwards compatibility. """ return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok, keep_mask=keep_mask, hard_mask=hard_mask, fill_value=fill_value, ndmin=ndmin, shrink=shrink, order=order) array.__doc__ = masked_array.__doc__ def is_masked(x): """ Determine whether input has masked values. 
Accepts any object as input, but always returns False unless the input is a MaskedArray containing masked values. Parameters ---------- x : array_like Array to check for masked values. Returns ------- result : bool True if `x` is a MaskedArray with masked values, False otherwise. Examples -------- >>> import numpy.ma as ma >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> x masked_array(data=[--, 1, --, 2, 3], mask=[ True, False, True, False, False], fill_value=0) >>> ma.is_masked(x) True >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) >>> x masked_array(data=[0, 1, 0, 2, 3], mask=False, fill_value=42) >>> ma.is_masked(x) False Always returns False if `x` isn't a MaskedArray. >>> x = [False, True, False] >>> ma.is_masked(x) False >>> x = 'a string' >>> ma.is_masked(x) False """ m = getmask(x) if m is nomask: return False elif m.any(): return True return False ############################################################################## # Extrema functions # ############################################################################## class _extrema_operation(_MaskedUFunc): """ Generic class for maximum/minimum functions. .. note:: This is the base class for `_maximum_operation` and `_minimum_operation`. """ def __init__(self, ufunc, compare, fill_value): super(_extrema_operation, self).__init__(ufunc) self.compare = compare self.fill_value_func = fill_value def __call__(self, a, b=None): "Executes the call behavior." if b is None: # 2016-04-13, 1.13.0 warnings.warn( f"Single-argument form of np.ma.{self.__name__} is deprecated. Use " f"np.ma.{self.__name__}.reduce instead.", DeprecationWarning, stacklevel=2) return self.reduce(a) return where(self.compare(a, b), a, b) def reduce(self, target, axis=np._NoValue): "Reduce target along the given axis." target = narray(target, copy=False, subok=True) m = getmask(target) if axis is np._NoValue and target.ndim > 1: # 2017-05-06, Numpy 1.13.0: warn on axis default warnings.warn( f"In the future the default for ma.{self.__name__}.reduce will be axis=0, " f"not the current None, to match np.{self.__name__}.reduce. " "Explicitly pass 0 or None to silence this warning.", MaskedArrayFutureWarning, stacklevel=2) axis = None if axis is not np._NoValue: kwargs = dict(axis=axis) else: kwargs = dict() if m is nomask: t = self.f.reduce(target, **kwargs) else: target = target.filled( self.fill_value_func(target)).view(type(target)) t = self.f.reduce(target, **kwargs) m = umath.logical_and.reduce(m, **kwargs) if hasattr(t, '_mask'): t._mask = m elif m: t = masked return t def outer(self, a, b): "Return the function applied to the outer product of a and b." 
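        # Sketch of what follows: the result's mask is logical_or.outer of
        # the operand masks, so a cell is masked when either input element
        # was.  Illustrative:
        #
        #     >>> a = np.ma.array([1, 2], mask=[False, True])
        #     >>> np.ma.maximum.outer(a, [3, 4])
        #     masked_array(
        #       data=[[3, 4],
        #             [--, --]],
        #       mask=[[False, False],
        #             [ True,  True]],
        #       fill_value=999999)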
ma = getmask(a) mb = getmask(b) if ma is nomask and mb is nomask: m = nomask else: ma = getmaskarray(a) mb = getmaskarray(b) m = logical_or.outer(ma, mb) result = self.f.outer(filled(a), filled(b)) if not isinstance(result, MaskedArray): result = result.view(MaskedArray) result._mask = m return result def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} try: return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs) except (AttributeError, TypeError): # If obj doesn't have a min method, or if the method doesn't accept a # fill_value argument return asanyarray(obj).min(axis=axis, fill_value=fill_value, out=out, **kwargs) min.__doc__ = MaskedArray.min.__doc__ def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} try: return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs) except (AttributeError, TypeError): # If obj doesn't have a max method, or if the method doesn't accept a # fill_value argument return asanyarray(obj).max(axis=axis, fill_value=fill_value, out=out, **kwargs) max.__doc__ = MaskedArray.max.__doc__ def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} try: return obj.ptp(axis, out=out, fill_value=fill_value, **kwargs) except (AttributeError, TypeError): # If obj doesn't have a ptp method or if the method doesn't accept # a fill_value argument return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, out=out, **kwargs) ptp.__doc__ = MaskedArray.ptp.__doc__ ############################################################################## # Definition of functions from the corresponding methods # ############################################################################## class _frommethod: """ Define functions from existing MaskedArray methods. Parameters ---------- methodname : str Name of the method to transform. """ def __init__(self, methodname, reversed=False): self.__name__ = methodname self.__doc__ = self.getdoc() self.reversed = reversed def getdoc(self): "Return the doc of the function (from the doc of the method)." 
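        # Lookup order, as sketched by the code below: prefer the
        # MaskedArray method's docstring, fall back to the top-level numpy
        # function's, and prepend a signature line built with
        # get_object_signature.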
meth = getattr(MaskedArray, self.__name__, None) or\ getattr(np, self.__name__, None) signature = self.__name__ + get_object_signature(meth) if meth is not None: doc = """ %s\n%s""" % ( signature, getattr(meth, '__doc__', None)) return doc def __call__(self, a, *args, **params): if self.reversed: args = list(args) a, args[0] = args[0], a marr = asanyarray(a) method_name = self.__name__ method = getattr(type(marr), method_name, None) if method is None: # use the corresponding np function method = getattr(np, method_name) return method(marr, *args, **params) all = _frommethod('all') anomalies = anom = _frommethod('anom') any = _frommethod('any') compress = _frommethod('compress', reversed=True) cumprod = _frommethod('cumprod') cumsum = _frommethod('cumsum') copy = _frommethod('copy') diagonal = _frommethod('diagonal') harden_mask = _frommethod('harden_mask') ids = _frommethod('ids') maximum = _extrema_operation(umath.maximum, greater, maximum_fill_value) mean = _frommethod('mean') minimum = _extrema_operation(umath.minimum, less, minimum_fill_value) nonzero = _frommethod('nonzero') prod = _frommethod('prod') product = _frommethod('prod') ravel = _frommethod('ravel') repeat = _frommethod('repeat') shrink_mask = _frommethod('shrink_mask') soften_mask = _frommethod('soften_mask') std = _frommethod('std') sum = _frommethod('sum') swapaxes = _frommethod('swapaxes') #take = _frommethod('take') trace = _frommethod('trace') var = _frommethod('var') count = _frommethod('count') def take(a, indices, axis=None, out=None, mode='raise'): """ """ a = masked_array(a) return a.take(indices, axis=axis, out=out, mode=mode) def power(a, b, third=None): """ Returns element-wise base array raised to power from second array. This is the masked array version of `numpy.power`. For details see `numpy.power`. See Also -------- numpy.power Notes ----- The *out* argument to `numpy.power` is not supported, `third` has to be None. """ if third is not None: raise MaskError("3-argument power not supported.") # Get the masks ma = getmask(a) mb = getmask(b) m = mask_or(ma, mb) # Get the rawdata fa = getdata(a) fb = getdata(b) # Get the type of the result (so that we preserve subclasses) if isinstance(a, MaskedArray): basetype = type(a) else: basetype = MaskedArray # Get the result and view it as a (subclass of) MaskedArray with np.errstate(divide='ignore', invalid='ignore'): result = np.where(m, fa, umath.power(fa, fb)).view(basetype) result._update_from(a) # Find where we're in trouble w/ NaNs and Infs invalid = np.logical_not(np.isfinite(result.view(ndarray))) # Add the initial mask if m is not nomask: if not result.ndim: return masked result._mask = np.logical_or(m, invalid) # Fix the invalid parts if invalid.any(): if not result.ndim: return masked elif result._mask is nomask: result._mask = invalid result._data[invalid] = result.fill_value return result argmin = _frommethod('argmin') argmax = _frommethod('argmax') def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None): "Function version of the eponymous method." a = np.asanyarray(a) # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default if axis is np._NoValue: axis = _deprecate_argsort_axis(a) if isinstance(a, MaskedArray): return a.argsort(axis=axis, kind=kind, order=order, endwith=endwith, fill_value=fill_value) else: return a.argsort(axis=axis, kind=kind, order=order) argsort.__doc__ = MaskedArray.argsort.__doc__ def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None): """ Return a sorted copy of the masked array. 
Equivalent to creating a copy of the array and applying the MaskedArray ``sort()`` method. Refer to ``MaskedArray.sort`` for the full documentation See Also -------- MaskedArray.sort : equivalent method """ a = np.array(a, copy=True, subok=True) if axis is None: a = a.flatten() axis = 0 if isinstance(a, MaskedArray): a.sort(axis=axis, kind=kind, order=order, endwith=endwith, fill_value=fill_value) else: a.sort(axis=axis, kind=kind, order=order) return a def compressed(x): """ Return all the non-masked data as a 1-D array. This function is equivalent to calling the "compressed" method of a `ma.MaskedArray`, see `ma.MaskedArray.compressed` for details. See Also -------- ma.MaskedArray.compressed Equivalent method. """ return asanyarray(x).compressed() def concatenate(arrays, axis=0): """ Concatenate a sequence of arrays along the given axis. Parameters ---------- arrays : sequence of array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int, optional The axis along which the arrays will be joined. Default is 0. Returns ------- result : MaskedArray The concatenated array with any masked entries preserved. See Also -------- numpy.concatenate : Equivalent function in the top-level NumPy module. Examples -------- >>> import numpy.ma as ma >>> a = ma.arange(3) >>> a[1] = ma.masked >>> b = ma.arange(2, 5) >>> a masked_array(data=[0, --, 2], mask=[False, True, False], fill_value=999999) >>> b masked_array(data=[2, 3, 4], mask=False, fill_value=999999) >>> ma.concatenate([a, b]) masked_array(data=[0, --, 2, 2, 3, 4], mask=[False, True, False, False, False, False], fill_value=999999) """ d = np.concatenate([getdata(a) for a in arrays], axis) rcls = get_masked_subclass(*arrays) data = d.view(rcls) # Check whether one of the arrays has a non-empty mask. for x in arrays: if getmask(x) is not nomask: break else: return data # OK, so we have to concatenate the masks dm = np.concatenate([getmaskarray(a) for a in arrays], axis) dm = dm.reshape(d.shape) # If we decide to keep a '_shrinkmask' option, we want to check that # all of them are True, and then check for dm.any() data._mask = _shrink_mask(dm) return data def diag(v, k=0): """ Extract a diagonal or construct a diagonal array. This function is the equivalent of `numpy.diag` that takes masked values into account, see `numpy.diag` for details. See Also -------- numpy.diag : Equivalent function for ndarrays. """ output = np.diag(v, k).view(MaskedArray) if getmask(v) is not nomask: output._mask = np.diag(v._mask, k) return output def left_shift(a, n): """ Shift the bits of an integer to the left. This is the masked array version of `numpy.left_shift`, for details see that function. See Also -------- numpy.left_shift """ m = getmask(a) if m is nomask: d = umath.left_shift(filled(a), n) return masked_array(d) else: d = umath.left_shift(filled(a, 0), n) return masked_array(d, mask=m) def right_shift(a, n): """ Shift the bits of an integer to the right. This is the masked array version of `numpy.right_shift`, for details see that function. See Also -------- numpy.right_shift """ m = getmask(a) if m is nomask: d = umath.right_shift(filled(a), n) return masked_array(d) else: d = umath.right_shift(filled(a, 0), n) return masked_array(d, mask=m) def put(a, indices, values, mode='raise'): """ Set storage-indexed locations to corresponding values. This function is equivalent to `MaskedArray.put`, see that method for details. 


def put(a, indices, values, mode='raise'):
    """
    Set storage-indexed locations to corresponding values.

    This function is equivalent to `MaskedArray.put`, see that method
    for details.

    See Also
    --------
    MaskedArray.put

    """
    # We can't use 'frommethod', the order of arguments is different
    try:
        return a.put(indices, values, mode=mode)
    except AttributeError:
        return narray(a, copy=False).put(indices, values, mode=mode)


def putmask(a, mask, values):  # , mode='raise'):
    """
    Changes elements of an array based on conditional and input values.

    This is the masked array version of `numpy.putmask`, for details see
    `numpy.putmask`.

    See Also
    --------
    numpy.putmask

    Notes
    -----
    Using a masked array as `values` will **not** transform a `ndarray` into
    a `MaskedArray`.

    """
    # We can't use 'frommethod', the order of arguments is different
    if not isinstance(a, MaskedArray):
        a = a.view(MaskedArray)
    (valdata, valmask) = (getdata(values), getmask(values))
    if getmask(a) is nomask:
        if valmask is not nomask:
            a._sharedmask = True
            a._mask = make_mask_none(a.shape, a.dtype)
            np.copyto(a._mask, valmask, where=mask)
    elif a._hardmask:
        if valmask is not nomask:
            m = a._mask.copy()
            np.copyto(m, valmask, where=mask)
            a.mask |= m
    else:
        if valmask is nomask:
            valmask = getmaskarray(values)
        np.copyto(a._mask, valmask, where=mask)
    np.copyto(a._data, valdata, where=mask)
    return


def transpose(a, axes=None):
    """
    Permute the dimensions of an array.

    This function is exactly equivalent to `numpy.transpose`.

    See Also
    --------
    numpy.transpose : Equivalent function in top-level NumPy module.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> x = ma.arange(4).reshape((2,2))
    >>> x[1, 1] = ma.masked
    >>> x
    masked_array(
      data=[[0, 1],
            [2, --]],
      mask=[[False, False],
            [False,  True]],
      fill_value=999999)

    >>> ma.transpose(x)
    masked_array(
      data=[[0, 2],
            [1, --]],
      mask=[[False, False],
            [False,  True]],
      fill_value=999999)
    """
    # We can't use 'frommethod', as 'transpose' doesn't take keywords
    try:
        return a.transpose(axes)
    except AttributeError:
        return narray(a, copy=False).transpose(axes).view(MaskedArray)


def reshape(a, new_shape, order='C'):
    """
    Returns an array containing the same data with a new shape.

    Refer to `MaskedArray.reshape` for full documentation.

    See Also
    --------
    MaskedArray.reshape : equivalent function

    """
    # We can't use 'frommethod', it whines about some parameters. Dammit.
    try:
        return a.reshape(new_shape, order=order)
    except AttributeError:
        _tmp = narray(a, copy=False).reshape(new_shape, order=order)
        return _tmp.view(MaskedArray)


def resize(x, new_shape):
    """
    Return a new masked array with the specified size and shape.

    This is the masked equivalent of the `numpy.resize` function. The new
    array is filled with repeated copies of `x` (in the order that the
    data are stored in memory). If `x` is masked, the new array will be
    masked, and the new mask will be a repetition of the old one.

    See Also
    --------
    numpy.resize : Equivalent function in the top level NumPy module.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.array([[1, 2] ,[3, 4]])
    >>> a[0, 1] = ma.masked
    >>> a
    masked_array(
      data=[[1, --],
            [3, 4]],
      mask=[[False,  True],
            [False, False]],
      fill_value=999999)
    >>> np.resize(a, (3, 3))
    masked_array(
      data=[[1, 2, 3],
            [4, 1, 2],
            [3, 4, 1]],
      mask=False,
      fill_value=999999)
    >>> ma.resize(a, (3, 3))
    masked_array(
      data=[[1, --, 3],
            [4, 1, --],
            [3, 4, 1]],
      mask=[[False,  True, False],
            [False, False,  True],
            [False, False, False]],
      fill_value=999999)

    A MaskedArray is always returned, regardless of the input type.

    >>> a = np.array([[1, 2] ,[3, 4]])
    >>> ma.resize(a, (3, 3))
    masked_array(
      data=[[1, 2, 3],
            [4, 1, 2],
            [3, 4, 1]],
      mask=False,
      fill_value=999999)

    """
    # We can't use _frommethods here, as N.resize is notoriously whiny.
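    # (np.resize repeats the flattened input as many times as needed; the
    # mask below is resized in exactly the same way and then re-attached to
    # the resized data, so data and mask stay aligned.)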
    m = getmask(x)
    if m is not nomask:
        m = np.resize(m, new_shape)
    result = np.resize(x, new_shape).view(get_masked_subclass(x))
    if result.ndim:
        result._mask = m
    return result


def ndim(obj):
    """
    maskedarray version of the numpy function.

    """
    return np.ndim(getdata(obj))

ndim.__doc__ = np.ndim.__doc__


def shape(obj):
    "maskedarray version of the numpy function."
    return np.shape(getdata(obj))
shape.__doc__ = np.shape.__doc__


def size(obj, axis=None):
    "maskedarray version of the numpy function."
    return np.size(getdata(obj), axis)
size.__doc__ = np.size.__doc__


##############################################################################
#                            Extra functions                                 #
##############################################################################


def where(condition, x=_NoValue, y=_NoValue):
    """
    Return a masked array with elements from `x` or `y`, depending on condition.

    .. note::
        When only `condition` is provided, this function is identical to
        `nonzero`. The rest of this documentation covers only the case where
        all three arguments are provided.

    Parameters
    ----------
    condition : array_like, bool
        Where True, yield `x`, otherwise yield `y`.
    x, y : array_like, optional
        Values from which to choose. `x`, `y` and `condition` need to be
        broadcastable to some shape.

    Returns
    -------
    out : MaskedArray
        A masked array with `masked` elements where the condition is masked,
        elements from `x` where `condition` is True, and elements from `y`
        elsewhere.

    See Also
    --------
    numpy.where : Equivalent function in the top-level NumPy module.
    nonzero : The function that is called when x and y are omitted.

    Examples
    --------
    >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
    ...                                                      [1, 0, 1],
    ...                                                      [0, 1, 0]])
    >>> x
    masked_array(
      data=[[0.0, --, 2.0],
            [--, 4.0, --],
            [6.0, --, 8.0]],
      mask=[[False,  True, False],
            [ True, False,  True],
            [False,  True, False]],
      fill_value=1e+20)
    >>> np.ma.where(x > 5, x, -3.1416)
    masked_array(
      data=[[-3.1416, --, -3.1416],
            [--, -3.1416, --],
            [6.0, --, 8.0]],
      mask=[[False,  True, False],
            [ True, False,  True],
            [False,  True, False]],
      fill_value=1e+20)

    """
    # handle the single-argument case
    missing = (x is _NoValue, y is _NoValue).count(True)
    if missing == 1:
        raise ValueError("Must provide both 'x' and 'y' or neither.")
    if missing == 2:
        return nonzero(condition)

    # we only care if the condition is true - false or masked pick y
    cf = filled(condition, False)
    xd = getdata(x)
    yd = getdata(y)

    # we need the full arrays here for correct final dimensions
    cm = getmaskarray(condition)
    xm = getmaskarray(x)
    ym = getmaskarray(y)

    # deal with the fact that masked.dtype == float64, but we don't actually
    # want to treat it as that.
    if x is masked and y is not masked:
        xd = np.zeros((), dtype=yd.dtype)
        xm = np.ones((), dtype=ym.dtype)
    elif y is masked and x is not masked:
        yd = np.zeros((), dtype=xd.dtype)
        ym = np.ones((), dtype=xm.dtype)

    data = np.where(cf, xd, yd)
    mask = np.where(cf, xm, ym)
    mask = np.where(cm, np.ones((), dtype=mask.dtype), mask)

    # collapse the mask, for backwards compatibility
    mask = _shrink_mask(mask)

    return masked_array(data, mask=mask)
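
# Illustrative sketch (not from the upstream module): masked entries of
# `condition` stay masked in the result.
#
#     >>> c = np.ma.array([True, False, True], mask=[0, 1, 0])
#     >>> np.ma.where(c, 1, -1)
#     masked_array(data=[1, --, 1],
#                  mask=[False,  True, False],
#            fill_value=999999)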


def choose(indices, choices, out=None, mode='raise'):
    """
    Use an index array to construct a new array from a set of choices.

    Given an array of integers and a set of n choice arrays, this method
    will create a new array that merges each of the choice arrays.  Where a
    value in `indices` is i, the new array will have the value that
    choices[i] contains in the same place.

    Parameters
    ----------
    indices : ndarray of ints
        This array must contain integers in ``[0, n-1]``, where n is the
        number of choices.
    choices : sequence of arrays
        Choice arrays. The index array and all of the choices should be
        broadcastable to the same shape.
    out : array, optional
        If provided, the result will be inserted into this array. It should
        be of the appropriate shape and `dtype`.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices will behave.

        * 'raise' : raise an error
        * 'wrap' : wrap around
        * 'clip' : clip to the range

    Returns
    -------
    merged_array : array

    See Also
    --------
    choose : equivalent function

    Examples
    --------
    >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]])
    >>> a = np.array([2, 1, 0])
    >>> np.ma.choose(a, choice)
    masked_array(data=[3, 2, 1],
                 mask=False,
           fill_value=999999)

    """
    def fmask(x):
        "Returns the filled array, or True if masked."
        if x is masked:
            return True
        return filled(x)

    def nmask(x):
        "Returns the mask, True if ``masked``, False if ``nomask``."
        if x is masked:
            return True
        return getmask(x)

    # Get the indices.
    c = filled(indices, 0)
    # Get the masks.
    masks = [nmask(x) for x in choices]
    data = [fmask(x) for x in choices]
    # Construct the mask
    outputmask = np.choose(c, masks, mode=mode)
    outputmask = make_mask(mask_or(outputmask, getmask(indices)),
                           copy=False, shrink=True)
    # Get the choices.
    d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)
    if out is not None:
        if isinstance(out, MaskedArray):
            out.__setmask__(outputmask)
        return out
    d.__setmask__(outputmask)
    return d


def round_(a, decimals=0, out=None):
    """
    Return a copy of a, rounded to 'decimals' places.

    When 'decimals' is negative, it specifies the number of positions
    to the left of the decimal point.  The real and imaginary parts of
    complex numbers are rounded separately. Nothing is done if the
    array is not of float type and 'decimals' is greater than or equal
    to 0.

    Parameters
    ----------
    decimals : int
        Number of decimals to round to. May be negative.
    out : array_like
        Existing array to use for output.
        If not given, returns a default copy of a.

    Notes
    -----
    If out is given and does not have a mask attribute, the mask of a
    is lost!

    """
    if out is None:
        return np.round_(a, decimals, out)
    else:
        np.round_(getdata(a), decimals, out)
        if hasattr(out, '_mask'):
            out._mask = getmask(a)
        return out
round = round_
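
# Illustrative sketch (not from the upstream module): rounding keeps the
# mask of the input.
#
#     >>> np.ma.round_(np.ma.array([1.26, 2.74], mask=[0, 1]), 1)
#     masked_array(data=[1.3, --],
#                  mask=[False,  True],
#            fill_value=1e+20)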


# Needed by dot, so move here from extras.py. It will still be exported
# from extras.py for compatibility.
def mask_rowcols(a, axis=None):
    """
    Mask rows and/or columns of a 2D array that contain masked values.

    Mask whole rows and/or columns of a 2D array that contain
    masked values.  The masking behavior is selected using the
    `axis` parameter.

      - If `axis` is None, rows *and* columns are masked.
      - If `axis` is 0, only rows are masked.
      - If `axis` is 1 or -1, only columns are masked.

    Parameters
    ----------
    a : array_like, MaskedArray
        The array to mask.  If not a MaskedArray instance (or if no array
        elements are masked), the result is a MaskedArray with `mask` set
        to `nomask` (False). Must be a 2D array.
    axis : int, optional
        Axis along which to perform the operation. If None, applies to a
        flattened version of the array.

    Returns
    -------
    a : MaskedArray
        A modified version of the input array, masked depending on the value
        of the `axis` parameter.

    Raises
    ------
    NotImplementedError
        If input array `a` is not 2D.

    See Also
    --------
    mask_rows : Mask rows of a 2D array that contain masked values.
    mask_cols : Mask cols of a 2D array that contain masked values.
    masked_where : Mask where a condition is met.

    Notes
    -----
    The input array's mask is modified by this function.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.zeros((3, 3), dtype=int)
    >>> a[1, 1] = 1
    >>> a
    array([[0, 0, 0],
           [0, 1, 0],
           [0, 0, 0]])
    >>> a = ma.masked_equal(a, 1)
    >>> a
    masked_array(
      data=[[0, 0, 0],
            [0, --, 0],
            [0, 0, 0]],
      mask=[[False, False, False],
            [False,  True, False],
            [False, False, False]],
      fill_value=1)
    >>> ma.mask_rowcols(a)
    masked_array(
      data=[[0, --, 0],
            [--, --, --],
            [0, --, 0]],
      mask=[[False,  True, False],
            [ True,  True,  True],
            [False,  True, False]],
      fill_value=1)

    """
    a = array(a, subok=False)
    if a.ndim != 2:
        raise NotImplementedError("mask_rowcols works for 2D arrays only.")
    m = getmask(a)
    # Nothing is masked: return a
    if m is nomask or not m.any():
        return a
    maskedval = m.nonzero()
    a._mask = a._mask.copy()
    if not axis:
        a[np.unique(maskedval[0])] = masked
    if axis in [None, 1, -1]:
        a[:, np.unique(maskedval[1])] = masked
    return a


# Include masked dot here to avoid import problems in getting it from
# extras.py. Note that it is not included in __all__, but rather exported
# from extras in order to avoid backward compatibility problems.
def dot(a, b, strict=False, out=None):
    """
    Return the dot product of two arrays.

    This function is the equivalent of `numpy.dot` that takes masked values
    into account. Note that `strict` and `out` are in different positions
    than in the method version. In order to maintain compatibility with the
    corresponding method, it is recommended that the optional arguments be
    treated as keyword only. At some point that may be mandatory.

    .. note::
      Works only with 2-D arrays at the moment.

    Parameters
    ----------
    a, b : masked_array_like
        Input arrays.
    strict : bool, optional
        Whether masked data are propagated (True) or set to 0 (False) for
        the computation. Default is False.  Propagating the mask means that
        if a masked value appears in a row or column, the whole row or
        column is considered masked.
    out : masked_array, optional
        Output argument. This must have the exact kind that would be
        returned if it was not used. In particular, it must have the right
        type, must be C-contiguous, and its dtype must be the dtype that
        would be returned for `dot(a,b)`. This is a performance feature.
        Therefore, if these conditions are not met, an exception is raised,
        instead of attempting to be flexible.

        .. versionadded:: 1.10.2

    See Also
    --------
    numpy.dot : Equivalent function for ndarrays.

    Examples
    --------
    >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])
    >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])
    >>> np.ma.dot(a, b)
    masked_array(
      data=[[21, 26],
            [45, 64]],
      mask=[[False, False],
            [False, False]],
      fill_value=999999)
    >>> np.ma.dot(a, b, strict=True)
    masked_array(
      data=[[--, --],
            [--, 64]],
      mask=[[ True,  True],
            [ True, False]],
      fill_value=999999)

    """
    # !!!: Works only with 2D arrays. There should be a way to get it to run
    # with higher dimension
    if strict and (a.ndim == 2) and (b.ndim == 2):
        a = mask_rowcols(a, 0)
        b = mask_rowcols(b, 1)
    am = ~getmaskarray(a)
    bm = ~getmaskarray(b)
    if out is None:
        d = np.dot(filled(a, 0), filled(b, 0))
        m = ~np.dot(am, bm)
        if d.ndim == 0:
            d = np.asarray(d)
        r = d.view(get_masked_subclass(a, b))
        r.__setmask__(m)
        return r
    else:
        d = np.dot(filled(a, 0), filled(b, 0), out._data)
        if out.mask.shape != d.shape:
            out._mask = np.empty(d.shape, MaskType)
        np.dot(am, bm, out._mask)
        np.logical_not(out._mask, out._mask)
        return out


def inner(a, b):
    """
    Returns the inner product of a and b for arrays of floating point types.
    Like the generic NumPy equivalent the product sum is over the last
    dimension of a and b. The first argument is not conjugated.

    """
    fa = filled(a, 0)
    fb = filled(b, 0)
    if fa.ndim == 0:
        fa.shape = (1,)
    if fb.ndim == 0:
        fb.shape = (1,)
    return np.inner(fa, fb).view(MaskedArray)
inner.__doc__ = doc_note(np.inner.__doc__,
                         "Masked values are replaced by 0.")
innerproduct = inner


def outer(a, b):
    "maskedarray version of the numpy function."
    fa = filled(a, 0).ravel()
    fb = filled(b, 0).ravel()
    d = np.outer(fa, fb)
    ma = getmask(a)
    mb = getmask(b)
    if ma is nomask and mb is nomask:
        return masked_array(d)
    ma = getmaskarray(a)
    mb = getmaskarray(b)
    m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False)
    return masked_array(d, mask=m)
outer.__doc__ = doc_note(np.outer.__doc__,
                         "Masked values are replaced by 0.")
outerproduct = outer


def _convolve_or_correlate(f, a, v, mode, propagate_mask):
    """
    Helper function for ma.correlate and ma.convolve
    """
    if propagate_mask:
        # results which are contributed to by either item in any pair being invalid
        mask = (
            f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode)
            | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode)
        )
        data = f(getdata(a), getdata(v), mode=mode)
    else:
        # results which are not contributed to by any pair of valid elements
        # (pass mode here as well, so the mask and the data have the same shape)
        mask = ~f(~getmaskarray(a), ~getmaskarray(v), mode=mode)
        data = f(filled(a, 0), filled(v, 0), mode=mode)

    return masked_array(data, mask=mask)


def correlate(a, v, mode='valid', propagate_mask=True):
    """
    Cross-correlation of two 1-dimensional sequences.

    Parameters
    ----------
    a, v : array_like
        Input sequences.
    mode : {'valid', 'same', 'full'}, optional
        Refer to the `np.convolve` docstring.  Note that the default
        is 'valid', unlike `convolve`, which uses 'full'.
    propagate_mask : bool
        If True, then a result element is masked if any masked element
        contributes towards it.
        If False, then a result element is only masked if no non-masked
        element contributes towards it.

    Returns
    -------
    out : MaskedArray
        Discrete cross-correlation of `a` and `v`.

    See Also
    --------
    numpy.correlate : Equivalent function in the top-level NumPy module.
    """
    return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask)


def convolve(a, v, mode='full', propagate_mask=True):
    """
    Returns the discrete, linear convolution of two one-dimensional sequences.

    Parameters
    ----------
    a, v : array_like
        Input sequences.
    mode : {'valid', 'same', 'full'}, optional
        Refer to the `np.convolve` docstring.
    propagate_mask : bool
        If True, then if any masked element is included in the sum for a
        result element, then the result is masked.
        If False, then the result element is only masked if no non-masked
        cells contribute towards it.

    Returns
    -------
    out : MaskedArray
        Discrete, linear convolution of `a` and `v`.

    See Also
    --------
    numpy.convolve : Equivalent function in the top-level NumPy module.
    """
    return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask)
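
# Illustrative sketch (not from the upstream module): with the default
# ``propagate_mask=True``, any output element touched by a masked input is
# masked.
#
#     >>> a = np.ma.array([1, 1, 1], mask=[0, 1, 0])
#     >>> np.ma.convolve(a, [1, 1]).mask
#     array([False,  True,  True, False])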


def allequal(a, b, fill_value=True):
    """
    Return True if all entries of a and b are equal, using
    fill_value as a truth value where either or both are masked.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    fill_value : bool, optional
        Whether masked values in a or b are considered equal (True) or not
        (False).

    Returns
    -------
    y : bool
        Returns True if the two arrays are equal, False otherwise.
        If either array contains NaN, then False is returned.

    See Also
    --------
    all, any
    numpy.ma.allclose

    Examples
    --------
    >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
    >>> a
    masked_array(data=[10000000000.0, 1e-07, --],
                 mask=[False, False,  True],
           fill_value=1e+20)

    >>> b = np.array([1e10, 1e-7, -42.0])
    >>> b
    array([ 1.00000000e+10,  1.00000000e-07, -4.20000000e+01])
    >>> np.ma.allequal(a, b, fill_value=False)
    False
    >>> np.ma.allequal(a, b)
    True

    """
    m = mask_or(getmask(a), getmask(b))
    if m is nomask:
        x = getdata(a)
        y = getdata(b)
        d = umath.equal(x, y)
        return d.all()
    elif fill_value:
        x = getdata(a)
        y = getdata(b)
        d = umath.equal(x, y)
        dm = array(d, mask=m, copy=False)
        return dm.filled(True).all(None)
    else:
        return False


def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8):
    """
    Returns True if two arrays are element-wise equal within a tolerance.

    This function is equivalent to `allclose` except that masked values
    are treated as equal (default) or unequal, depending on the `masked_equal`
    argument.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    masked_equal : bool, optional
        Whether masked values in `a` and `b` are considered equal (True) or not
        (False). They are considered equal by default.
    rtol : float, optional
        Relative tolerance. The relative difference is equal to ``rtol * b``.
        Default is 1e-5.
    atol : float, optional
        Absolute tolerance. The absolute difference is equal to `atol`.
        Default is 1e-8.

    Returns
    -------
    y : bool
        Returns True if the two arrays are equal within the given
        tolerance, False otherwise. If either array contains NaN, then
        False is returned.

    See Also
    --------
    all, any
    numpy.allclose : the non-masked `allclose`.

    Notes
    -----
    If the following equation is element-wise True, then `allclose` returns
    True::

      absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))

    Return True if all elements of `a` and `b` are equal subject to
    given tolerances.

    Examples
    --------
    >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
    >>> a
    masked_array(data=[10000000000.0, 1e-07, --],
                 mask=[False, False,  True],
           fill_value=1e+20)
    >>> b = np.ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1])
    >>> np.ma.allclose(a, b)
    False

    >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
    >>> b = np.ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
    >>> np.ma.allclose(a, b)
    True
    >>> np.ma.allclose(a, b, masked_equal=False)
    False

    Masked values are not compared directly.

    >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
    >>> b = np.ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
    >>> np.ma.allclose(a, b)
    True
    >>> np.ma.allclose(a, b, masked_equal=False)
    False

    """
    x = masked_array(a, copy=False)
    y = masked_array(b, copy=False)

    # make sure y is an inexact type to avoid abs(MIN_INT); will cause
    # casting of x later.
    # NOTE: We explicitly allow timedelta, which used to work. This could
    #       possibly be deprecated. See also gh-18286.
    #       timedelta works if `atol` is an integer or also a timedelta.
    #       Although, the default tolerances are unlikely to be useful
    if y.dtype.kind != "m":
        dtype = np.result_type(y, 1.)
        if y.dtype != dtype:
            y = masked_array(y, dtype=dtype, copy=False)

    m = mask_or(getmask(x), getmask(y))
    xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)
    # If we have some infs, they should fall at the same place.
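    # (Infinite entries must match exactly below; the remaining finite
    # entries are then tested against ``atol + rtol * absolute(y)``.)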
    if not np.all(xinf == filled(np.isinf(y), False)):
        return False
    # No infs at all
    if not np.any(xinf):
        d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
                   masked_equal)
        return np.all(d)

    if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
        return False
    x = x[~xinf]
    y = y[~xinf]

    d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
               masked_equal)

    return np.all(d)


def asarray(a, dtype=None, order=None):
    """
    Convert the input to a masked array of the given data-type.

    No copy is performed if the input is already an `ndarray`. If `a` is
    a subclass of `MaskedArray`, a base class `MaskedArray` is returned.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to a masked array. This
        includes lists, lists of tuples, tuples, tuples of tuples, tuples
        of lists, ndarrays and masked arrays.
    dtype : dtype, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major ('C') or column-major ('FORTRAN') memory
        representation.  Default is 'C'.

    Returns
    -------
    out : MaskedArray
        Masked array interpretation of `a`.

    See Also
    --------
    asanyarray : Similar to `asarray`, but conserves subclasses.

    Examples
    --------
    >>> x = np.arange(10.).reshape(2, 5)
    >>> x
    array([[0., 1., 2., 3., 4.],
           [5., 6., 7., 8., 9.]])
    >>> np.ma.asarray(x)
    masked_array(
      data=[[0., 1., 2., 3., 4.],
            [5., 6., 7., 8., 9.]],
      mask=False,
      fill_value=1e+20)
    >>> type(np.ma.asarray(x))
    <class 'numpy.ma.core.MaskedArray'>

    """
    order = order or 'C'
    return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
                        subok=False, order=order)


def asanyarray(a, dtype=None):
    """
    Convert the input to a masked array, conserving subclasses.

    If `a` is a subclass of `MaskedArray`, its class is conserved.
    No copy is performed if the input is already an `ndarray`.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array.
    dtype : dtype, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major ('C') or column-major ('FORTRAN') memory
        representation.  Default is 'C'.

    Returns
    -------
    out : MaskedArray
        MaskedArray interpretation of `a`.

    See Also
    --------
    asarray : Similar to `asanyarray`, but does not conserve subclass.

    Examples
    --------
    >>> x = np.arange(10.).reshape(2, 5)
    >>> x
    array([[0., 1., 2., 3., 4.],
           [5., 6., 7., 8., 9.]])
    >>> np.ma.asanyarray(x)
    masked_array(
      data=[[0., 1., 2., 3., 4.],
            [5., 6., 7., 8., 9.]],
      mask=False,
      fill_value=1e+20)
    >>> type(np.ma.asanyarray(x))
    <class 'numpy.ma.core.MaskedArray'>

    """
    # workaround for #8666, to preserve identity. Ideally the bottom line
    # would handle this for us.
    if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype):
        return a
    return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)


##############################################################################
#                               Pickling                                     #
##############################################################################


def _pickle_warn(method):
    # NumPy 1.15.0, 2017-12-10
    warnings.warn(
        f"np.ma.{method} is deprecated, use pickle.{method} instead",
        DeprecationWarning, stacklevel=3)


def fromfile(file, dtype=float, count=-1, sep=''):
    raise NotImplementedError(
        "fromfile() not yet implemented for a MaskedArray.")
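
# Illustrative sketch (not from the upstream module): `asarray` downcasts
# MaskedArray subclasses to the base class, while `asanyarray` preserves
# them and returns an already-suitable input unchanged.
#
#     >>> m = np.ma.masked_array([1, 2])
#     >>> np.ma.asanyarray(m) is m
#     True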


def fromflex(fxarray):
    """
    Build a masked array from a suitable flexible-type array.

    The input array has to have a data-type with ``_data`` and ``_mask``
    fields. This type of array is output by `MaskedArray.toflex`.

    Parameters
    ----------
    fxarray : ndarray
        The structured input array, containing ``_data`` and ``_mask``
        fields. If present, other fields are discarded.

    Returns
    -------
    result : MaskedArray
        The constructed masked array.

    See Also
    --------
    MaskedArray.toflex : Build a flexible-type array from a masked array.

    Examples
    --------
    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
    >>> rec = x.toflex()
    >>> rec
    array([[(0, False), (1,  True), (2, False)],
           [(3,  True), (4, False), (5,  True)],
           [(6, False), (7,  True), (8, False)]],
          dtype=[('_data', '<i8'), ('_mask', '?')])
    >>> x2 = np.ma.fromflex(rec)
    >>> x2
    masked_array(
      data=[[0, --, 2],
            [--, 4, --],
            [6, --, 8]],
      mask=[[False,  True, False],
            [ True, False,  True],
            [False,  True, False]],
      fill_value=999999)

    Extra fields can be present in the structured array but are discarded:

    >>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
    >>> rec2 = np.zeros((2, 2), dtype=dt)
    >>> rec2
    array([[(0, False, 0.), (0, False, 0.)],
           [(0, False, 0.), (0, False, 0.)]],
          dtype=[('_data', '<i4'), ('_mask', '?'), ('field3', '<f4')])
    >>> y = np.ma.fromflex(rec2)
    >>> y
    masked_array(
      data=[[0, 0],
            [0, 0]],
      mask=[[False, False],
            [False, False]],
      fill_value=999999,
      dtype=int32)

    """
    return masked_array(fxarray['_data'], mask=fxarray['_mask'])


class _convert2ma:

    """
    Convert functions from numpy to numpy.ma.

    Parameters
    ----------
    funcname : str
        Name of the numpy function to wrap.
    """
    __doc__ = None

    def __init__(self, funcname, params=None):
        self._func = getattr(np, funcname)
        self.__doc__ = self.getdoc()
        self._extras = params or {}

    def getdoc(self):
        "Return the doc of the function (from the doc of the method)."
        doc = getattr(self._func, '__doc__', None)
        sig = get_object_signature(self._func)
        if doc:
            # Add the signature of the function at the beginning of the doc
            if sig:
                sig = "%s%s\n" % (self._func.__name__, sig)
            doc = sig + doc
        return doc

    def __call__(self, *args, **params):
        # Find the common parameters to the call and the definition
        _extras = self._extras
        common_params = set(params).intersection(_extras)
        # Drop the common parameters from the call
        for p in common_params:
            _extras[p] = params.pop(p)
        # Get the result
        result = self._func.__call__(*args, **params).view(MaskedArray)
        if "fill_value" in common_params:
            result.fill_value = _extras.get("fill_value", None)
        if "hardmask" in common_params:
            # look up the same key that is stored in the params dict
            result._hardmask = bool(_extras.get("hardmask", False))
        return result


arange = _convert2ma('arange', params=dict(fill_value=None, hardmask=False))
clip = np.clip
diff = np.diff
empty = _convert2ma('empty', params=dict(fill_value=None, hardmask=False))
empty_like = _convert2ma('empty_like')
frombuffer = _convert2ma('frombuffer')
fromfunction = _convert2ma('fromfunction')
identity = _convert2ma(
    'identity', params=dict(fill_value=None, hardmask=False))
indices = np.indices
ones = _convert2ma('ones', params=dict(fill_value=None, hardmask=False))
ones_like = np.ones_like
squeeze = np.squeeze
zeros = _convert2ma('zeros', params=dict(fill_value=None, hardmask=False))
zeros_like = np.zeros_like
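
# Illustrative sketch (not from the upstream module): the `_convert2ma`
# wrappers return MaskedArray views of the corresponding numpy results.
#
#     >>> type(np.ma.zeros((2, 2)))
#     <class 'numpy.ma.core.MaskedArray'>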


def append(a, b, axis=None):
    """Append values to the end of an array.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    a : array_like
        Values are appended to a copy of this array.
    b : array_like
        These values are appended to a copy of `a`.  It must be of the
        correct shape (the same shape as `a`, excluding `axis`).  If `axis`
        is not specified, `b` can be any shape and will be flattened
        before use.
    axis : int, optional
        The axis along which `b` is appended.  If `axis` is not given,
        both `a` and `b` are flattened before use.

    Returns
    -------
    append : MaskedArray
        A copy of `a` with `b` appended to `axis`.  Note that `append`
        does not occur in-place: a new array is allocated and filled.  If
        `axis` is None, the result is a flattened array.

    See Also
    --------
    numpy.append : Equivalent function in the top-level NumPy module.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_values([1, 2, 3], 2)
    >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
    >>> ma.append(a, b)
    masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9],
                 mask=[False,  True, False, False, False, False,  True,
                       False, False],
           fill_value=999999)

    """
    return concatenate([a, b], axis)
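
# Illustrative sketch (not from the upstream module): `append` flattens both
# inputs when `axis` is None and keeps their masks.
#
#     >>> np.ma.append(np.ma.array([1, 2], mask=[0, 1]), [[3], [4]]).tolist()
#     [1, None, 3, 4]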
[ "numpy.core.umath.power", "numpy.core.umath.less", "numpy.split", "numpy.void", "numpy.resize", "numpy.empty", "numpy.ones", "builtins.all", "numpy.AxisError", "numpy.shape", "numpy.isclose", "numpy.core.umath.less_equal", "numpy.inner", "numpy.diag", "numpy.core.arrayprint.dtype_is_implied", "numpy.core.umath.multiply", "numpy.ndarray", "numpy.unique", "numpy.full", "numpy.core.umath.isfinite", "numpy.core.umath.equal", "numpy.power", "numpy.logical_not", "numpy.isfinite", "numpy.compat.getargspec", "numpy.take_along_axis", "numpy.finfo", "numpy.core.arrayprint.dtype_short_repr", "numpy.reshape", "numpy.core.umath.logical_not", "numpy.core.umath.logical_or.outer", "numpy.core.umath.greater", "numpy.core.umath.absolute", "numpy.get_printoptions", "numpy.result_type", "copy.deepcopy", "numpy.asarray", "numpy.array2string", "numpy.core.umath.logical_or", "numpy.ndarray.view", "numpy.can_cast", "numpy.isinf", "numpy.round_", "numpy.core.umath.logical_and.reduce", "numpy.dot", "numpy.choose", "numpy.copyto", "numpy.broadcast_to", "numpy.all", "numpy.issubdtype", "textwrap.dedent", "numpy.core.umath.cos", "numpy.concatenate", "numpy.outer", "numpy.iscomplexobj", "numpy.subtract", "numpy.datetime64", "numpy.seterr", "numpy.ndarray.__new__", "numpy.dtype", "numpy.asanyarray", "numpy.zeros", "numpy.errstate", "numpy.broadcast", "numpy.any", "numpy.ndarray.ravel", "numpy.expand_dims", "numpy.timedelta64", "numpy.array", "numpy.where", "numpy.logical_or", "numpy.core.numeric.normalize_axis_tuple", "numpy.ndarray.sort", "warnings.warn", "numpy.take", "inspect.cleandoc" ]
'(d, fill_value)'], {}), '(m, (d, fill_value))\n', (125733, 125753), True, 'import numpy as np\n'), ((130113, 130151), 'numpy.split', 'np.split', (['data', '(ind, -ind)'], {'axis': 'axis'}), '(data, (ind, -ind), axis=axis)\n', (130121, 130151), True, 'import numpy as np\n'), ((130184, 130227), 'numpy.concatenate', 'np.concatenate', (['(arr[0], arr[2])'], {'axis': 'axis'}), '((arr[0], arr[2]), axis=axis)\n', (130198, 130227), True, 'import numpy as np\n'), ((130259, 130297), 'numpy.split', 'np.split', (['mask', '(ind, -ind)'], {'axis': 'axis'}), '(mask, (ind, -ind), axis=axis)\n', (130267, 130297), True, 'import numpy as np\n'), ((130330, 130373), 'numpy.concatenate', 'np.concatenate', (['(arr[0], arr[2])'], {'axis': 'axis'}), '((arr[0], arr[2]), axis=axis)\n', (130344, 130373), True, 'import numpy as np\n'), ((136583, 136617), 'numpy.broadcast_to', 'np.broadcast_to', (['mask', 'check.shape'], {}), '(mask, check.shape)\n', (136598, 136617), True, 'import numpy as np\n'), ((150503, 150554), 'numpy.array', 'np.array', (['self.size'], {'dtype': 'np.intp', 'ndmin': 'self.ndim'}), '(self.size, dtype=np.intp, ndmin=self.ndim)\n', (150511, 150554), True, 'import numpy as np\n'), ((109073, 109290), 'warnings.warn', 'warnings.warn', (['f"""Upon accessing multidimensional field {indx!s}, need to keep dimensionality of fill_value at 0. Discarding heterogeneous fill_value and setting all to {dout._fill_value[0]!s}."""'], {'stacklevel': '(2)'}), "(\n f'Upon accessing multidimensional field {indx!s}, need to keep dimensionality of fill_value at 0. Discarding heterogeneous fill_value and setting all to {dout._fill_value[0]!s}.'\n , stacklevel=2)\n", (109086, 109290), False, 'import warnings\n'), ((212296, 212324), 'numpy.array', 'np.array', (['mask'], {'dtype': 'mdtype'}), '(mask, dtype=mdtype)\n', (212304, 212324), True, 'import numpy as np\n'), ((239605, 239626), 'numpy.array', 'narray', (['a'], {'copy': '(False)'}), '(a, copy=False)\n', (239611, 239626), True, 'from numpy import array as narray\n'), ((125939, 125977), 'numpy.array', 'np.array', (['fill_value'], {'dtype': 'self.dtype'}), '(fill_value, dtype=self.dtype)\n', (125947, 125977), True, 'import numpy as np\n'), ((88903, 88933), 'numpy.asanyarray', 'np.asanyarray', (['m'], {'dtype': 'mdtype'}), '(m, dtype=mdtype)\n', (88916, 88933), True, 'import numpy as np\n')]
import json


class User:
    """ Class User, emulates a user """

    def __init__(self, json_obj):
        """Constructor for class User

        :param json_obj: The dict with the object data
        """
        self.__dict__ = json_obj

    def __str__(self):
        """ Returns the object as a string JSON

        :return: JSON as a string
        """
        return json.dumps(self.__dict__)
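# A quick usage sketch (hypothetical data, not part of the original snippet):
# the constructor replaces the instance __dict__ wholesale, so every key of
# the dict becomes an attribute.
#
#   user = User({"name": "Ada", "age": 36})
#   user.name        # -> 'Ada'
#   str(user)        # -> '{"name": "Ada", "age": 36}'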
[ "json.dumps" ]
[((393, 418), 'json.dumps', 'json.dumps', (['self.__dict__'], {}), '(self.__dict__)\n', (403, 418), False, 'import json\n')]
""" Tab art provider code - a tab provider provides all drawing functionality to the L{AuiNotebook}. This allows the L{AuiNotebook} to have a plugable look-and-feel. By default, a L{AuiNotebook} uses an instance of this class called L{AuiDefaultTabArt} which provides bitmap art and a colour scheme that is adapted to the major platforms' look. You can either derive from that class to alter its behaviour or write a completely new tab art class. Call L{AuiNotebook.SetArtProvider} to make use this new tab art. """ __author__ = "<NAME> <<EMAIL>>" __date__ = "31 March 2009" import wx if wx.Platform == '__WXMAC__': import Carbon.Appearance from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText from aui_utilities import GetBaseColour, DrawMACCloseButton, LightColour, TakeScreenShot from aui_utilities import CopyAttributes from aui_constants import * # -- GUI helper classes and functions -- class AuiCommandCapture(wx.PyEvtHandler): """ A class to handle the dropdown window menu. """ def __init__(self): """ Default class constructor. """ wx.PyEvtHandler.__init__(self) self._last_id = 0 def GetCommandId(self): """ Returns the event command identifier. """ return self._last_id def ProcessEvent(self, event): """ Processes an event, searching event tables and calling zero or more suitable event handler function(s). :param `event`: the event to process. :note: Normally, your application would not call this function: it is called in the wxPython implementation to dispatch incoming user interface events to the framework (and application). However, you might need to call it if implementing new functionality (such as a new control) where you define new event types, as opposed to allowing the user to override functions. An instance where you might actually override the L{ProcessEvent} function is where you want to direct event processing to event handlers not normally noticed by wxPython. For example, in the document/view architecture, documents and views are potential event handlers. When an event reaches a frame, L{ProcessEvent} will need to be called on the associated document and view in case event handler functions are associated with these objects. The normal order of event table searching is as follows: 1. If the object is disabled (via a call to `SetEvtHandlerEnabled`) the function skips to step (6). 2. If the object is a `wx.Window`, L{ProcessEvent} is recursively called on the window's `wx.Validator`. If this returns ``True``, the function exits. 3. wxWidgets `SearchEventTable` is called for this event handler. If this fails, the base class table is tried, and so on until no more tables exist or an appropriate function was found, in which case the function exits. 4. The search is applied down the entire chain of event handlers (usually the chain has a length of one). If this succeeds, the function exits. 5. If the object is a `wx.Window` and the event is a `wx.CommandEvent`, L{ProcessEvent} is recursively applied to the parent window's event handler. If this returns ``True``, the function exits. 6. Finally, L{ProcessEvent} is called on the `wx.App` object. """ if event.GetEventType() == wx.wxEVT_COMMAND_MENU_SELECTED: self._last_id = event.GetId() return True if self.GetNextHandler(): return self.GetNextHandler().ProcessEvent(event) return False class AuiDefaultTabArt(object): """ Tab art provider code - a tab provider provides all drawing functionality to the L{AuiNotebook}. This allows the L{AuiNotebook} to have a plugable look-and-feel. 

class AuiDefaultTabArt(object):
    """
    Tab art provider code - a tab provider provides all drawing functionality
    to the L{AuiNotebook}. This allows the L{AuiNotebook} to have a pluggable
    look-and-feel.

    By default, a L{AuiNotebook} uses an instance of this class called
    L{AuiDefaultTabArt} which provides bitmap art and a colour scheme that is
    adapted to the major platforms' look. You can either derive from that class
    to alter its behaviour or write a completely new tab art class.

    Call L{AuiNotebook.SetArtProvider} to make use of this new tab art.
    """

    def __init__(self):
        """ Default class constructor. """

        self._normal_font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
        self._selected_font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
        self._selected_font.SetWeight(wx.BOLD)
        self._measuring_font = self._selected_font

        self._fixed_tab_width = 100
        self._tab_ctrl_height = 0
        self._buttonRect = wx.Rect()

        base_colour = GetBaseColour()

        self._base_colour = base_colour
        border_colour = StepColour(base_colour, 75)

        self._border_pen = wx.Pen(border_colour)
        self._base_colour_pen = wx.Pen(self._base_colour)
        self._base_colour_brush = wx.Brush(self._base_colour)

        if wx.Platform == "__WXMAC__":
            bmp_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DDKSHADOW)
            self._active_close_bmp = DrawMACCloseButton(bmp_colour)
            self._disabled_close_bmp = DrawMACCloseButton(wx.Colour(128, 128, 128))
        else:
            self._active_close_bmp = BitmapFromBits(nb_close_bits, 16, 16, wx.BLACK)
            self._disabled_close_bmp = BitmapFromBits(nb_close_bits, 16, 16, wx.Colour(128, 128, 128))

        self._hover_close_bmp = self._active_close_bmp
        self._pressed_close_bmp = self._active_close_bmp

        self._active_left_bmp = BitmapFromBits(nb_left_bits, 16, 16, wx.BLACK)
        self._disabled_left_bmp = BitmapFromBits(nb_left_bits, 16, 16, wx.Colour(128, 128, 128))

        self._active_right_bmp = BitmapFromBits(nb_right_bits, 16, 16, wx.BLACK)
        self._disabled_right_bmp = BitmapFromBits(nb_right_bits, 16, 16, wx.Colour(128, 128, 128))

        self._active_windowlist_bmp = BitmapFromBits(nb_list_bits, 16, 16, wx.BLACK)
        self._disabled_windowlist_bmp = BitmapFromBits(nb_list_bits, 16, 16, wx.Colour(128, 128, 128))

        if wx.Platform == "__WXMAC__":
            # Get proper highlight colour for focus rectangle from the
            # current Mac theme.  kThemeBrushFocusHighlight is
            # available on Mac OS 8.5 and higher
            if hasattr(wx, 'MacThemeColour'):
                c = wx.MacThemeColour(Carbon.Appearance.kThemeBrushFocusHighlight)
            else:
                brush = wx.Brush(wx.BLACK)
                brush.MacSetTheme(Carbon.Appearance.kThemeBrushFocusHighlight)
                c = brush.GetColour()

            self._focusPen = wx.Pen(c, 2, wx.SOLID)
        else:
            self._focusPen = wx.Pen(wx.BLACK, 1, wx.USER_DASH)
            self._focusPen.SetDashes([1, 1])
            self._focusPen.SetCap(wx.CAP_BUTT)

    def Clone(self):
        """ Clones the art object. """

        art = AuiDefaultTabArt()
        art.SetNormalFont(self.GetNormalFont())
        art.SetSelectedFont(self.GetSelectedFont())
        art.SetMeasuringFont(self.GetMeasuringFont())

        art = CopyAttributes(art, self)
        return art
    def SetAGWFlags(self, agwFlags):
        """
        Sets the tab art flags.

        :param `agwFlags`: a combination of the following values:

         ==================================== ==================================
         Flag name                            Description
         ==================================== ==================================
         ``AUI_NB_TOP``                       With this style, tabs are drawn along the top of the notebook
         ``AUI_NB_LEFT``                      With this style, tabs are drawn along the left of the notebook. Not implemented yet.
         ``AUI_NB_RIGHT``                     With this style, tabs are drawn along the right of the notebook. Not implemented yet.
         ``AUI_NB_BOTTOM``                    With this style, tabs are drawn along the bottom of the notebook
         ``AUI_NB_TAB_SPLIT``                 Allows the tab control to be split by dragging a tab
         ``AUI_NB_TAB_MOVE``                  Allows a tab to be moved horizontally by dragging
         ``AUI_NB_TAB_EXTERNAL_MOVE``         Allows a tab to be moved to another tab control
         ``AUI_NB_TAB_FIXED_WIDTH``           With this style, all tabs have the same width
         ``AUI_NB_SCROLL_BUTTONS``            With this style, left and right scroll buttons are displayed
         ``AUI_NB_WINDOWLIST_BUTTON``         With this style, a drop-down list of windows is available
         ``AUI_NB_CLOSE_BUTTON``              With this style, a close button is available on the tab bar
         ``AUI_NB_CLOSE_ON_ACTIVE_TAB``       With this style, a close button is available on the active tab
         ``AUI_NB_CLOSE_ON_ALL_TABS``         With this style, a close button is available on all tabs
         ``AUI_NB_MIDDLE_CLICK_CLOSE``        Allows to close AuiNotebook tabs by mouse middle button click
         ``AUI_NB_SUB_NOTEBOOK``              This style is used by AuiManager to create automatic AuiNotebooks
         ``AUI_NB_HIDE_ON_SINGLE_TAB``        Hides the tab window if only one tab is present
         ``AUI_NB_SMART_TABS``                Use Smart Tabbing, like ``Alt``+``Tab`` on Windows
         ``AUI_NB_USE_IMAGES_DROPDOWN``       Uses images on dropdown window list menu instead of check items
         ``AUI_NB_CLOSE_ON_TAB_LEFT``         Draws the tab close button on the left instead of on the right (a la Camino browser)
         ``AUI_NB_TAB_FLOAT``                 Allows the floating of single tabs. Known limitation: when the notebook is more or less full screen, tabs cannot be dragged far enough outside of the notebook to become floating pages
         ``AUI_NB_DRAW_DND_TAB``              Draws an image representation of a tab while dragging (on by default)
         ==================================== ==================================
        """

        self._agwFlags = agwFlags

    def GetAGWFlags(self):
        """
        Returns the tab art flags.

        :see: L{SetAGWFlags} for a list of possible return values.
        """

        return self._agwFlags

    def SetSizingInfo(self, tab_ctrl_size, tab_count, minMaxTabWidth):
        """
        Sets the tab sizing information.

        :param `tab_ctrl_size`: the size of the tab control area;
        :param `tab_count`: the number of tabs;
        :param `minMaxTabWidth`: the minimum and maximum tab widths to be used
         when the ``AUI_NB_TAB_FIXED_WIDTH`` style is active.
        """

        self._fixed_tab_width = 100
        minTabWidth, maxTabWidth = minMaxTabWidth

        tot_width = tab_ctrl_size.x - self.GetIndentSize() - 4
        agwFlags = self.GetAGWFlags()

        if agwFlags & AUI_NB_CLOSE_BUTTON:
            tot_width -= self._active_close_bmp.GetWidth()
        if agwFlags & AUI_NB_WINDOWLIST_BUTTON:
            tot_width -= self._active_windowlist_bmp.GetWidth()

        if tab_count > 0:
            self._fixed_tab_width = tot_width/tab_count

        if self._fixed_tab_width < 100:
            self._fixed_tab_width = 100

        if self._fixed_tab_width > tot_width/2:
            self._fixed_tab_width = tot_width/2

        if self._fixed_tab_width > 220:
            self._fixed_tab_width = 220

        if minTabWidth > -1:
            self._fixed_tab_width = max(self._fixed_tab_width, minTabWidth)
        if maxTabWidth > -1:
            self._fixed_tab_width = min(self._fixed_tab_width, maxTabWidth)

        self._tab_ctrl_height = tab_ctrl_size.y
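    # Illustrative walk-through of SetSizingInfo() with assumed numbers (not
    # part of the API): for a 600px wide tab area with the default 5px indent
    # and a close button on the bar (16px bitmap),
    # tot_width = 600 - 5 - 4 - 16 = 575.  With 4 tabs this gives
    # 575/4 = 143 (integer division), which already satisfies the 100..220
    # clamp, so each fixed-width tab ends up 143 pixels wide.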
""" self._buttonRect = wx.Rect() # draw background agwFlags = self.GetAGWFlags() if agwFlags & AUI_NB_BOTTOM: r = wx.Rect(rect.x, rect.y, rect.width+2, rect.height) # TODO: else if (agwFlags & AUI_NB_LEFT) # TODO: else if (agwFlags & AUI_NB_RIGHT) else: #for AUI_NB_TOP r = wx.Rect(rect.x, rect.y, rect.width+2, rect.height-3) top_colour = StepColour(self._base_colour, 90) bottom_colour = StepColour(self._base_colour, 170) dc.GradientFillLinear(r, top_colour, bottom_colour, wx.SOUTH) # draw base lines dc.SetPen(self._border_pen) y = rect.GetHeight() w = rect.GetWidth() if agwFlags & AUI_NB_BOTTOM: dc.SetBrush(wx.Brush(bottom_colour)) dc.DrawRectangle(-1, 0, w+2, 4) # TODO: else if (agwFlags & AUI_NB_LEFT) # TODO: else if (agwFlags & AUI_NB_RIGHT) else: # for AUI_NB_TOP dc.SetBrush(self._base_colour_brush) dc.DrawRectangle(-1, y-4, w+2, 4) def DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control=False): """ Draws a single tab. :param `dc`: a `wx.DC` device context; :param `wnd`: a `wx.Window` instance object; :param `page`: the tab control page associated with the tab; :param `in_rect`: rectangle the tab should be confined to; :param `close_button_state`: the state of the close button on the tab; :param `paint_control`: whether to draw the control inside a tab (if any) on a `wx.MemoryDC`. """ # if the caption is empty, measure some temporary text caption = page.caption if not caption: caption = "Xj" dc.SetFont(self._selected_font) selected_textx, selected_texty, dummy = dc.GetMultiLineTextExtent(caption) dc.SetFont(self._normal_font) normal_textx, normal_texty, dummy = dc.GetMultiLineTextExtent(caption) control = page.control # figure out the size of the tab tab_size, x_extent = self.GetTabSize(dc, wnd, page.caption, page.bitmap, page.active, close_button_state, control) tab_height = self._tab_ctrl_height - 3 tab_width = tab_size[0] tab_x = in_rect.x tab_y = in_rect.y + in_rect.height - tab_height caption = page.caption # select pen, brush and font for the tab to be drawn if page.active: dc.SetFont(self._selected_font) textx, texty = selected_textx, selected_texty else: dc.SetFont(self._normal_font) textx, texty = normal_textx, normal_texty if not page.enabled: dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT)) pagebitmap = page.dis_bitmap else: dc.SetTextForeground(page.text_colour) pagebitmap = page.bitmap # create points that will make the tab outline clip_width = tab_width if tab_x + clip_width > in_rect.x + in_rect.width: clip_width = in_rect.x + in_rect.width - tab_x # since the above code above doesn't play well with WXDFB or WXCOCOA, # we'll just use a rectangle for the clipping region for now -- dc.SetClippingRegion(tab_x, tab_y, clip_width+1, tab_height-3) border_points = [wx.Point() for i in xrange(6)] agwFlags = self.GetAGWFlags() if agwFlags & AUI_NB_BOTTOM: border_points[0] = wx.Point(tab_x, tab_y) border_points[1] = wx.Point(tab_x, tab_y+tab_height-6) border_points[2] = wx.Point(tab_x+2, tab_y+tab_height-4) border_points[3] = wx.Point(tab_x+tab_width-2, tab_y+tab_height-4) border_points[4] = wx.Point(tab_x+tab_width, tab_y+tab_height-6) border_points[5] = wx.Point(tab_x+tab_width, tab_y) else: #if (agwFlags & AUI_NB_TOP) border_points[0] = wx.Point(tab_x, tab_y+tab_height-4) border_points[1] = wx.Point(tab_x, tab_y+2) border_points[2] = wx.Point(tab_x+2, tab_y) border_points[3] = wx.Point(tab_x+tab_width-2, tab_y) border_points[4] = wx.Point(tab_x+tab_width, tab_y+2) border_points[5] = wx.Point(tab_x+tab_width, tab_y+tab_height-4) # 
        # TODO: else if (agwFlags & AUI_NB_LEFT)
        # TODO: else if (agwFlags & AUI_NB_RIGHT)

        drawn_tab_yoff = border_points[1].y
        drawn_tab_height = border_points[0].y - border_points[1].y

        if page.active:

            # draw active tab

            # draw base background colour
            r = wx.Rect(tab_x, tab_y, tab_width, tab_height)
            dc.SetPen(self._base_colour_pen)
            dc.SetBrush(self._base_colour_brush)
            dc.DrawRectangle(r.x+1, r.y+1, r.width-1, r.height-4)

            # this white helps fill out the gradient at the top of the tab
            dc.SetPen(wx.WHITE_PEN)
            dc.SetBrush(wx.WHITE_BRUSH)
            dc.DrawRectangle(r.x+2, r.y+1, r.width-3, r.height-4)

            # these two points help the rounded corners appear more antialiased
            dc.SetPen(self._base_colour_pen)
            dc.DrawPoint(r.x+2, r.y+1)
            dc.DrawPoint(r.x+r.width-2, r.y+1)

            # set rectangle down a bit for gradient drawing
            r.SetHeight(r.GetHeight()/2)
            r.x += 2
            r.width -= 2
            r.y += r.height
            r.y -= 2

            # draw gradient background
            top_colour = wx.WHITE
            bottom_colour = self._base_colour
            dc.GradientFillLinear(r, bottom_colour, top_colour, wx.NORTH)

        else:

            # draw inactive tab

            r = wx.Rect(tab_x, tab_y+1, tab_width, tab_height-3)

            # start the gradient up a bit and leave the inside border inset
            # by a pixel for a 3D look.  Only the top half of the inactive
            # tab will have a slight gradient
            r.x += 3
            r.y += 1
            r.width -= 4
            r.height /= 2
            r.height -= 1

            # -- draw top gradient fill for glossy look
            top_colour = self._base_colour
            bottom_colour = StepColour(top_colour, 160)
            dc.GradientFillLinear(r, bottom_colour, top_colour, wx.NORTH)

            r.y += r.height
            r.y -= 1

            # -- draw bottom fill for glossy look
            top_colour = self._base_colour
            bottom_colour = self._base_colour
            dc.GradientFillLinear(r, top_colour, bottom_colour, wx.SOUTH)

        # draw tab outline
        dc.SetPen(self._border_pen)
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        dc.DrawPolygon(border_points)

        # there are two horizontal grey lines at the bottom of the tab control,
        # this gets rid of the top one of those lines in the tab control
        if page.active:

            if agwFlags & AUI_NB_BOTTOM:
                dc.SetPen(wx.Pen(StepColour(self._base_colour, 170)))

            # TODO: else if (agwFlags & AUI_NB_LEFT)
            # TODO: else if (agwFlags & AUI_NB_RIGHT)

            else:   # for AUI_NB_TOP
                dc.SetPen(self._base_colour_pen)

            dc.DrawLine(border_points[0].x+1, border_points[0].y,
                        border_points[5].x, border_points[5].y)

        text_offset = tab_x + 8
        close_button_width = 0

        if close_button_state != AUI_BUTTON_STATE_HIDDEN:
            close_button_width = self._active_close_bmp.GetWidth()

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                text_offset += close_button_width - 5

        bitmap_offset = 0

        if pagebitmap.IsOk():

            bitmap_offset = tab_x + 8
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT and close_button_width:
                bitmap_offset += close_button_width - 5

            # draw bitmap
            dc.DrawBitmap(pagebitmap, bitmap_offset,
                          drawn_tab_yoff + (drawn_tab_height/2) - (pagebitmap.GetHeight()/2),
                          True)

            text_offset = bitmap_offset + pagebitmap.GetWidth()
            text_offset += 3    # bitmap padding

        else:

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT == 0 or not close_button_width:
                text_offset = tab_x + 8

        draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x) - close_button_width)

        ypos = drawn_tab_yoff + (drawn_tab_height)/2 - (texty/2) - 1

        offset_focus = text_offset

        if control is not None:
            if control.GetPosition() != wx.Point(text_offset+1, ypos):
                control.SetPosition(wx.Point(text_offset+1, ypos))

            if not control.IsShown():
                control.Show()

            if paint_control:
                bmp = TakeScreenShot(control.GetScreenRect())
                dc.DrawBitmap(bmp, text_offset+1, ypos, True)

            controlW, controlH = control.GetSize()
            text_offset += controlW + 4
            textx += controlW + 4

        # draw tab text
        rectx, recty, dummy = dc.GetMultiLineTextExtent(draw_text)
        dc.DrawLabel(draw_text, wx.Rect(text_offset, ypos, rectx, recty))

        # draw focus rectangle
        self.DrawFocusRectangle(dc, page, wnd, draw_text, offset_focus, bitmap_offset,
                                drawn_tab_yoff, drawn_tab_height, textx, texty)

        out_button_rect = wx.Rect()

        # draw close button if necessary
        if close_button_state != AUI_BUTTON_STATE_HIDDEN:

            bmp = self._disabled_close_bmp

            if close_button_state == AUI_BUTTON_STATE_HOVER:
                bmp = self._hover_close_bmp
            elif close_button_state == AUI_BUTTON_STATE_PRESSED:
                bmp = self._pressed_close_bmp

            shift = (agwFlags & AUI_NB_BOTTOM and [1] or [0])[0]

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                rect = wx.Rect(tab_x + 4, tab_y + (tab_height - bmp.GetHeight())/2 - shift,
                               close_button_width, tab_height)
            else:
                rect = wx.Rect(tab_x + tab_width - close_button_width - 1,
                               tab_y + (tab_height - bmp.GetHeight())/2 - shift,
                               close_button_width, tab_height)

            rect = IndentPressedBitmap(rect, close_button_state)
            dc.DrawBitmap(bmp, rect.x, rect.y, True)

            out_button_rect = rect

        out_tab_rect = wx.Rect(tab_x, tab_y, tab_width, tab_height)

        dc.DestroyClippingRegion()

        return out_tab_rect, out_button_rect, x_extent

    def SetCustomButton(self, bitmap_id, button_state, bmp):
        """
        Sets a custom bitmap for the close, left, right and window list buttons.

        :param `bitmap_id`: the button identifier;
        :param `button_state`: the button state;
        :param `bmp`: the custom bitmap to use for the button.
        """

        if bitmap_id == AUI_BUTTON_CLOSE:
            if button_state == AUI_BUTTON_STATE_NORMAL:
                self._active_close_bmp = bmp
                self._hover_close_bmp = self._active_close_bmp
                self._pressed_close_bmp = self._active_close_bmp
                self._disabled_close_bmp = self._active_close_bmp
            elif button_state == AUI_BUTTON_STATE_HOVER:
                self._hover_close_bmp = bmp
            elif button_state == AUI_BUTTON_STATE_PRESSED:
                self._pressed_close_bmp = bmp
            else:
                self._disabled_close_bmp = bmp

        elif bitmap_id == AUI_BUTTON_LEFT:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                self._disabled_left_bmp = bmp
            else:
                self._active_left_bmp = bmp

        elif bitmap_id == AUI_BUTTON_RIGHT:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                self._disabled_right_bmp = bmp
            else:
                self._active_right_bmp = bmp

        elif bitmap_id == AUI_BUTTON_WINDOWLIST:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                self._disabled_windowlist_bmp = bmp
            else:
                self._active_windowlist_bmp = bmp

    def GetIndentSize(self):
        """ Returns the tabs indent size. """

        return 5
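    # Rough example of what GetTabSize() below computes, with assumed inputs:
    # a caption measuring 60x13px, a 16x16 bitmap and a visible close button
    # give tab_width = 60 + (16+3) + (16+3) + 16 = 114 and
    # tab_height = max(13, 16) + 10 = 26 (before any fixed-width override).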
""" dc.SetFont(self._measuring_font) measured_textx, measured_texty, dummy = dc.GetMultiLineTextExtent(caption) # add padding around the text tab_width = measured_textx tab_height = measured_texty # if the close button is showing, add space for it if close_button_state != AUI_BUTTON_STATE_HIDDEN: tab_width += self._active_close_bmp.GetWidth() + 3 # if there's a bitmap, add space for it if bitmap.IsOk(): tab_width += bitmap.GetWidth() tab_width += 3 # right side bitmap padding tab_height = max(tab_height, bitmap.GetHeight()) # add padding tab_width += 16 tab_height += 10 agwFlags = self.GetAGWFlags() if agwFlags & AUI_NB_TAB_FIXED_WIDTH: tab_width = self._fixed_tab_width if control is not None: tab_width += control.GetSize().GetWidth() + 4 x_extent = tab_width return (tab_width, tab_height), x_extent def DrawButton(self, dc, wnd, in_rect, button, orientation): """ Draws a button on the tab or on the tab area, depending on the button identifier. :param `dc`: a `wx.DC` device context; :param `wnd`: a `wx.Window` instance object; :param `in_rect`: rectangle the tab should be confined to; :param `button`: an instance of the button class; :param `orientation`: the tab orientation. """ bitmap_id, button_state = button.id, button.cur_state if bitmap_id == AUI_BUTTON_CLOSE: if button_state & AUI_BUTTON_STATE_DISABLED: bmp = self._disabled_close_bmp elif button_state & AUI_BUTTON_STATE_HOVER: bmp = self._hover_close_bmp elif button_state & AUI_BUTTON_STATE_PRESSED: bmp = self._pressed_close_bmp else: bmp = self._active_close_bmp elif bitmap_id == AUI_BUTTON_LEFT: if button_state & AUI_BUTTON_STATE_DISABLED: bmp = self._disabled_left_bmp else: bmp = self._active_left_bmp elif bitmap_id == AUI_BUTTON_RIGHT: if button_state & AUI_BUTTON_STATE_DISABLED: bmp = self._disabled_right_bmp else: bmp = self._active_right_bmp elif bitmap_id == AUI_BUTTON_WINDOWLIST: if button_state & AUI_BUTTON_STATE_DISABLED: bmp = self._disabled_windowlist_bmp else: bmp = self._active_windowlist_bmp else: if button_state & AUI_BUTTON_STATE_DISABLED: bmp = button.dis_bitmap else: bmp = button.bitmap if not bmp.IsOk(): return rect = wx.Rect(*in_rect) if orientation == wx.LEFT: rect.SetX(in_rect.x) rect.SetY(((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2)) rect.SetWidth(bmp.GetWidth()) rect.SetHeight(bmp.GetHeight()) else: rect = wx.Rect(in_rect.x + in_rect.width - bmp.GetWidth(), ((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2), bmp.GetWidth(), bmp.GetHeight()) rect = IndentPressedBitmap(rect, button_state) dc.DrawBitmap(bmp, rect.x, rect.y, True) out_rect = rect if bitmap_id == AUI_BUTTON_RIGHT: self._buttonRect = wx.Rect(rect.x, rect.y, 30, rect.height) return out_rect def DrawFocusRectangle(self, dc, page, wnd, draw_text, text_offset, bitmap_offset, drawn_tab_yoff, drawn_tab_height, textx, texty): """ Draws the focus rectangle on a tab. :param `dc`: a `wx.DC` device context; :param `page`: the page associated with the tab; :param `wnd`: a `wx.Window` instance object; :param `draw_text`: the text that has been drawn on the tab; :param `text_offset`: the text offset on the tab; :param `bitmap_offset`: the bitmap offset on the tab; :param `drawn_tab_yoff`: the y offset of the tab text; :param `drawn_tab_height`: the height of the tab; :param `textx`: the x text extent; :param `texty`: the y text extent. 
""" if page.active and wx.Window.FindFocus() == wnd: focusRectText = wx.Rect(text_offset, (drawn_tab_yoff + (drawn_tab_height)/2 - (texty/2)), textx, texty) if page.bitmap.IsOk(): focusRectBitmap = wx.Rect(bitmap_offset, drawn_tab_yoff + (drawn_tab_height/2) - (page.bitmap.GetHeight()/2), page.bitmap.GetWidth(), page.bitmap.GetHeight()) if page.bitmap.IsOk() and draw_text == "": focusRect = wx.Rect(*focusRectBitmap) elif not page.bitmap.IsOk() and draw_text != "": focusRect = wx.Rect(*focusRectText) elif page.bitmap.IsOk() and draw_text != "": focusRect = focusRectText.Union(focusRectBitmap) focusRect.Inflate(2, 2) dc.SetBrush(wx.TRANSPARENT_BRUSH) dc.SetPen(self._focusPen) dc.DrawRoundedRectangleRect(focusRect, 2) def GetBestTabCtrlSize(self, wnd, pages, required_bmp_size): """ Returns the best tab control size. :param `wnd`: a `wx.Window` instance object; :param `pages`: the pages associated with the tabs; :param `required_bmp_size`: the size of the bitmap on the tabs. """ dc = wx.ClientDC(wnd) dc.SetFont(self._measuring_font) # sometimes a standard bitmap size needs to be enforced, especially # if some tabs have bitmaps and others don't. This is important because # it prevents the tab control from resizing when tabs are added. measure_bmp = wx.NullBitmap if required_bmp_size.IsFullySpecified(): measure_bmp = wx.EmptyBitmap(required_bmp_size.x, required_bmp_size.y) max_y = 0 for page in pages: if measure_bmp.IsOk(): bmp = measure_bmp else: bmp = page.bitmap # we don't use the caption text because we don't # want tab heights to be different in the case # of a very short piece of text on one tab and a very # tall piece of text on another tab s, x_ext = self.GetTabSize(dc, wnd, page.caption, bmp, True, AUI_BUTTON_STATE_HIDDEN, None) max_y = max(max_y, s[1]) if page.control: controlW, controlH = page.control.GetSize() max_y = max(max_y, controlH+4) return max_y + 2 def SetNormalFont(self, font): """ Sets the normal font for drawing tab labels. :param `font`: a `wx.Font` object. """ self._normal_font = font def SetSelectedFont(self, font): """ Sets the selected tab font for drawing tab labels. :param `font`: a `wx.Font` object. """ self._selected_font = font def SetMeasuringFont(self, font): """ Sets the font for calculating text measurements. :param `font`: a `wx.Font` object. """ self._measuring_font = font def GetNormalFont(self): """ Returns the normal font for drawing tab labels. """ return self._normal_font def GetSelectedFont(self): """ Returns the selected tab font for drawing tab labels. """ return self._selected_font def GetMeasuringFont(self): """ Returns the font for calculating text measurements. """ return self._measuring_font def ShowDropDown(self, wnd, pages, active_idx): """ Shows the drop-down window menu on the tab area. :param `wnd`: a `wx.Window` derived window instance; :param `pages`: the pages associated with the tabs; :param `active_idx`: the active tab index. """ useImages = self.GetAGWFlags() & AUI_NB_USE_IMAGES_DROPDOWN menuPopup = wx.Menu() longest = 0 for i, page in enumerate(pages): caption = page.caption # if there is no caption, make it a space. This will prevent # an assert in the menu code. 
if caption == "": caption = " " # Save longest caption width for calculating menu width with width = wnd.GetTextExtent(caption)[0] if width > longest: longest = width if useImages: menuItem = wx.MenuItem(menuPopup, 1000+i, caption) if page.bitmap: menuItem.SetBitmap(page.bitmap) menuPopup.AppendItem(menuItem) else: menuPopup.AppendCheckItem(1000+i, caption) menuPopup.Enable(1000+i, page.enabled) if active_idx != -1 and not useImages: menuPopup.Check(1000+active_idx, True) # find out the screen coordinate at the bottom of the tab ctrl cli_rect = wnd.GetClientRect() # Calculate the approximate size of the popupmenu for setting the # position of the menu when its shown. # Account for extra padding on left/right of text on mac menus if wx.Platform in ['__WXMAC__', '__WXMSW__']: longest += 32 # Bitmap/Checkmark width + padding longest += 20 if self.GetAGWFlags() & AUI_NB_CLOSE_BUTTON: longest += 16 pt = wx.Point(cli_rect.x + cli_rect.GetWidth() - longest, cli_rect.y + cli_rect.height) cc = AuiCommandCapture() wnd.PushEventHandler(cc) wnd.PopupMenu(menuPopup, pt) command = cc.GetCommandId() wnd.PopEventHandler(True) if command >= 1000: return command - 1000 return -1 class AuiSimpleTabArt(object): """ A simple-looking implementation of a tab art. """ def __init__(self): """ Default class constructor. """ self._normal_font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT) self._selected_font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT) self._selected_font.SetWeight(wx.BOLD) self._measuring_font = self._selected_font self._agwFlags = 0 self._fixed_tab_width = 100 base_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE) background_colour = base_colour normaltab_colour = base_colour selectedtab_colour = wx.WHITE self._bkbrush = wx.Brush(background_colour) self._normal_bkbrush = wx.Brush(normaltab_colour) self._normal_bkpen = wx.Pen(normaltab_colour) self._selected_bkbrush = wx.Brush(selectedtab_colour) self._selected_bkpen = wx.Pen(selectedtab_colour) self._active_close_bmp = BitmapFromBits(nb_close_bits, 16, 16, wx.BLACK) self._disabled_close_bmp = BitmapFromBits(nb_close_bits, 16, 16, wx.Colour(128, 128, 128)) self._active_left_bmp = BitmapFromBits(nb_left_bits, 16, 16, wx.BLACK) self._disabled_left_bmp = BitmapFromBits(nb_left_bits, 16, 16, wx.Colour(128, 128, 128)) self._active_right_bmp = BitmapFromBits(nb_right_bits, 16, 16, wx.BLACK) self._disabled_right_bmp = BitmapFromBits(nb_right_bits, 16, 16, wx.Colour(128, 128, 128)) self._active_windowlist_bmp = BitmapFromBits(nb_list_bits, 16, 16, wx.BLACK) self._disabled_windowlist_bmp = BitmapFromBits(nb_list_bits, 16, 16, wx.Colour(128, 128, 128)) def Clone(self): """ Clones the art object. """ art = AuiSimpleTabArt() art.SetNormalFont(self.GetNormalFont()) art.SetSelectedFont(self.GetSelectedFont()) art.SetMeasuringFont(self.GetMeasuringFont()) art = CopyAttributes(art, self) return art def SetAGWFlags(self, agwFlags): """ Sets the tab art flags. :param `agwFlags`: a combination of the following values: ==================================== ================================== Flag name Description ==================================== ================================== ``AUI_NB_TOP`` With this style, tabs are drawn along the top of the notebook ``AUI_NB_LEFT`` With this style, tabs are drawn along the left of the notebook. Not implemented yet. ``AUI_NB_RIGHT`` With this style, tabs are drawn along the right of the notebook. Not implemented yet. ``AUI_NB_BOTTOM`` With this style, tabs are drawn along the bottom of the notebook. 

class AuiSimpleTabArt(object):
    """ A simple-looking implementation of a tab art. """

    def __init__(self):
        """ Default class constructor. """

        self._normal_font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
        self._selected_font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
        self._selected_font.SetWeight(wx.BOLD)
        self._measuring_font = self._selected_font

        self._agwFlags = 0
        self._fixed_tab_width = 100

        base_colour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE)

        background_colour = base_colour
        normaltab_colour = base_colour
        selectedtab_colour = wx.WHITE

        self._bkbrush = wx.Brush(background_colour)
        self._normal_bkbrush = wx.Brush(normaltab_colour)
        self._normal_bkpen = wx.Pen(normaltab_colour)
        self._selected_bkbrush = wx.Brush(selectedtab_colour)
        self._selected_bkpen = wx.Pen(selectedtab_colour)

        self._active_close_bmp = BitmapFromBits(nb_close_bits, 16, 16, wx.BLACK)
        self._disabled_close_bmp = BitmapFromBits(nb_close_bits, 16, 16, wx.Colour(128, 128, 128))

        self._active_left_bmp = BitmapFromBits(nb_left_bits, 16, 16, wx.BLACK)
        self._disabled_left_bmp = BitmapFromBits(nb_left_bits, 16, 16, wx.Colour(128, 128, 128))

        self._active_right_bmp = BitmapFromBits(nb_right_bits, 16, 16, wx.BLACK)
        self._disabled_right_bmp = BitmapFromBits(nb_right_bits, 16, 16, wx.Colour(128, 128, 128))

        self._active_windowlist_bmp = BitmapFromBits(nb_list_bits, 16, 16, wx.BLACK)
        self._disabled_windowlist_bmp = BitmapFromBits(nb_list_bits, 16, 16, wx.Colour(128, 128, 128))

    def Clone(self):
        """ Clones the art object. """

        art = AuiSimpleTabArt()
        art.SetNormalFont(self.GetNormalFont())
        art.SetSelectedFont(self.GetSelectedFont())
        art.SetMeasuringFont(self.GetMeasuringFont())

        art = CopyAttributes(art, self)
        return art

    def SetAGWFlags(self, agwFlags):
        """
        Sets the tab art flags.

        :param `agwFlags`: a combination of the following values:

         ==================================== ==================================
         Flag name                            Description
         ==================================== ==================================
         ``AUI_NB_TOP``                       With this style, tabs are drawn along the top of the notebook
         ``AUI_NB_LEFT``                      With this style, tabs are drawn along the left of the notebook. Not implemented yet.
         ``AUI_NB_RIGHT``                     With this style, tabs are drawn along the right of the notebook. Not implemented yet.
         ``AUI_NB_BOTTOM``                    With this style, tabs are drawn along the bottom of the notebook
         ``AUI_NB_TAB_SPLIT``                 Allows the tab control to be split by dragging a tab
         ``AUI_NB_TAB_MOVE``                  Allows a tab to be moved horizontally by dragging
         ``AUI_NB_TAB_EXTERNAL_MOVE``         Allows a tab to be moved to another tab control
         ``AUI_NB_TAB_FIXED_WIDTH``           With this style, all tabs have the same width
         ``AUI_NB_SCROLL_BUTTONS``            With this style, left and right scroll buttons are displayed
         ``AUI_NB_WINDOWLIST_BUTTON``         With this style, a drop-down list of windows is available
         ``AUI_NB_CLOSE_BUTTON``              With this style, a close button is available on the tab bar
         ``AUI_NB_CLOSE_ON_ACTIVE_TAB``       With this style, a close button is available on the active tab
         ``AUI_NB_CLOSE_ON_ALL_TABS``         With this style, a close button is available on all tabs
         ``AUI_NB_MIDDLE_CLICK_CLOSE``        Allows to close AuiNotebook tabs by mouse middle button click
         ``AUI_NB_SUB_NOTEBOOK``              This style is used by AuiManager to create automatic AuiNotebooks
         ``AUI_NB_HIDE_ON_SINGLE_TAB``        Hides the tab window if only one tab is present
         ``AUI_NB_SMART_TABS``                Use Smart Tabbing, like ``Alt``+``Tab`` on Windows
         ``AUI_NB_USE_IMAGES_DROPDOWN``       Uses images on dropdown window list menu instead of check items
         ``AUI_NB_CLOSE_ON_TAB_LEFT``         Draws the tab close button on the left instead of on the right (a la Camino browser)
         ``AUI_NB_TAB_FLOAT``                 Allows the floating of single tabs. Known limitation: when the notebook is more or less full screen, tabs cannot be dragged far enough outside of the notebook to become floating pages
         ``AUI_NB_DRAW_DND_TAB``              Draws an image representation of a tab while dragging (on by default)
         ==================================== ==================================
        """

        self._agwFlags = agwFlags

    def GetAGWFlags(self):
        """
        Returns the tab art flags.

        :see: L{SetAGWFlags} for a list of possible return values.
        """

        return self._agwFlags

    def SetSizingInfo(self, tab_ctrl_size, tab_count, minMaxTabWidth):
        """
        Sets the tab sizing information.

        :param `tab_ctrl_size`: the size of the tab control area;
        :param `tab_count`: the number of tabs;
        :param `minMaxTabWidth`: the minimum and maximum tab widths to be used
         when the ``AUI_NB_TAB_FIXED_WIDTH`` style is active.
        """

        self._fixed_tab_width = 100
        minTabWidth, maxTabWidth = minMaxTabWidth

        tot_width = tab_ctrl_size.x - self.GetIndentSize() - 4

        if self._agwFlags & AUI_NB_CLOSE_BUTTON:
            tot_width -= self._active_close_bmp.GetWidth()
        if self._agwFlags & AUI_NB_WINDOWLIST_BUTTON:
            tot_width -= self._active_windowlist_bmp.GetWidth()

        if tab_count > 0:
            self._fixed_tab_width = tot_width/tab_count

        if self._fixed_tab_width < 100:
            self._fixed_tab_width = 100

        if self._fixed_tab_width > tot_width/2:
            self._fixed_tab_width = tot_width/2

        if self._fixed_tab_width > 220:
            self._fixed_tab_width = 220

        if minTabWidth > -1:
            self._fixed_tab_width = max(self._fixed_tab_width, minTabWidth)
        if maxTabWidth > -1:
            self._fixed_tab_width = min(self._fixed_tab_width, maxTabWidth)

        self._tab_ctrl_height = tab_ctrl_size.y

    def DrawBackground(self, dc, wnd, rect):
        """
        Draws the tab area background.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `rect`: the tab control rectangle.
        """

        # draw background
        dc.SetBrush(self._bkbrush)
        dc.SetPen(wx.TRANSPARENT_PEN)
        dc.DrawRectangle(-1, -1, rect.GetWidth()+2, rect.GetHeight()+2)

        # draw base line
        dc.SetPen(wx.GREY_PEN)
        dc.DrawLine(0, rect.GetHeight()-1, rect.GetWidth(), rect.GetHeight()-1)
    def DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control=False):
        """
        Draws a single tab.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `page`: the tab control page associated with the tab;
        :param `in_rect`: rectangle the tab should be confined to;
        :param `close_button_state`: the state of the close button on the tab;
        :param `paint_control`: whether to draw the control inside a tab (if any)
         on a `wx.MemoryDC`.
        """

        # if the caption is empty, measure some temporary text
        caption = page.caption
        if caption == "":
            caption = "Xj"

        agwFlags = self.GetAGWFlags()

        dc.SetFont(self._selected_font)
        selected_textx, selected_texty, dummy = dc.GetMultiLineTextExtent(caption)

        dc.SetFont(self._normal_font)
        normal_textx, normal_texty, dummy = dc.GetMultiLineTextExtent(caption)

        control = page.control

        # figure out the size of the tab
        tab_size, x_extent = self.GetTabSize(dc, wnd, page.caption, page.bitmap,
                                             page.active, close_button_state, control)

        tab_height = tab_size[1]
        tab_width = tab_size[0]
        tab_x = in_rect.x
        tab_y = in_rect.y + in_rect.height - tab_height

        caption = page.caption

        # select pen, brush and font for the tab to be drawn
        if page.active:
            dc.SetPen(self._selected_bkpen)
            dc.SetBrush(self._selected_bkbrush)
            dc.SetFont(self._selected_font)
            textx = selected_textx
            texty = selected_texty
        else:
            dc.SetPen(self._normal_bkpen)
            dc.SetBrush(self._normal_bkbrush)
            dc.SetFont(self._normal_font)
            textx = normal_textx
            texty = normal_texty

        if not page.enabled:
            dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT))
        else:
            dc.SetTextForeground(page.text_colour)

        # -- draw line --
        points = [wx.Point() for i in xrange(7)]

        points[0].x = tab_x
        points[0].y = tab_y + tab_height - 1
        points[1].x = tab_x + tab_height - 3
        points[1].y = tab_y + 2
        points[2].x = tab_x + tab_height + 3
        points[2].y = tab_y
        points[3].x = tab_x + tab_width - 2
        points[3].y = tab_y
        points[4].x = tab_x + tab_width
        points[4].y = tab_y + 2
        points[5].x = tab_x + tab_width
        points[5].y = tab_y + tab_height - 1
        points[6] = points[0]

        dc.SetClippingRect(in_rect)
        dc.DrawPolygon(points)

        dc.SetPen(wx.GREY_PEN)
        dc.DrawLines(points)

        close_button_width = 0

        if close_button_state != AUI_BUTTON_STATE_HIDDEN:
            close_button_width = self._active_close_bmp.GetWidth()
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                if control:
                    text_offset = tab_x + (tab_height/2) + close_button_width - (textx/2) - 2
                else:
                    text_offset = tab_x + (tab_height/2) + ((tab_width+close_button_width)/2) - (textx/2) - 2
            else:
                if control:
                    text_offset = tab_x + (tab_height/2) + close_button_width - (textx/2)
                else:
                    text_offset = tab_x + (tab_height/2) + ((tab_width-close_button_width)/2) - (textx/2)

        else:
            text_offset = tab_x + (tab_height/3) + (tab_width/2) - (textx/2)
            if control:
                if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                    text_offset = tab_x + (tab_height/3) - (textx/2) + close_button_width + 2
                else:
                    text_offset = tab_x + (tab_height/3) - (textx/2)

        # set minimum text offset
        if text_offset < tab_x + tab_height:
            text_offset = tab_x + tab_height

        # chop text if necessary
        if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
            draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x))
        else:
            draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x) - close_button_width)

        ypos = (tab_y + tab_height)/2 - (texty/2) + 1

        if control is not None:
            if control.GetPosition() != wx.Point(text_offset+1, ypos):
                control.SetPosition(wx.Point(text_offset+1, ypos))

            if not control.IsShown():
                control.Show()

            if paint_control:
                bmp = TakeScreenShot(control.GetScreenRect())
                dc.DrawBitmap(bmp, text_offset+1, ypos, True)

            controlW, controlH = control.GetSize()
            text_offset += controlW + 4
        # draw tab text
        rectx, recty, dummy = dc.GetMultiLineTextExtent(draw_text)
        dc.DrawLabel(draw_text, wx.Rect(text_offset, ypos, rectx, recty))

        # draw focus rectangle
        if page.active and wx.Window.FindFocus() == wnd:

            focusRect = wx.Rect(text_offset, ((tab_y + tab_height)/2 - (texty/2) + 1),
                                selected_textx, selected_texty)

            focusRect.Inflate(2, 2)

            # TODO:
            # This should be uncommented when DrawFocusRect will become
            # available in wxPython
            # wx.RendererNative.Get().DrawFocusRect(wnd, dc, focusRect, 0)

        out_button_rect = wx.Rect()

        # draw close button if necessary
        if close_button_state != AUI_BUTTON_STATE_HIDDEN:

            if page.active:
                bmp = self._active_close_bmp
            else:
                bmp = self._disabled_close_bmp

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                rect = wx.Rect(tab_x + tab_height - 2,
                               tab_y + (tab_height/2) - (bmp.GetHeight()/2) + 1,
                               close_button_width, tab_height - 1)
            else:
                rect = wx.Rect(tab_x + tab_width - close_button_width - 1,
                               tab_y + (tab_height/2) - (bmp.GetHeight()/2) + 1,
                               close_button_width, tab_height - 1)

            self.DrawButtons(dc, rect, bmp, wx.WHITE, close_button_state)
            out_button_rect = wx.Rect(*rect)

        out_tab_rect = wx.Rect(tab_x, tab_y, tab_width, tab_height)
        dc.DestroyClippingRegion()

        return out_tab_rect, out_button_rect, x_extent

    def DrawButtons(self, dc, _rect, bmp, bkcolour, button_state):
        """
        Convenience method to draw tab buttons.

        :param `dc`: a `wx.DC` device context;
        :param `_rect`: the tab rectangle;
        :param `bmp`: the tab bitmap;
        :param `bkcolour`: the tab background colour;
        :param `button_state`: the state of the tab button.
        """

        rect = wx.Rect(*_rect)

        if button_state == AUI_BUTTON_STATE_PRESSED:
            rect.x += 1
            rect.y += 1

        if button_state in [AUI_BUTTON_STATE_HOVER, AUI_BUTTON_STATE_PRESSED]:
            dc.SetBrush(wx.Brush(StepColour(bkcolour, 120)))
            dc.SetPen(wx.Pen(StepColour(bkcolour, 75)))

            # draw the background behind the button
            dc.DrawRectangle(rect.x, rect.y, 15, 15)

        # draw the button itself
        dc.DrawBitmap(bmp, rect.x, rect.y, True)

    def GetIndentSize(self):
        """ Returns the tabs indent size. """

        return 0

    def GetTabSize(self, dc, wnd, caption, bitmap, active, close_button_state, control=None):
        """
        Returns the tab size for the given caption, bitmap and button state.

        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `caption`: the tab text caption;
        :param `bitmap`: the bitmap displayed on the tab;
        :param `active`: whether the tab is selected or not;
        :param `close_button_state`: the state of the close button on the tab;
        :param `control`: a `wx.Window` instance inside a tab (or ``None``).
        """

        dc.SetFont(self._measuring_font)
        measured_textx, measured_texty, dummy = dc.GetMultiLineTextExtent(caption)

        tab_height = measured_texty + 4
        tab_width = measured_textx + tab_height + 5

        if close_button_state != AUI_BUTTON_STATE_HIDDEN:
            tab_width += self._active_close_bmp.GetWidth()

        if self._agwFlags & AUI_NB_TAB_FIXED_WIDTH:
            tab_width = self._fixed_tab_width

        if control is not None:
            controlW, controlH = control.GetSize()
            tab_width += controlW + 4

        x_extent = tab_width - (tab_height/2) - 1

        return (tab_width, tab_height), x_extent
""" bitmap_id, button_state = button.id, button.cur_state if bitmap_id == AUI_BUTTON_CLOSE: if button_state & AUI_BUTTON_STATE_DISABLED: bmp = self._disabled_close_bmp else: bmp = self._active_close_bmp elif bitmap_id == AUI_BUTTON_LEFT: if button_state & AUI_BUTTON_STATE_DISABLED: bmp = self._disabled_left_bmp else: bmp = self._active_left_bmp elif bitmap_id == AUI_BUTTON_RIGHT: if button_state & AUI_BUTTON_STATE_DISABLED: bmp = self._disabled_right_bmp else: bmp = self._active_right_bmp elif bitmap_id == AUI_BUTTON_WINDOWLIST: if button_state & AUI_BUTTON_STATE_DISABLED: bmp = self._disabled_windowlist_bmp else: bmp = self._active_windowlist_bmp else: if button_state & AUI_BUTTON_STATE_DISABLED: bmp = button.dis_bitmap else: bmp = button.bitmap if not bmp.IsOk(): return rect = wx.Rect(*in_rect) if orientation == wx.LEFT: rect.SetX(in_rect.x) rect.SetY(((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2)) rect.SetWidth(bmp.GetWidth()) rect.SetHeight(bmp.GetHeight()) else: rect = wx.Rect(in_rect.x + in_rect.width - bmp.GetWidth(), ((in_rect.y + in_rect.height)/2) - (bmp.GetHeight()/2), bmp.GetWidth(), bmp.GetHeight()) self.DrawButtons(dc, rect, bmp, wx.WHITE, button_state) out_rect = wx.Rect(*rect) return out_rect def ShowDropDown(self, wnd, pages, active_idx): """ Shows the drop-down window menu on the tab area. :param `wnd`: a `wx.Window` derived window instance; :param `pages`: the pages associated with the tabs; :param `active_idx`: the active tab index. """ menuPopup = wx.Menu() useImages = self.GetAGWFlags() & AUI_NB_USE_IMAGES_DROPDOWN for i, page in enumerate(pages): if useImages: menuItem = wx.MenuItem(menuPopup, 1000+i, page.caption) if page.bitmap: menuItem.SetBitmap(page.bitmap) menuPopup.AppendItem(menuItem) else: menuPopup.AppendCheckItem(1000+i, page.caption) menuPopup.Enable(1000+i, page.enabled) if active_idx != -1 and not useImages: menuPopup.Check(1000+active_idx, True) # find out where to put the popup menu of window # items. Subtract 100 for now to center the menu # a bit, until a better mechanism can be implemented pt = wx.GetMousePosition() pt = wnd.ScreenToClient(pt) if pt.x < 100: pt.x = 0 else: pt.x -= 100 # find out the screen coordinate at the bottom of the tab ctrl cli_rect = wnd.GetClientRect() pt.y = cli_rect.y + cli_rect.height cc = AuiCommandCapture() wnd.PushEventHandler(cc) wnd.PopupMenu(menuPopup, pt) command = cc.GetCommandId() wnd.PopEventHandler(True) if command >= 1000: return command-1000 return -1 def GetBestTabCtrlSize(self, wnd, pages, required_bmp_size): """ Returns the best tab control size. :param `wnd`: a `wx.Window` instance object; :param `pages`: the pages associated with the tabs; :param `required_bmp_size`: the size of the bitmap on the tabs. """ dc = wx.ClientDC(wnd) dc.SetFont(self._measuring_font) s, x_extent = self.GetTabSize(dc, wnd, "ABCDEFGHIj", wx.NullBitmap, True, AUI_BUTTON_STATE_HIDDEN, None) max_y = s[1] for page in pages: if page.control: controlW, controlH = page.control.GetSize() max_y = max(max_y, controlH+4) textx, texty, dummy = dc.GetMultiLineTextExtent(page.caption) max_y = max(max_y, texty) return max_y + 3 def SetNormalFont(self, font): """ Sets the normal font for drawing tab labels. :param `font`: a `wx.Font` object. """ self._normal_font = font def SetSelectedFont(self, font): """ Sets the selected tab font for drawing tab labels. :param `font`: a `wx.Font` object. """ self._selected_font = font def SetMeasuringFont(self, font): """ Sets the font for calculating text measurements. 
    def SetMeasuringFont(self, font):
        """
        Sets the font for calculating text measurements.

        :param `font`: a `wx.Font` object.
        """

        self._measuring_font = font

    def GetNormalFont(self):
        """ Returns the normal font for drawing tab labels. """

        return self._normal_font

    def GetSelectedFont(self):
        """ Returns the selected tab font for drawing tab labels. """

        return self._selected_font

    def GetMeasuringFont(self):
        """ Returns the font for calculating text measurements. """

        return self._measuring_font

    def SetCustomButton(self, bitmap_id, button_state, bmp):
        """
        Sets a custom bitmap for the close, left, right and window list buttons.

        :param `bitmap_id`: the button identifier;
        :param `button_state`: the button state;
        :param `bmp`: the custom bitmap to use for the button.
        """

        if bitmap_id == AUI_BUTTON_CLOSE:
            if button_state == AUI_BUTTON_STATE_NORMAL:
                self._active_close_bmp = bmp
                self._hover_close_bmp = self._active_close_bmp
                self._pressed_close_bmp = self._active_close_bmp
                self._disabled_close_bmp = self._active_close_bmp
            elif button_state == AUI_BUTTON_STATE_HOVER:
                self._hover_close_bmp = bmp
            elif button_state == AUI_BUTTON_STATE_PRESSED:
                self._pressed_close_bmp = bmp
            else:
                self._disabled_close_bmp = bmp

        elif bitmap_id == AUI_BUTTON_LEFT:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                self._disabled_left_bmp = bmp
            else:
                self._active_left_bmp = bmp

        elif bitmap_id == AUI_BUTTON_RIGHT:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                self._disabled_right_bmp = bmp
            else:
                self._active_right_bmp = bmp

        elif bitmap_id == AUI_BUTTON_WINDOWLIST:
            if button_state & AUI_BUTTON_STATE_DISABLED:
                self._disabled_windowlist_bmp = bmp
            else:
                self._active_windowlist_bmp = bmp
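# Replacing one of the stock button bitmaps goes through SetCustomButton();
# a small sketch assuming `art` is one of the providers above and `my_bmp`
# is a 16x16 wx.Bitmap:
#
#   art.SetCustomButton(AUI_BUTTON_CLOSE, AUI_BUTTON_STATE_NORMAL, my_bmp)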
""" # Visual studio 7.1 style # This code is based on the renderer included in FlatNotebook # figure out the size of the tab control = page.control tab_size, x_extent = self.GetTabSize(dc, wnd, page.caption, page.bitmap, page.active, close_button_state, control) tab_height = self._tab_ctrl_height - 3 tab_width = tab_size[0] tab_x = in_rect.x tab_y = in_rect.y + in_rect.height - tab_height clip_width = tab_width if tab_x + clip_width > in_rect.x + in_rect.width - 4: clip_width = (in_rect.x + in_rect.width) - tab_x - 4 dc.SetClippingRegion(tab_x, tab_y, clip_width + 1, tab_height - 3) agwFlags = self.GetAGWFlags() if agwFlags & AUI_NB_BOTTOM: tab_y -= 1 dc.SetPen((page.active and [wx.Pen(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DHIGHLIGHT))] or \ [wx.Pen(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DSHADOW))])[0]) dc.SetBrush((page.active and [wx.Brush(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE))] or \ [wx.TRANSPARENT_BRUSH])[0]) if page.active: tabH = tab_height - 2 dc.DrawRectangle(tab_x, tab_y, tab_width, tabH) rightLineY1 = (agwFlags & AUI_NB_BOTTOM and [vertical_border_padding - 2] or \ [vertical_border_padding - 1])[0] rightLineY2 = tabH + 3 dc.SetPen(wx.Pen(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DSHADOW))) dc.DrawLine(tab_x + tab_width - 1, rightLineY1 + 1, tab_x + tab_width - 1, rightLineY2) if agwFlags & AUI_NB_BOTTOM: dc.DrawLine(tab_x + 1, rightLineY2 - 3 , tab_x + tab_width - 1, rightLineY2 - 3) dc.SetPen(wx.Pen(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DDKSHADOW))) dc.DrawLine(tab_x + tab_width, rightLineY1, tab_x + tab_width, rightLineY2) if agwFlags & AUI_NB_BOTTOM: dc.DrawLine(tab_x, rightLineY2 - 2, tab_x + tab_width, rightLineY2 - 2) else: # We dont draw a rectangle for non selected tabs, but only # vertical line on the right blackLineY1 = (agwFlags & AUI_NB_BOTTOM and [vertical_border_padding + 2] or \ [vertical_border_padding + 1])[0] blackLineY2 = tab_height - 5 dc.DrawLine(tab_x + tab_width, blackLineY1, tab_x + tab_width, blackLineY2) border_points = [0, 0] if agwFlags & AUI_NB_BOTTOM: border_points[0] = wx.Point(tab_x, tab_y) border_points[1] = wx.Point(tab_x, tab_y + tab_height - 6) else: # if (agwFlags & AUI_NB_TOP) border_points[0] = wx.Point(tab_x, tab_y + tab_height - 4) border_points[1] = wx.Point(tab_x, tab_y + 2) drawn_tab_yoff = border_points[1].y drawn_tab_height = border_points[0].y - border_points[1].y text_offset = tab_x + 8 close_button_width = 0 if close_button_state != AUI_BUTTON_STATE_HIDDEN: close_button_width = self._active_close_bmp.GetWidth() if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT: text_offset += close_button_width - 5 if not page.enabled: dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT)) pagebitmap = page.dis_bitmap else: dc.SetTextForeground(page.text_colour) pagebitmap = page.bitmap shift = 0 if agwFlags & AUI_NB_BOTTOM: shift = (page.active and [1] or [2])[0] bitmap_offset = 0 if pagebitmap.IsOk(): bitmap_offset = tab_x + 8 if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT and close_button_width: bitmap_offset += close_button_width - 5 # draw bitmap dc.DrawBitmap(pagebitmap, bitmap_offset, drawn_tab_yoff + (drawn_tab_height/2) - (pagebitmap.GetHeight()/2) + shift, True) text_offset = bitmap_offset + pagebitmap.GetWidth() text_offset += 3 # bitmap padding else: if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT == 0 or not close_button_width: text_offset = tab_x + 8 # if the caption is empty, measure some temporary text caption = page.caption if caption == "": caption = "Xj" if page.active: dc.SetFont(self._selected_font) textx, 
texty, dummy = dc.GetMultiLineTextExtent(caption) else: dc.SetFont(self._normal_font) textx, texty, dummy = dc.GetMultiLineTextExtent(caption) draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x) - close_button_width) ypos = drawn_tab_yoff + (drawn_tab_height)/2 - (texty/2) - 1 + shift offset_focus = text_offset if control is not None: if control.GetPosition() != wx.Point(text_offset+1, ypos): control.SetPosition(wx.Point(text_offset+1, ypos)) if not control.IsShown(): control.Show() if paint_control: bmp = TakeScreenShot(control.GetScreenRect()) dc.DrawBitmap(bmp, text_offset+1, ypos, True) controlW, controlH = control.GetSize() text_offset += controlW + 4 textx += controlW + 4 # draw tab text rectx, recty, dummy = dc.GetMultiLineTextExtent(draw_text) dc.DrawLabel(draw_text, wx.Rect(text_offset, ypos, rectx, recty)) out_button_rect = wx.Rect() # draw focus rectangle self.DrawFocusRectangle(dc, page, wnd, draw_text, offset_focus, bitmap_offset, drawn_tab_yoff+shift, drawn_tab_height+shift, textx, texty) # draw 'x' on tab (if enabled) if close_button_state != AUI_BUTTON_STATE_HIDDEN: close_button_width = self._active_close_bmp.GetWidth() bmp = self._disabled_close_bmp if close_button_state == AUI_BUTTON_STATE_HOVER: bmp = self._hover_close_bmp elif close_button_state == AUI_BUTTON_STATE_PRESSED: bmp = self._pressed_close_bmp if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT: rect = wx.Rect(tab_x + 4, drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + shift, close_button_width, tab_height) else: rect = wx.Rect(tab_x + tab_width - close_button_width - 3, drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + shift, close_button_width, tab_height) # Indent the button if it is pressed down: rect = IndentPressedBitmap(rect, close_button_state) dc.DrawBitmap(bmp, rect.x, rect.y, True) out_button_rect = rect out_tab_rect = wx.Rect(tab_x, tab_y, tab_width, tab_height) dc.DestroyClippingRegion() return out_tab_rect, out_button_rect, x_extent class FF2TabArt(AuiDefaultTabArt): """ A class to draw tabs using the Firefox 2 (FF2) style. """ def __init__(self): """ Default class constructor. """ AuiDefaultTabArt.__init__(self) def Clone(self): """ Clones the art object. """ art = FF2TabArt() art.SetNormalFont(self.GetNormalFont()) art.SetSelectedFont(self.GetSelectedFont()) art.SetMeasuringFont(self.GetMeasuringFont()) art = CopyAttributes(art, self) return art def GetTabSize(self, dc, wnd, caption, bitmap, active, close_button_state, control): """ Returns the tab size for the given caption, bitmap and button state. :param `dc`: a `wx.DC` device context; :param `wnd`: a `wx.Window` instance object; :param `caption`: the tab text caption; :param `bitmap`: the bitmap displayed on the tab; :param `active`: whether the tab is selected or not; :param `close_button_state`: the state of the close button on the tab; :param `control`: a `wx.Window` instance inside a tab (or ``None``). """ tab_size, x_extent = AuiDefaultTabArt.GetTabSize(self, dc, wnd, caption, bitmap, active, close_button_state, control) tab_width, tab_height = tab_size # add some vertical padding tab_height += 2 return (tab_width, tab_height), x_extent def DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control=False): """ Draws a single tab. 
        :param `dc`: a `wx.DC` device context;
        :param `wnd`: a `wx.Window` instance object;
        :param `page`: the tab control page associated with the tab;
        :param `in_rect`: rectangle the tab should be confined to;
        :param `close_button_state`: the state of the close button on the tab;
        :param `paint_control`: whether to draw the control inside a tab (if any) on a `wx.MemoryDC`.
        """

        # Firefox 2 style

        control = page.control

        # figure out the size of the tab
        tab_size, x_extent = self.GetTabSize(dc, wnd, page.caption, page.bitmap,
                                             page.active, close_button_state, control)

        tab_height = self._tab_ctrl_height - 2
        tab_width = tab_size[0]
        tab_x = in_rect.x
        tab_y = in_rect.y + in_rect.height - tab_height
        clip_width = tab_width

        if tab_x + clip_width > in_rect.x + in_rect.width - 4:
            clip_width = (in_rect.x + in_rect.width) - tab_x - 4

        dc.SetClippingRegion(tab_x, tab_y, clip_width + 1, tab_height - 3)

        tabPoints = [wx.Point() for i in xrange(7)]

        adjust = 0
        if not page.active:
            adjust = 1

        agwFlags = self.GetAGWFlags()

        tabPoints[0].x = tab_x + 3
        tabPoints[0].y = (agwFlags & AUI_NB_BOTTOM and [3] or [tab_height - 2])[0]

        tabPoints[1].x = tabPoints[0].x
        tabPoints[1].y = (agwFlags & AUI_NB_BOTTOM and [tab_height - (vertical_border_padding + 2) - adjust] or \
                          [(vertical_border_padding + 2) + adjust])[0]

        tabPoints[2].x = tabPoints[1].x + 2
        tabPoints[2].y = (agwFlags & AUI_NB_BOTTOM and [tab_height - vertical_border_padding - adjust] or \
                          [vertical_border_padding + adjust])[0]

        tabPoints[3].x = tab_x + tab_width - 2
        tabPoints[3].y = tabPoints[2].y

        tabPoints[4].x = tabPoints[3].x + 2
        tabPoints[4].y = tabPoints[1].y

        tabPoints[5].x = tabPoints[4].x
        tabPoints[5].y = tabPoints[0].y

        tabPoints[6].x = tabPoints[0].x
        tabPoints[6].y = tabPoints[0].y

        rr = wx.RectPP(tabPoints[2], tabPoints[5])
        self.DrawTabBackground(dc, rr, page.active, (agwFlags & AUI_NB_BOTTOM) == 0)

        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        dc.SetPen(wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNSHADOW)))

        # Draw the tab as a rounded rectangle
        dc.DrawPolygon(tabPoints)

        if page.active:
            dc.DrawLine(tabPoints[0].x + 1, tabPoints[0].y, tabPoints[5].x, tabPoints[0].y)

        drawn_tab_yoff = tabPoints[1].y
        drawn_tab_height = tabPoints[0].y - tabPoints[2].y

        text_offset = tab_x + 8
        close_button_width = 0

        if close_button_state != AUI_BUTTON_STATE_HIDDEN:
            close_button_width = self._active_close_bmp.GetWidth()
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
                text_offset += close_button_width - 4

        if not page.enabled:
            dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT))
            pagebitmap = page.dis_bitmap
        else:
            dc.SetTextForeground(page.text_colour)
            pagebitmap = page.bitmap

        shift = -1
        if agwFlags & AUI_NB_BOTTOM:
            shift = 2

        bitmap_offset = 0

        if pagebitmap.IsOk():

            bitmap_offset = tab_x + 8
            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT and close_button_width:
                bitmap_offset += close_button_width - 4

            # draw bitmap
            dc.DrawBitmap(pagebitmap, bitmap_offset,
                          drawn_tab_yoff + (drawn_tab_height/2) - (pagebitmap.GetHeight()/2) + shift,
                          True)

            text_offset = bitmap_offset + pagebitmap.GetWidth()
            text_offset += 3  # bitmap padding

        else:

            if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT == 0 or not close_button_width:
                text_offset = tab_x + 8

        # if the caption is empty, measure some temporary text
        caption = page.caption
        if caption == "":
            caption = "Xj"

        if page.active:
            dc.SetFont(self._selected_font)
            textx, texty, dummy = dc.GetMultiLineTextExtent(caption)
        else:
            dc.SetFont(self._normal_font)
            textx, texty, dummy = dc.GetMultiLineTextExtent(caption)

        if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT:
            draw_text = ChopText(dc, caption,
tab_width - (text_offset-tab_x) - close_button_width + 1) else: draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x) - close_button_width) ypos = drawn_tab_yoff + drawn_tab_height/2 - texty/2 - 1 + shift offset_focus = text_offset if control is not None: if control.GetPosition() != wx.Point(text_offset+1, ypos): control.SetPosition(wx.Point(text_offset+1, ypos)) if not control.IsShown(): control.Show() if paint_control: bmp = TakeScreenShot(control.GetScreenRect()) dc.DrawBitmap(bmp, text_offset+1, ypos, True) controlW, controlH = control.GetSize() text_offset += controlW + 4 textx += controlW + 4 # draw tab text rectx, recty, dummy = dc.GetMultiLineTextExtent(draw_text) dc.DrawLabel(draw_text, wx.Rect(text_offset, ypos, rectx, recty)) # draw focus rectangle self.DrawFocusRectangle(dc, page, wnd, draw_text, offset_focus, bitmap_offset, drawn_tab_yoff+shift, drawn_tab_height, textx, texty) out_button_rect = wx.Rect() # draw 'x' on tab (if enabled) if close_button_state != AUI_BUTTON_STATE_HIDDEN: close_button_width = self._active_close_bmp.GetWidth() bmp = self._disabled_close_bmp if close_button_state == AUI_BUTTON_STATE_HOVER: bmp = self._hover_close_bmp elif close_button_state == AUI_BUTTON_STATE_PRESSED: bmp = self._pressed_close_bmp if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT: rect = wx.Rect(tab_x + 5, drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + shift, close_button_width, tab_height) else: rect = wx.Rect(tab_x + tab_width - close_button_width - 3, drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + shift, close_button_width, tab_height) # Indent the button if it is pressed down: rect = IndentPressedBitmap(rect, close_button_state) dc.DrawBitmap(bmp, rect.x, rect.y, True) out_button_rect = rect out_tab_rect = wx.Rect(tab_x, tab_y, tab_width, tab_height) dc.DestroyClippingRegion() return out_tab_rect, out_button_rect, x_extent def DrawTabBackground(self, dc, rect, focus, upperTabs): """ Draws the tab background for the Firefox 2 style. This is more consistent with L{FlatNotebook} than before. :param `dc`: a `wx.DC` device context; :param `rect`: rectangle the tab should be confined to; :param `focus`: whether the tab has focus or not; :param `upperTabs`: whether the style is ``AUI_NB_TOP`` or ``AUI_NB_BOTTOM``. 
""" # Define the rounded rectangle base on the given rect # we need an array of 9 points for it regPts = [wx.Point() for indx in xrange(9)] if focus: if upperTabs: leftPt = wx.Point(rect.x, rect.y + (rect.height / 10)*8) rightPt = wx.Point(rect.x + rect.width - 2, rect.y + (rect.height / 10)*8) else: leftPt = wx.Point(rect.x, rect.y + (rect.height / 10)*5) rightPt = wx.Point(rect.x + rect.width - 2, rect.y + (rect.height / 10)*5) else: leftPt = wx.Point(rect.x, rect.y + (rect.height / 2)) rightPt = wx.Point(rect.x + rect.width - 2, rect.y + (rect.height / 2)) # Define the top region top = wx.RectPP(rect.GetTopLeft(), rightPt) bottom = wx.RectPP(leftPt, rect.GetBottomRight()) topStartColour = wx.WHITE if not focus: topStartColour = LightColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DFACE), 50) topEndColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DFACE) bottomStartColour = topEndColour bottomEndColour = topEndColour # Incase we use bottom tabs, switch the colours if upperTabs: if focus: dc.GradientFillLinear(top, topStartColour, topEndColour, wx.SOUTH) dc.GradientFillLinear(bottom, bottomStartColour, bottomEndColour, wx.SOUTH) else: dc.GradientFillLinear(top, topEndColour , topStartColour, wx.SOUTH) dc.GradientFillLinear(bottom, bottomStartColour, bottomEndColour, wx.SOUTH) else: if focus: dc.GradientFillLinear(bottom, topEndColour, bottomEndColour, wx.SOUTH) dc.GradientFillLinear(top, topStartColour, topStartColour, wx.SOUTH) else: dc.GradientFillLinear(bottom, bottomStartColour, bottomEndColour, wx.SOUTH) dc.GradientFillLinear(top, topEndColour, topStartColour, wx.SOUTH) dc.SetBrush(wx.TRANSPARENT_BRUSH) class VC8TabArt(AuiDefaultTabArt): """ A class to draw tabs using the Visual Studio 2005 (VC8) style. """ def __init__(self): """ Default class constructor. """ AuiDefaultTabArt.__init__(self) def Clone(self): """ Clones the art object. """ art = VC8TabArt() art.SetNormalFont(self.GetNormalFont()) art.SetSelectedFont(self.GetSelectedFont()) art.SetMeasuringFont(self.GetMeasuringFont()) art = CopyAttributes(art, self) return art def SetSizingInfo(self, tab_ctrl_size, tab_count, minMaxTabWidth): """ Sets the tab sizing information. :param `tab_ctrl_size`: the size of the tab control area; :param `tab_count`: the number of tabs; :param `minMaxTabWidth`: the minimum and maximum tab widths to be used when the ``AUI_NB_TAB_FIXED_WIDTH`` style is active. """ AuiDefaultTabArt.SetSizingInfo(self, tab_ctrl_size, tab_count, minMaxTabWidth) minTabWidth, maxTabWidth = minMaxTabWidth if minTabWidth > -1: self._fixed_tab_width = max(self._fixed_tab_width, minTabWidth) if maxTabWidth > -1: self._fixed_tab_width = min(self._fixed_tab_width, maxTabWidth) self._fixed_tab_width -= 5 def GetTabSize(self, dc, wnd, caption, bitmap, active, close_button_state, control=None): """ Returns the tab size for the given caption, bitmap and button state. :param `dc`: a `wx.DC` device context; :param `wnd`: a `wx.Window` instance object; :param `caption`: the tab text caption; :param `bitmap`: the bitmap displayed on the tab; :param `active`: whether the tab is selected or not; :param `close_button_state`: the state of the close button on the tab; :param `control`: a `wx.Window` instance inside a tab (or ``None``). 
""" tab_size, x_extent = AuiDefaultTabArt.GetTabSize(self, dc, wnd, caption, bitmap, active, close_button_state, control) tab_width, tab_height = tab_size # add some padding tab_width += 10 tab_height += 2 return (tab_width, tab_height), x_extent def DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control=False): """ Draws a single tab. :param `dc`: a `wx.DC` device context; :param `wnd`: a `wx.Window` instance object; :param `page`: the tab control page associated with the tab; :param `in_rect`: rectangle the tab should be confined to; :param `close_button_state`: the state of the close button on the tab; :param `paint_control`: whether to draw the control inside a tab (if any) on a `wx.MemoryDC`. """ # Visual Studio 8 style control = page.control # figure out the size of the tab tab_size, x_extent = self.GetTabSize(dc, wnd, page.caption, page.bitmap, page.active, close_button_state, control) tab_height = self._tab_ctrl_height - 1 tab_width = tab_size[0] tab_x = in_rect.x tab_y = in_rect.y + in_rect.height - tab_height clip_width = tab_width + 3 if tab_x + clip_width > in_rect.x + in_rect.width - 4: clip_width = (in_rect.x + in_rect.width) - tab_x - 4 tabPoints = [wx.Point() for i in xrange(8)] # If we draw the first tab or the active tab, # we draw a full tab, else we draw a truncated tab # # X(2) X(3) # X(1) X(4) # # X(5) # # X(0),(7) X(6) # # adjust = 0 if not page.active: adjust = 1 agwFlags = self.GetAGWFlags() tabPoints[0].x = (agwFlags & AUI_NB_BOTTOM and [tab_x] or [tab_x + adjust])[0] tabPoints[0].y = (agwFlags & AUI_NB_BOTTOM and [2] or [tab_height - 3])[0] tabPoints[1].x = tabPoints[0].x + tab_height - vertical_border_padding - 3 - adjust tabPoints[1].y = (agwFlags & AUI_NB_BOTTOM and [tab_height - (vertical_border_padding+2)] or \ [(vertical_border_padding+2)])[0] tabPoints[2].x = tabPoints[1].x + 4 tabPoints[2].y = (agwFlags & AUI_NB_BOTTOM and [tab_height - vertical_border_padding] or \ [vertical_border_padding])[0] tabPoints[3].x = tabPoints[2].x + tab_width - tab_height + vertical_border_padding tabPoints[3].y = (agwFlags & AUI_NB_BOTTOM and [tab_height - vertical_border_padding] or \ [vertical_border_padding])[0] tabPoints[4].x = tabPoints[3].x + 1 tabPoints[4].y = (agwFlags & AUI_NB_BOTTOM and [tabPoints[3].y - 1] or [tabPoints[3].y + 1])[0] tabPoints[5].x = tabPoints[4].x + 1 tabPoints[5].y = (agwFlags & AUI_NB_BOTTOM and [(tabPoints[4].y - 1)] or [tabPoints[4].y + 1])[0] tabPoints[6].x = tabPoints[2].x + tab_width - tab_height + 2 + vertical_border_padding tabPoints[6].y = tabPoints[0].y tabPoints[7].x = tabPoints[0].x tabPoints[7].y = tabPoints[0].y self.FillVC8GradientColour(dc, tabPoints, page.active) dc.SetBrush(wx.TRANSPARENT_BRUSH) dc.SetPen(wx.Pen(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNSHADOW))) dc.DrawPolygon(tabPoints) if page.active: # Delete the bottom line (or the upper one, incase we use wxBOTTOM) dc.SetPen(wx.WHITE_PEN) dc.DrawLine(tabPoints[0].x, tabPoints[0].y, tabPoints[6].x, tabPoints[6].y) dc.SetClippingRegion(tab_x, tab_y, clip_width + 2, tab_height - 3) drawn_tab_yoff = tabPoints[1].y drawn_tab_height = tabPoints[0].y - tabPoints[2].y text_offset = tab_x + 20 close_button_width = 0 if close_button_state != AUI_BUTTON_STATE_HIDDEN: close_button_width = self._active_close_bmp.GetWidth() if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT: text_offset += close_button_width if not page.enabled: dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT)) pagebitmap = page.dis_bitmap else: dc.SetTextForeground(page.text_colour) 
pagebitmap = page.bitmap shift = 0 if agwFlags & AUI_NB_BOTTOM: shift = (page.active and [1] or [2])[0] bitmap_offset = 0 if pagebitmap.IsOk(): bitmap_offset = tab_x + 20 if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT and close_button_width: bitmap_offset += close_button_width # draw bitmap dc.DrawBitmap(pagebitmap, bitmap_offset, drawn_tab_yoff + (drawn_tab_height/2) - (pagebitmap.GetHeight()/2) + shift, True) text_offset = bitmap_offset + pagebitmap.GetWidth() text_offset += 3 # bitmap padding else: if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT == 0 or not close_button_width: text_offset = tab_x + tab_height # if the caption is empty, measure some temporary text caption = page.caption if caption == "": caption = "Xj" if page.active: dc.SetFont(self._selected_font) textx, texty, dummy = dc.GetMultiLineTextExtent(caption) else: dc.SetFont(self._normal_font) textx, texty, dummy = dc.GetMultiLineTextExtent(caption) if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT: draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x)) else: draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x) - close_button_width) ypos = drawn_tab_yoff + drawn_tab_height/2 - texty/2 - 1 + shift offset_focus = text_offset if control is not None: if control.GetPosition() != wx.Point(text_offset+1, ypos): control.SetPosition(wx.Point(text_offset+1, ypos)) if not control.IsShown(): control.Show() if paint_control: bmp = TakeScreenShot(control.GetScreenRect()) dc.DrawBitmap(bmp, text_offset+1, ypos, True) controlW, controlH = control.GetSize() text_offset += controlW + 4 textx += controlW + 4 # draw tab text rectx, recty, dummy = dc.GetMultiLineTextExtent(draw_text) dc.DrawLabel(draw_text, wx.Rect(text_offset, ypos, rectx, recty)) # draw focus rectangle self.DrawFocusRectangle(dc, page, wnd, draw_text, offset_focus, bitmap_offset, drawn_tab_yoff+shift, drawn_tab_height+shift, textx, texty) out_button_rect = wx.Rect() # draw 'x' on tab (if enabled) if close_button_state != AUI_BUTTON_STATE_HIDDEN: close_button_width = self._active_close_bmp.GetWidth() bmp = self._disabled_close_bmp if close_button_state == AUI_BUTTON_STATE_HOVER: bmp = self._hover_close_bmp elif close_button_state == AUI_BUTTON_STATE_PRESSED: bmp = self._pressed_close_bmp if page.active: xpos = tab_x + tab_width - close_button_width + 3 else: xpos = tab_x + tab_width - close_button_width - 5 if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT: rect = wx.Rect(tab_x + 20, drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + shift, close_button_width, tab_height) else: rect = wx.Rect(xpos, drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + shift, close_button_width, tab_height) # Indent the button if it is pressed down: rect = IndentPressedBitmap(rect, close_button_state) dc.DrawBitmap(bmp, rect.x, rect.y, True) out_button_rect = rect out_tab_rect = wx.Rect(tab_x, tab_y, x_extent, tab_height) dc.DestroyClippingRegion() return out_tab_rect, out_button_rect, x_extent def FillVC8GradientColour(self, dc, tabPoints, active): """ Fills the tab with the Visual Studio 2005 gradient background. :param `dc`: a `wx.DC` device context; :param `tabPoints`: a list of `wx.Point` objects describing the tab shape; :param `active`: whether the tab is selected or not. 
""" xList = [pt.x for pt in tabPoints] yList = [pt.y for pt in tabPoints] minx, maxx = min(xList), max(xList) miny, maxy = min(yList), max(yList) rect = wx.Rect(minx, maxy, maxx-minx, miny-maxy+1) region = wx.RegionFromPoints(tabPoints) if self._buttonRect.width > 0: buttonRegion = wx.Region(*self._buttonRect) region.XorRegion(buttonRegion) dc.SetClippingRegionAsRegion(region) if active: bottom_colour = top_colour = wx.WHITE else: bottom_colour = StepColour(self._base_colour, 90) top_colour = StepColour(self._base_colour, 170) dc.GradientFillLinear(rect, top_colour, bottom_colour, wx.SOUTH) dc.DestroyClippingRegion() class ChromeTabArt(AuiDefaultTabArt): """ A class to draw tabs using the Google Chrome browser style. It uses custom bitmap to render the tabs, so that the look and feel is as close as possible to the Chrome style. """ def __init__(self): """ Default class constructor. """ AuiDefaultTabArt.__init__(self) self.SetBitmaps(mirror=False) closeBmp = tab_close.GetBitmap() closeHBmp = tab_close_h.GetBitmap() closePBmp = tab_close_p.GetBitmap() self.SetCustomButton(AUI_BUTTON_CLOSE, AUI_BUTTON_STATE_NORMAL, closeBmp) self.SetCustomButton(AUI_BUTTON_CLOSE, AUI_BUTTON_STATE_HOVER, closeHBmp) self.SetCustomButton(AUI_BUTTON_CLOSE, AUI_BUTTON_STATE_PRESSED, closePBmp) def SetAGWFlags(self, agwFlags): """ Sets the tab art flags. :param `agwFlags`: a combination of the following values: ==================================== ================================== Flag name Description ==================================== ================================== ``AUI_NB_TOP`` With this style, tabs are drawn along the top of the notebook ``AUI_NB_LEFT`` With this style, tabs are drawn along the left of the notebook. Not implemented yet. ``AUI_NB_RIGHT`` With this style, tabs are drawn along the right of the notebook. Not implemented yet. ``AUI_NB_BOTTOM`` With this style, tabs are drawn along the bottom of the notebook. ``AUI_NB_TAB_SPLIT`` Allows the tab control to be split by dragging a tab ``AUI_NB_TAB_MOVE`` Allows a tab to be moved horizontally by dragging ``AUI_NB_TAB_EXTERNAL_MOVE`` Allows a tab to be moved to another tab control ``AUI_NB_TAB_FIXED_WIDTH`` With this style, all tabs have the same width ``AUI_NB_SCROLL_BUTTONS`` With this style, left and right scroll buttons are displayed ``AUI_NB_WINDOWLIST_BUTTON`` With this style, a drop-down list of windows is available ``AUI_NB_CLOSE_BUTTON`` With this style, a close button is available on the tab bar ``AUI_NB_CLOSE_ON_ACTIVE_TAB`` With this style, a close button is available on the active tab ``AUI_NB_CLOSE_ON_ALL_TABS`` With this style, a close button is available on all tabs ``AUI_NB_MIDDLE_CLICK_CLOSE`` Allows to close AuiNotebook tabs by mouse middle button click ``AUI_NB_SUB_NOTEBOOK`` This style is used by AuiManager to create automatic AuiNotebooks ``AUI_NB_HIDE_ON_SINGLE_TAB`` Hides the tab window if only one tab is present ``AUI_NB_SMART_TABS`` Use Smart Tabbing, like ``Alt``+``Tab`` on Windows ``AUI_NB_USE_IMAGES_DROPDOWN`` Uses images on dropdown window list menu instead of check items ``AUI_NB_CLOSE_ON_TAB_LEFT`` Draws the tab close button on the left instead of on the right (a la Camino browser) ``AUI_NB_TAB_FLOAT`` Allows the floating of single tabs. 
Known limitation: when the notebook is more or less full screen, tabs cannot be dragged far enough outside of the notebook to become floating pages ``AUI_NB_DRAW_DND_TAB`` Draws an image representation of a tab while dragging (on by default) ==================================== ================================== :note: Overridden from L{AuiDefaultTabArt}. """ if agwFlags & AUI_NB_TOP: self.SetBitmaps(mirror=False) elif agwFlags & AUI_NB_BOTTOM: self.SetBitmaps(mirror=True) AuiDefaultTabArt.SetAGWFlags(self, agwFlags) def SetBitmaps(self, mirror): """ Assigns the tab custom bitmaps :param `mirror`: whether to vertically mirror the bitmap or not. """ bmps = [tab_active_left.GetBitmap(), tab_active_center.GetBitmap(), tab_active_right.GetBitmap(), tab_inactive_left.GetBitmap(), tab_inactive_center.GetBitmap(), tab_inactive_right.GetBitmap()] if mirror: for indx, bmp in enumerate(bmps): img = bmp.ConvertToImage() img = img.Mirror(horizontally=False) bmps[indx] = img.ConvertToBitmap() self._leftActiveBmp = bmps[0] self._centerActiveBmp = bmps[1] self._rightActiveBmp = bmps[2] self._leftInactiveBmp = bmps[3] self._centerInactiveBmp = bmps[4] self._rightInactiveBmp = bmps[5] def Clone(self): """ Clones the art object. """ art = ChromeTabArt() art.SetNormalFont(self.GetNormalFont()) art.SetSelectedFont(self.GetSelectedFont()) art.SetMeasuringFont(self.GetMeasuringFont()) art = CopyAttributes(art, self) return art def SetSizingInfo(self, tab_ctrl_size, tab_count, minMaxTabWidth): """ Sets the tab sizing information. :param `tab_ctrl_size`: the size of the tab control area; :param `tab_count`: the number of tabs; :param `minMaxTabWidth`: the minimum and maximum tab widths to be used when the ``AUI_NB_TAB_FIXED_WIDTH`` style is active. """ AuiDefaultTabArt.SetSizingInfo(self, tab_ctrl_size, tab_count, minMaxTabWidth) minTabWidth, maxTabWidth = minMaxTabWidth if minTabWidth > -1: self._fixed_tab_width = max(self._fixed_tab_width, minTabWidth) if maxTabWidth > -1: self._fixed_tab_width = min(self._fixed_tab_width, maxTabWidth) self._fixed_tab_width -= 5 def GetTabSize(self, dc, wnd, caption, bitmap, active, close_button_state, control=None): """ Returns the tab size for the given caption, bitmap and button state. :param `dc`: a `wx.DC` device context; :param `wnd`: a `wx.Window` instance object; :param `caption`: the tab text caption; :param `bitmap`: the bitmap displayed on the tab; :param `active`: whether the tab is selected or not; :param `close_button_state`: the state of the close button on the tab; :param `control`: a `wx.Window` instance inside a tab (or ``None``). """ tab_size, x_extent = AuiDefaultTabArt.GetTabSize(self, dc, wnd, caption, bitmap, active, close_button_state, control) tab_width, tab_height = tab_size # add some padding tab_width += self._leftActiveBmp.GetWidth() tab_height += 2 tab_height = max(tab_height, self._centerActiveBmp.GetHeight()) return (tab_width, tab_height), x_extent def DrawTab(self, dc, wnd, page, in_rect, close_button_state, paint_control=False): """ Draws a single tab. :param `dc`: a `wx.DC` device context; :param `wnd`: a `wx.Window` instance object; :param `page`: the tab control page associated with the tab; :param `in_rect`: rectangle the tab should be confined to; :param `close_button_state`: the state of the close button on the tab; :param `paint_control`: whether to draw the control inside a tab (if any) on a `wx.MemoryDC`. 
""" # Chrome tab style control = page.control # figure out the size of the tab tab_size, x_extent = self.GetTabSize(dc, wnd, page.caption, page.bitmap, page.active, close_button_state, control) agwFlags = self.GetAGWFlags() tab_height = self._tab_ctrl_height - 1 tab_width = tab_size[0] tab_x = in_rect.x tab_y = in_rect.y + in_rect.height - tab_height clip_width = tab_width if tab_x + clip_width > in_rect.x + in_rect.width - 4: clip_width = (in_rect.x + in_rect.width) - tab_x - 4 dc.SetClippingRegion(tab_x, tab_y, clip_width + 1, tab_height - 3) drawn_tab_yoff = 1 if page.active: left = self._leftActiveBmp center = self._centerActiveBmp right = self._rightActiveBmp else: left = self._leftInactiveBmp center = self._centerInactiveBmp right = self._rightInactiveBmp dc.DrawBitmap(left, tab_x, tab_y) leftw = left.GetWidth() centerw = center.GetWidth() rightw = right.GetWidth() available = tab_x + tab_width - rightw posx = tab_x + leftw while 1: if posx >= available: break dc.DrawBitmap(center, posx, tab_y) posx += centerw dc.DrawBitmap(right, posx, tab_y) drawn_tab_height = center.GetHeight() text_offset = tab_x + leftw close_button_width = 0 if close_button_state != AUI_BUTTON_STATE_HIDDEN: close_button_width = self._active_close_bmp.GetWidth() if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT: text_offset += close_button_width if not page.enabled: dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT)) pagebitmap = page.dis_bitmap else: dc.SetTextForeground(page.text_colour) pagebitmap = page.bitmap bitmap_offset = 0 if pagebitmap.IsOk(): bitmap_offset = tab_x + leftw if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT and close_button_width: bitmap_offset += close_button_width # draw bitmap dc.DrawBitmap(pagebitmap, bitmap_offset, drawn_tab_yoff + (drawn_tab_height/2) - (pagebitmap.GetHeight()/2), True) text_offset = bitmap_offset + pagebitmap.GetWidth() text_offset += 3 # bitmap padding else: if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT == 0 or not close_button_width: text_offset = tab_x + leftw # if the caption is empty, measure some temporary text caption = page.caption if caption == "": caption = "Xj" if page.active: dc.SetFont(self._selected_font) textx, texty, dummy = dc.GetMultiLineTextExtent(caption) else: dc.SetFont(self._normal_font) textx, texty, dummy = dc.GetMultiLineTextExtent(caption) if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT: draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x) - leftw) else: draw_text = ChopText(dc, caption, tab_width - (text_offset-tab_x) - close_button_width - leftw) ypos = drawn_tab_yoff + drawn_tab_height/2 - texty/2 - 1 if control is not None: if control.GetPosition() != wx.Point(text_offset+1, ypos): control.SetPosition(wx.Point(text_offset+1, ypos)) if not control.IsShown(): control.Show() if paint_control: bmp = TakeScreenShot(control.GetScreenRect()) dc.DrawBitmap(bmp, text_offset+1, ypos, True) controlW, controlH = control.GetSize() text_offset += controlW + 4 # draw tab text rectx, recty, dummy = dc.GetMultiLineTextExtent(draw_text) dc.DrawLabel(draw_text, wx.Rect(text_offset, ypos, rectx, recty)) out_button_rect = wx.Rect() # draw 'x' on tab (if enabled) if close_button_state != AUI_BUTTON_STATE_HIDDEN: close_button_width = self._active_close_bmp.GetWidth() bmp = self._disabled_close_bmp if close_button_state == AUI_BUTTON_STATE_HOVER: bmp = self._hover_close_bmp elif close_button_state == AUI_BUTTON_STATE_PRESSED: bmp = self._pressed_close_bmp if agwFlags & AUI_NB_CLOSE_ON_TAB_LEFT: rect = wx.Rect(tab_x + leftw - 2, drawn_tab_yoff + 
(drawn_tab_height / 2) - (bmp.GetHeight() / 2) + 1, close_button_width, tab_height) else: rect = wx.Rect(tab_x + tab_width - close_button_width - rightw + 2, drawn_tab_yoff + (drawn_tab_height / 2) - (bmp.GetHeight() / 2) + 1, close_button_width, tab_height) if agwFlags & AUI_NB_BOTTOM: rect.y -= 1 # Indent the button if it is pressed down: rect = IndentPressedBitmap(rect, close_button_state) dc.DrawBitmap(bmp, rect.x, rect.y, True) out_button_rect = rect out_tab_rect = wx.Rect(tab_x, tab_y, tab_width, tab_height) dc.DestroyClippingRegion() return out_tab_rect, out_button_rect, x_extent
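
# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module): a minimal sketch of
# how one of the tab art providers above is typically installed on an
# AuiNotebook via SetArtProvider(). It assumes this module lives at its usual
# location, wx.lib.agw.aui, and that a display is available; the frame title
# and page captions are made up for the example.
# ---------------------------------------------------------------------------

if __name__ == '__main__':

    import wx
    import wx.lib.agw.aui as aui

    app = wx.App(False)
    frame = wx.Frame(None, title="Tab art demo")

    # agwStyle combines the AUI_NB_* flags documented in SetAGWFlags above
    notebook = aui.AuiNotebook(frame, agwStyle=aui.AUI_NB_TOP | aui.AUI_NB_TAB_MOVE |
                                            aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
    notebook.AddPage(wx.Panel(notebook), "Page 1")
    notebook.AddPage(wx.Panel(notebook), "Page 2")

    # Swap the default renderer for one of the styles defined above;
    # VC71TabArt(), FF2TabArt() or VC8TabArt() would work the same way.
    notebook.SetArtProvider(aui.ChromeTabArt())

    frame.Show()
    app.MainLoop()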
[ "wx.Menu", "wx.Window.FindFocus", "aui_utilities.BitmapFromBits", "wx.GetMousePosition", "wx.SystemSettings_GetColour", "aui_utilities.DrawMACCloseButton", "wx.RectPP", "wx.PyEvtHandler.__init__", "wx.Colour", "wx.EmptyBitmap", "wx.SystemSettings.GetFont", "aui_utilities.CopyAttributes", "wx.SystemSettings_GetFont", "aui_utilities.StepColour", "wx.Pen", "wx.Region", "wx.Brush", "wx.MacThemeColour", "wx.RegionFromPoints", "wx.MenuItem", "aui_utilities.GetBaseColour", "aui_utilities.ChopText", "wx.Rect", "aui_utilities.IndentPressedBitmap", "wx.SystemSettings.GetColour", "wx.ClientDC", "wx.Point" ]
[((1112, 1142), 'wx.PyEvtHandler.__init__', 'wx.PyEvtHandler.__init__', (['self'], {}), '(self)\n', (1136, 1142), False, 'import wx\n'), ((4482, 4532), 'wx.SystemSettings_GetFont', 'wx.SystemSettings_GetFont', (['wx.SYS_DEFAULT_GUI_FONT'], {}), '(wx.SYS_DEFAULT_GUI_FONT)\n', (4507, 4532), False, 'import wx\n'), ((4563, 4613), 'wx.SystemSettings_GetFont', 'wx.SystemSettings_GetFont', (['wx.SYS_DEFAULT_GUI_FONT'], {}), '(wx.SYS_DEFAULT_GUI_FONT)\n', (4588, 4613), False, 'import wx\n'), ((4810, 4819), 'wx.Rect', 'wx.Rect', ([], {}), '()\n', (4817, 4819), False, 'import wx\n'), ((4843, 4858), 'aui_utilities.GetBaseColour', 'GetBaseColour', ([], {}), '()\n', (4856, 4858), False, 'from aui_utilities import GetBaseColour, DrawMACCloseButton, LightColour, TakeScreenShot\n'), ((4928, 4955), 'aui_utilities.StepColour', 'StepColour', (['base_colour', '(75)'], {}), '(base_colour, 75)\n', (4938, 4955), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((4984, 5005), 'wx.Pen', 'wx.Pen', (['border_colour'], {}), '(border_colour)\n', (4990, 5005), False, 'import wx\n'), ((5038, 5063), 'wx.Pen', 'wx.Pen', (['self._base_colour'], {}), '(self._base_colour)\n', (5044, 5063), False, 'import wx\n'), ((5098, 5125), 'wx.Brush', 'wx.Brush', (['self._base_colour'], {}), '(self._base_colour)\n', (5106, 5125), False, 'import wx\n'), ((5745, 5791), 'aui_utilities.BitmapFromBits', 'BitmapFromBits', (['nb_left_bits', '(16)', '(16)', 'wx.BLACK'], {}), '(nb_left_bits, 16, 16, wx.BLACK)\n', (5759, 5791), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((5923, 5970), 'aui_utilities.BitmapFromBits', 'BitmapFromBits', (['nb_right_bits', '(16)', '(16)', 'wx.BLACK'], {}), '(nb_right_bits, 16, 16, wx.BLACK)\n', (5937, 5970), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((6109, 6155), 'aui_utilities.BitmapFromBits', 'BitmapFromBits', (['nb_list_bits', '(16)', '(16)', 'wx.BLACK'], {}), '(nb_list_bits, 16, 16, wx.BLACK)\n', (6123, 6155), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((7275, 7300), 'aui_utilities.CopyAttributes', 'CopyAttributes', (['art', 'self'], {}), '(art, self)\n', (7289, 7300), False, 'from aui_utilities import CopyAttributes\n'), ((12139, 12148), 'wx.Rect', 'wx.Rect', ([], {}), '()\n', (12146, 12148), False, 'import wx\n'), ((12541, 12574), 'aui_utilities.StepColour', 'StepColour', (['self._base_colour', '(90)'], {}), '(self._base_colour, 90)\n', (12551, 12574), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((12599, 12633), 'aui_utilities.StepColour', 'StepColour', (['self._base_colour', '(170)'], {}), '(self._base_colour, 170)\n', (12609, 12633), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((20929, 21006), 'aui_utilities.ChopText', 'ChopText', (['dc', 'caption', '(tab_width - (text_offset - tab_x) - close_button_width)'], {}), '(dc, caption, tab_width - (text_offset - tab_x) - close_button_width)\n', (20937, 21006), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((22033, 22042), 'wx.Rect', 'wx.Rect', ([], {}), '()\n', (22040, 22042), False, 'import wx\n'), ((23118, 23162), 'wx.Rect', 'wx.Rect', (['tab_x', 'tab_y', 'tab_width', 'tab_height'], {}), '(tab_x, tab_y, tab_width, tab_height)\n', (23125, 23162), False, 'import wx\n'), ((28490, 28507), 'wx.Rect', 
'wx.Rect', (['*in_rect'], {}), '(*in_rect)\n', (28497, 28507), False, 'import wx\n'), ((29020, 29059), 'aui_utilities.IndentPressedBitmap', 'IndentPressedBitmap', (['rect', 'button_state'], {}), '(rect, button_state)\n', (29039, 29059), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((31377, 31393), 'wx.ClientDC', 'wx.ClientDC', (['wnd'], {}), '(wnd)\n', (31388, 31393), False, 'import wx\n'), ((34029, 34038), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (34036, 34038), False, 'import wx\n'), ((36123, 36173), 'wx.SystemSettings.GetFont', 'wx.SystemSettings.GetFont', (['wx.SYS_DEFAULT_GUI_FONT'], {}), '(wx.SYS_DEFAULT_GUI_FONT)\n', (36148, 36173), False, 'import wx\n'), ((36204, 36254), 'wx.SystemSettings.GetFont', 'wx.SystemSettings.GetFont', (['wx.SYS_DEFAULT_GUI_FONT'], {}), '(wx.SYS_DEFAULT_GUI_FONT)\n', (36229, 36254), False, 'import wx\n'), ((36440, 36489), 'wx.SystemSettings.GetColour', 'wx.SystemSettings.GetColour', (['wx.SYS_COLOUR_3DFACE'], {}), '(wx.SYS_COLOUR_3DFACE)\n', (36467, 36489), False, 'import wx\n'), ((36633, 36660), 'wx.Brush', 'wx.Brush', (['background_colour'], {}), '(background_colour)\n', (36641, 36660), False, 'import wx\n'), ((36692, 36718), 'wx.Brush', 'wx.Brush', (['normaltab_colour'], {}), '(normaltab_colour)\n', (36700, 36718), False, 'import wx\n'), ((36748, 36772), 'wx.Pen', 'wx.Pen', (['normaltab_colour'], {}), '(normaltab_colour)\n', (36754, 36772), False, 'import wx\n'), ((36806, 36834), 'wx.Brush', 'wx.Brush', (['selectedtab_colour'], {}), '(selectedtab_colour)\n', (36814, 36834), False, 'import wx\n'), ((36866, 36892), 'wx.Pen', 'wx.Pen', (['selectedtab_colour'], {}), '(selectedtab_colour)\n', (36872, 36892), False, 'import wx\n'), ((36927, 36974), 'aui_utilities.BitmapFromBits', 'BitmapFromBits', (['nb_close_bits', '(16)', '(16)', 'wx.BLACK'], {}), '(nb_close_bits, 16, 16, wx.BLACK)\n', (36941, 36974), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((37107, 37153), 'aui_utilities.BitmapFromBits', 'BitmapFromBits', (['nb_left_bits', '(16)', '(16)', 'wx.BLACK'], {}), '(nb_left_bits, 16, 16, wx.BLACK)\n', (37121, 37153), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((37285, 37332), 'aui_utilities.BitmapFromBits', 'BitmapFromBits', (['nb_right_bits', '(16)', '(16)', 'wx.BLACK'], {}), '(nb_right_bits, 16, 16, wx.BLACK)\n', (37299, 37332), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((37471, 37517), 'aui_utilities.BitmapFromBits', 'BitmapFromBits', (['nb_list_bits', '(16)', '(16)', 'wx.BLACK'], {}), '(nb_list_bits, 16, 16, wx.BLACK)\n', (37485, 37517), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((37885, 37910), 'aui_utilities.CopyAttributes', 'CopyAttributes', (['art', 'self'], {}), '(art, self)\n', (37899, 37910), False, 'from aui_utilities import CopyAttributes\n'), ((48639, 48648), 'wx.Rect', 'wx.Rect', ([], {}), '()\n', (48646, 48648), False, 'import wx\n'), ((49580, 49624), 'wx.Rect', 'wx.Rect', (['tab_x', 'tab_y', 'tab_width', 'tab_height'], {}), '(tab_x, tab_y, tab_width, tab_height)\n', (49587, 49624), False, 'import wx\n'), ((50118, 50133), 'wx.Rect', 'wx.Rect', (['*_rect'], {}), '(*_rect)\n', (50125, 50133), False, 'import wx\n'), ((53645, 53662), 'wx.Rect', 'wx.Rect', (['*in_rect'], {}), '(*in_rect)\n', (53652, 53662), False, 'import wx\n'), ((54236, 54250), 'wx.Rect', 'wx.Rect', (['*rect'], {}), 
'(*rect)\n', (54243, 54250), False, 'import wx\n'), ((54612, 54621), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (54619, 54621), False, 'import wx\n'), ((55460, 55481), 'wx.GetMousePosition', 'wx.GetMousePosition', ([], {}), '()\n', (55479, 55481), False, 'import wx\n'), ((56360, 56376), 'wx.ClientDC', 'wx.ClientDC', (['wnd'], {}), '(wnd)\n', (56371, 56376), False, 'import wx\n'), ((60065, 60090), 'aui_utilities.CopyAttributes', 'CopyAttributes', (['art', 'self'], {}), '(art, self)\n', (60079, 60090), False, 'from aui_utilities import CopyAttributes\n'), ((65605, 65682), 'aui_utilities.ChopText', 'ChopText', (['dc', 'caption', '(tab_width - (text_offset - tab_x) - close_button_width)'], {}), '(dc, caption, tab_width - (text_offset - tab_x) - close_button_width)\n', (65613, 65682), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((66534, 66543), 'wx.Rect', 'wx.Rect', ([], {}), '()\n', (66541, 66543), False, 'import wx\n'), ((67961, 68005), 'wx.Rect', 'wx.Rect', (['tab_x', 'tab_y', 'tab_width', 'tab_height'], {}), '(tab_x, tab_y, tab_width, tab_height)\n', (67968, 68005), False, 'import wx\n'), ((68567, 68592), 'aui_utilities.CopyAttributes', 'CopyAttributes', (['art', 'self'], {}), '(art, self)\n', (68581, 68592), False, 'from aui_utilities import CopyAttributes\n'), ((71925, 71962), 'wx.RectPP', 'wx.RectPP', (['tabPoints[2]', 'tabPoints[5]'], {}), '(tabPoints[2], tabPoints[5])\n', (71934, 71962), False, 'import wx\n'), ((75579, 75588), 'wx.Rect', 'wx.Rect', ([], {}), '()\n', (75586, 75588), False, 'import wx\n'), ((76785, 76829), 'wx.Rect', 'wx.Rect', (['tab_x', 'tab_y', 'tab_width', 'tab_height'], {}), '(tab_x, tab_y, tab_width, tab_height)\n', (76792, 76829), False, 'import wx\n'), ((78429, 78478), 'wx.SystemSettings_GetColour', 'wx.SystemSettings_GetColour', (['wx.SYS_COLOUR_3DFACE'], {}), '(wx.SYS_COLOUR_3DFACE)\n', (78456, 78478), False, 'import wx\n'), ((79961, 79986), 'aui_utilities.CopyAttributes', 'CopyAttributes', (['art', 'self'], {}), '(art, self)\n', (79975, 79986), False, 'from aui_utilities import CopyAttributes\n'), ((88750, 88759), 'wx.Rect', 'wx.Rect', ([], {}), '()\n', (88757, 88759), False, 'import wx\n'), ((90114, 90157), 'wx.Rect', 'wx.Rect', (['tab_x', 'tab_y', 'x_extent', 'tab_height'], {}), '(tab_x, tab_y, x_extent, tab_height)\n', (90121, 90157), False, 'import wx\n'), ((90806, 90855), 'wx.Rect', 'wx.Rect', (['minx', 'maxy', '(maxx - minx)', '(miny - maxy + 1)'], {}), '(minx, maxy, maxx - minx, miny - maxy + 1)\n', (90813, 90855), False, 'import wx\n'), ((90875, 90905), 'wx.RegionFromPoints', 'wx.RegionFromPoints', (['tabPoints'], {}), '(tabPoints)\n', (90894, 90905), False, 'import wx\n'), ((96488, 96513), 'aui_utilities.CopyAttributes', 'CopyAttributes', (['art', 'self'], {}), '(art, self)\n', (96502, 96513), False, 'from aui_utilities import CopyAttributes\n'), ((103235, 103244), 'wx.Rect', 'wx.Rect', ([], {}), '()\n', (103242, 103244), False, 'import wx\n'), ((104540, 104584), 'wx.Rect', 'wx.Rect', (['tab_x', 'tab_y', 'tab_width', 'tab_height'], {}), '(tab_x, tab_y, tab_width, tab_height)\n', (104547, 104584), False, 'import wx\n'), ((5191, 5244), 'wx.SystemSettings.GetColour', 'wx.SystemSettings.GetColour', (['wx.SYS_COLOUR_3DDKSHADOW'], {}), '(wx.SYS_COLOUR_3DDKSHADOW)\n', (5218, 5244), False, 'import wx\n'), ((5282, 5312), 'aui_utilities.DrawMACCloseButton', 'DrawMACCloseButton', (['bmp_colour'], {}), '(bmp_colour)\n', (5300, 5312), False, 'from aui_utilities import GetBaseColour, DrawMACCloseButton, LightColour, 
TakeScreenShot\n'), ((5448, 5495), 'aui_utilities.BitmapFromBits', 'BitmapFromBits', (['nb_close_bits', '(16)', '(16)', 'wx.BLACK'], {}), '(nb_close_bits, 16, 16, wx.BLACK)\n', (5462, 5495), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((5863, 5887), 'wx.Colour', 'wx.Colour', (['(128)', '(128)', '(128)'], {}), '(128, 128, 128)\n', (5872, 5887), False, 'import wx\n'), ((6044, 6068), 'wx.Colour', 'wx.Colour', (['(128)', '(128)', '(128)'], {}), '(128, 128, 128)\n', (6053, 6068), False, 'import wx\n'), ((6233, 6257), 'wx.Colour', 'wx.Colour', (['(128)', '(128)', '(128)'], {}), '(128, 128, 128)\n', (6242, 6257), False, 'import wx\n'), ((6818, 6840), 'wx.Pen', 'wx.Pen', (['c', '(2)', 'wx.SOLID'], {}), '(c, 2, wx.SOLID)\n', (6824, 6840), False, 'import wx\n'), ((6884, 6917), 'wx.Pen', 'wx.Pen', (['wx.BLACK', '(1)', 'wx.USER_DASH'], {}), '(wx.BLACK, 1, wx.USER_DASH)\n', (6890, 6917), False, 'import wx\n'), ((12267, 12319), 'wx.Rect', 'wx.Rect', (['rect.x', 'rect.y', '(rect.width + 2)', 'rect.height'], {}), '(rect.x, rect.y, rect.width + 2, rect.height)\n', (12274, 12319), False, 'import wx\n'), ((12466, 12522), 'wx.Rect', 'wx.Rect', (['rect.x', 'rect.y', '(rect.width + 2)', '(rect.height - 3)'], {}), '(rect.x, rect.y, rect.width + 2, rect.height - 3)\n', (12473, 12522), False, 'import wx\n'), ((15628, 15638), 'wx.Point', 'wx.Point', ([], {}), '()\n', (15636, 15638), False, 'import wx\n'), ((15783, 15805), 'wx.Point', 'wx.Point', (['tab_x', 'tab_y'], {}), '(tab_x, tab_y)\n', (15791, 15805), False, 'import wx\n'), ((15849, 15888), 'wx.Point', 'wx.Point', (['tab_x', '(tab_y + tab_height - 6)'], {}), '(tab_x, tab_y + tab_height - 6)\n', (15857, 15888), False, 'import wx\n'), ((15928, 15971), 'wx.Point', 'wx.Point', (['(tab_x + 2)', '(tab_y + tab_height - 4)'], {}), '(tab_x + 2, tab_y + tab_height - 4)\n', (15936, 15971), False, 'import wx\n'), ((16007, 16062), 'wx.Point', 'wx.Point', (['(tab_x + tab_width - 2)', '(tab_y + tab_height - 4)'], {}), '(tab_x + tab_width - 2, tab_y + tab_height - 4)\n', (16015, 16062), False, 'import wx\n'), ((16086, 16137), 'wx.Point', 'wx.Point', (['(tab_x + tab_width)', '(tab_y + tab_height - 6)'], {}), '(tab_x + tab_width, tab_y + tab_height - 6)\n', (16094, 16137), False, 'import wx\n'), ((16165, 16199), 'wx.Point', 'wx.Point', (['(tab_x + tab_width)', 'tab_y'], {}), '(tab_x + tab_width, tab_y)\n', (16173, 16199), False, 'import wx\n'), ((16292, 16331), 'wx.Point', 'wx.Point', (['tab_x', '(tab_y + tab_height - 4)'], {}), '(tab_x, tab_y + tab_height - 4)\n', (16300, 16331), False, 'import wx\n'), ((16371, 16397), 'wx.Point', 'wx.Point', (['tab_x', '(tab_y + 2)'], {}), '(tab_x, tab_y + 2)\n', (16379, 16397), False, 'import wx\n'), ((16439, 16465), 'wx.Point', 'wx.Point', (['(tab_x + 2)', 'tab_y'], {}), '(tab_x + 2, tab_y)\n', (16447, 16465), False, 'import wx\n'), ((16505, 16543), 'wx.Point', 'wx.Point', (['(tab_x + tab_width - 2)', 'tab_y'], {}), '(tab_x + tab_width - 2, tab_y)\n', (16513, 16543), False, 'import wx\n'), ((16571, 16609), 'wx.Point', 'wx.Point', (['(tab_x + tab_width)', '(tab_y + 2)'], {}), '(tab_x + tab_width, tab_y + 2)\n', (16579, 16609), False, 'import wx\n'), ((16639, 16690), 'wx.Point', 'wx.Point', (['(tab_x + tab_width)', '(tab_y + tab_height - 4)'], {}), '(tab_x + tab_width, tab_y + tab_height - 4)\n', (16647, 16690), False, 'import wx\n'), ((17032, 17076), 'wx.Rect', 'wx.Rect', (['tab_x', 'tab_y', 'tab_width', 'tab_height'], {}), '(tab_x, tab_y, tab_width, tab_height)\n', (17039, 17076), False, 
'import wx\n'), ((18139, 18191), 'wx.Rect', 'wx.Rect', (['tab_x', '(tab_y + 1)', 'tab_width', '(tab_height - 3)'], {}), '(tab_x, tab_y + 1, tab_width, tab_height - 3)\n', (18146, 18191), False, 'import wx\n'), ((18632, 18659), 'aui_utilities.StepColour', 'StepColour', (['top_colour', '(160)'], {}), '(top_colour, 160)\n', (18642, 18659), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((21789, 21829), 'wx.Rect', 'wx.Rect', (['text_offset', 'ypos', 'rectx', 'recty'], {}), '(text_offset, ypos, rectx, recty)\n', (21796, 21829), False, 'import wx\n'), ((22951, 22996), 'aui_utilities.IndentPressedBitmap', 'IndentPressedBitmap', (['rect', 'close_button_state'], {}), '(rect, close_button_state)\n', (22970, 22996), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((29208, 29248), 'wx.Rect', 'wx.Rect', (['rect.x', 'rect.y', '(30)', 'rect.height'], {}), '(rect.x, rect.y, 30, rect.height)\n', (29215, 29248), False, 'import wx\n'), ((30137, 30226), 'wx.Rect', 'wx.Rect', (['text_offset', '(drawn_tab_yoff + drawn_tab_height / 2 - texty / 2)', 'textx', 'texty'], {}), '(text_offset, drawn_tab_yoff + drawn_tab_height / 2 - texty / 2,\n textx, texty)\n', (30144, 30226), False, 'import wx\n'), ((31787, 31843), 'wx.EmptyBitmap', 'wx.EmptyBitmap', (['required_bmp_size.x', 'required_bmp_size.y'], {}), '(required_bmp_size.x, required_bmp_size.y)\n', (31801, 31843), False, 'import wx\n'), ((37048, 37072), 'wx.Colour', 'wx.Colour', (['(128)', '(128)', '(128)'], {}), '(128, 128, 128)\n', (37057, 37072), False, 'import wx\n'), ((37225, 37249), 'wx.Colour', 'wx.Colour', (['(128)', '(128)', '(128)'], {}), '(128, 128, 128)\n', (37234, 37249), False, 'import wx\n'), ((37406, 37430), 'wx.Colour', 'wx.Colour', (['(128)', '(128)', '(128)'], {}), '(128, 128, 128)\n', (37415, 37430), False, 'import wx\n'), ((37595, 37619), 'wx.Colour', 'wx.Colour', (['(128)', '(128)', '(128)'], {}), '(128, 128, 128)\n', (37604, 37619), False, 'import wx\n'), ((45195, 45205), 'wx.Point', 'wx.Point', ([], {}), '()\n', (45203, 45205), False, 'import wx\n'), ((47195, 47251), 'aui_utilities.ChopText', 'ChopText', (['dc', 'caption', '(tab_width - (text_offset - tab_x))'], {}), '(dc, caption, tab_width - (text_offset - tab_x))\n', (47203, 47251), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((47288, 47365), 'aui_utilities.ChopText', 'ChopText', (['dc', 'caption', '(tab_width - (text_offset - tab_x) - close_button_width)'], {}), '(dc, caption, tab_width - (text_offset - tab_x) - close_button_width)\n', (47296, 47365), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((48081, 48121), 'wx.Rect', 'wx.Rect', (['text_offset', 'ypos', 'rectx', 'recty'], {}), '(text_offset, ypos, rectx, recty)\n', (48088, 48121), False, 'import wx\n'), ((48245, 48343), 'wx.Rect', 'wx.Rect', (['text_offset', '((tab_y + tab_height) / 2 - texty / 2 + 1)', 'selected_textx', 'selected_texty'], {}), '(text_offset, (tab_y + tab_height) / 2 - texty / 2 + 1,\n selected_textx, selected_texty)\n', (48252, 48343), False, 'import wx\n'), ((49533, 49547), 'wx.Rect', 'wx.Rect', (['*rect'], {}), '(*rect)\n', (49540, 49547), False, 'import wx\n'), ((63396, 63418), 'wx.Point', 'wx.Point', (['tab_x', 'tab_y'], {}), '(tab_x, tab_y)\n', (63404, 63418), False, 'import wx\n'), ((63450, 63489), 'wx.Point', 'wx.Point', (['tab_x', '(tab_y + tab_height - 6)'], {}), '(tab_x, tab_y + tab_height - 6)\n', 
(63458, 63489), False, 'import wx\n'), ((63582, 63621), 'wx.Point', 'wx.Point', (['tab_x', '(tab_y + tab_height - 4)'], {}), '(tab_x, tab_y + tab_height - 4)\n', (63590, 63621), False, 'import wx\n'), ((63653, 63679), 'wx.Point', 'wx.Point', (['tab_x', '(tab_y + 2)'], {}), '(tab_x, tab_y + 2)\n', (63661, 63679), False, 'import wx\n'), ((66465, 66505), 'wx.Rect', 'wx.Rect', (['text_offset', 'ypos', 'rectx', 'recty'], {}), '(text_offset, ypos, rectx, recty)\n', (66472, 66505), False, 'import wx\n'), ((67794, 67839), 'aui_utilities.IndentPressedBitmap', 'IndentPressedBitmap', (['rect', 'close_button_state'], {}), '(rect, close_button_state)\n', (67813, 67839), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((70858, 70868), 'wx.Point', 'wx.Point', ([], {}), '()\n', (70866, 70868), False, 'import wx\n'), ((74315, 74400), 'aui_utilities.ChopText', 'ChopText', (['dc', 'caption', '(tab_width - (text_offset - tab_x) - close_button_width + 1)'], {}), '(dc, caption, tab_width - (text_offset - tab_x) -\n close_button_width + 1)\n', (74323, 74400), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((74433, 74510), 'aui_utilities.ChopText', 'ChopText', (['dc', 'caption', '(tab_width - (text_offset - tab_x) - close_button_width)'], {}), '(dc, caption, tab_width - (text_offset - tab_x) - close_button_width)\n', (74441, 74510), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((75297, 75337), 'wx.Rect', 'wx.Rect', (['text_offset', 'ypos', 'rectx', 'recty'], {}), '(text_offset, ypos, rectx, recty)\n', (75304, 75337), False, 'import wx\n'), ((76619, 76664), 'aui_utilities.IndentPressedBitmap', 'IndentPressedBitmap', (['rect', 'close_button_state'], {}), '(rect, close_button_state)\n', (76638, 76664), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((77519, 77529), 'wx.Point', 'wx.Point', ([], {}), '()\n', (77527, 77529), False, 'import wx\n'), ((77979, 78021), 'wx.Point', 'wx.Point', (['rect.x', '(rect.y + rect.height / 2)'], {}), '(rect.x, rect.y + rect.height / 2)\n', (77987, 78021), False, 'import wx\n'), ((78046, 78105), 'wx.Point', 'wx.Point', (['(rect.x + rect.width - 2)', '(rect.y + rect.height / 2)'], {}), '(rect.x + rect.width - 2, rect.y + rect.height / 2)\n', (78054, 78105), False, 'import wx\n'), ((82997, 83007), 'wx.Point', 'wx.Point', ([], {}), '()\n', (83005, 83007), False, 'import wx\n'), ((87505, 87561), 'aui_utilities.ChopText', 'ChopText', (['dc', 'caption', '(tab_width - (text_offset - tab_x))'], {}), '(dc, caption, tab_width - (text_offset - tab_x))\n', (87513, 87561), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((87598, 87675), 'aui_utilities.ChopText', 'ChopText', (['dc', 'caption', '(tab_width - (text_offset - tab_x) - close_button_width)'], {}), '(dc, caption, tab_width - (text_offset - tab_x) - close_button_width)\n', (87606, 87675), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((88454, 88494), 'wx.Rect', 'wx.Rect', (['text_offset', 'ypos', 'rectx', 'recty'], {}), '(text_offset, ypos, rectx, recty)\n', (88461, 88494), False, 'import wx\n'), ((89948, 89993), 'aui_utilities.IndentPressedBitmap', 'IndentPressedBitmap', (['rect', 'close_button_state'], {}), '(rect, close_button_state)\n', (89967, 89993), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, 
ChopText\n'), ((90973, 91001), 'wx.Region', 'wx.Region', (['*self._buttonRect'], {}), '(*self._buttonRect)\n', (90982, 91001), False, 'import wx\n'), ((91211, 91244), 'aui_utilities.StepColour', 'StepColour', (['self._base_colour', '(90)'], {}), '(self._base_colour, 90)\n', (91221, 91244), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((91270, 91304), 'aui_utilities.StepColour', 'StepColour', (['self._base_colour', '(170)'], {}), '(self._base_colour, 170)\n', (91280, 91304), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((102271, 102335), 'aui_utilities.ChopText', 'ChopText', (['dc', 'caption', '(tab_width - (text_offset - tab_x) - leftw)'], {}), '(dc, caption, tab_width - (text_offset - tab_x) - leftw)\n', (102279, 102335), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((102372, 102461), 'aui_utilities.ChopText', 'ChopText', (['dc', 'caption', '(tab_width - (text_offset - tab_x) - close_button_width - leftw)'], {}), '(dc, caption, tab_width - (text_offset - tab_x) -\n close_button_width - leftw)\n', (102380, 102461), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((103150, 103190), 'wx.Rect', 'wx.Rect', (['text_offset', 'ypos', 'rectx', 'recty'], {}), '(text_offset, ypos, rectx, recty)\n', (103157, 103190), False, 'import wx\n'), ((104370, 104415), 'aui_utilities.IndentPressedBitmap', 'IndentPressedBitmap', (['rect', 'close_button_state'], {}), '(rect, close_button_state)\n', (104389, 104415), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((5371, 5395), 'wx.Colour', 'wx.Colour', (['(128)', '(128)', '(128)'], {}), '(128, 128, 128)\n', (5380, 5395), False, 'import wx\n'), ((5573, 5597), 'wx.Colour', 'wx.Colour', (['(128)', '(128)', '(128)'], {}), '(128, 128, 128)\n', (5582, 5597), False, 'import wx\n'), ((6548, 6610), 'wx.MacThemeColour', 'wx.MacThemeColour', (['Carbon.Appearance.kThemeBrushFocusHighlight'], {}), '(Carbon.Appearance.kThemeBrushFocusHighlight)\n', (6565, 6610), False, 'import wx\n'), ((6653, 6671), 'wx.Brush', 'wx.Brush', (['wx.BLACK'], {}), '(wx.BLACK)\n', (6661, 6671), False, 'import wx\n'), ((12888, 12911), 'wx.Brush', 'wx.Brush', (['bottom_colour'], {}), '(bottom_colour)\n', (12896, 12911), False, 'import wx\n'), ((14966, 15017), 'wx.SystemSettings.GetColour', 'wx.SystemSettings.GetColour', (['wx.SYS_COLOUR_GRAYTEXT'], {}), '(wx.SYS_COLOUR_GRAYTEXT)\n', (14993, 15017), False, 'import wx\n'), ((21188, 21219), 'wx.Point', 'wx.Point', (['(text_offset + 1)', 'ypos'], {}), '(text_offset + 1, ypos)\n', (21196, 21219), False, 'import wx\n'), ((30070, 30091), 'wx.Window.FindFocus', 'wx.Window.FindFocus', ([], {}), '()\n', (30089, 30091), False, 'import wx\n'), ((30598, 30623), 'wx.Rect', 'wx.Rect', (['*focusRectBitmap'], {}), '(*focusRectBitmap)\n', (30605, 30623), False, 'import wx\n'), ((34564, 34605), 'wx.MenuItem', 'wx.MenuItem', (['menuPopup', '(1000 + i)', 'caption'], {}), '(menuPopup, 1000 + i, caption)\n', (34575, 34605), False, 'import wx\n'), ((45023, 45074), 'wx.SystemSettings.GetColour', 'wx.SystemSettings.GetColour', (['wx.SYS_COLOUR_GRAYTEXT'], {}), '(wx.SYS_COLOUR_GRAYTEXT)\n', (45050, 45074), False, 'import wx\n'), ((47526, 47557), 'wx.Point', 'wx.Point', (['(text_offset + 1)', 'ypos'], {}), '(text_offset + 1, ypos)\n', (47534, 47557), False, 'import wx\n'), ((48182, 48203), 'wx.Window.FindFocus', 
'wx.Window.FindFocus', ([], {}), '()\n', (48201, 48203), False, 'import wx\n'), ((54794, 54840), 'wx.MenuItem', 'wx.MenuItem', (['menuPopup', '(1000 + i)', 'page.caption'], {}), '(menuPopup, 1000 + i, page.caption)\n', (54805, 54840), False, 'import wx\n'), ((64151, 64202), 'wx.SystemSettings.GetColour', 'wx.SystemSettings.GetColour', (['wx.SYS_COLOUR_GRAYTEXT'], {}), '(wx.SYS_COLOUR_GRAYTEXT)\n', (64178, 64202), False, 'import wx\n'), ((65876, 65907), 'wx.Point', 'wx.Point', (['(text_offset + 1)', 'ypos'], {}), '(text_offset + 1, ypos)\n', (65884, 65907), False, 'import wx\n'), ((72116, 72168), 'wx.SystemSettings_GetColour', 'wx.SystemSettings_GetColour', (['wx.SYS_COLOUR_BTNSHADOW'], {}), '(wx.SYS_COLOUR_BTNSHADOW)\n', (72143, 72168), False, 'import wx\n'), ((72834, 72885), 'wx.SystemSettings.GetColour', 'wx.SystemSettings.GetColour', (['wx.SYS_COLOUR_GRAYTEXT'], {}), '(wx.SYS_COLOUR_GRAYTEXT)\n', (72861, 72885), False, 'import wx\n'), ((74700, 74731), 'wx.Point', 'wx.Point', (['(text_offset + 1)', 'ypos'], {}), '(text_offset + 1, ypos)\n', (74708, 74731), False, 'import wx\n'), ((77623, 77670), 'wx.Point', 'wx.Point', (['rect.x', '(rect.y + rect.height / 10 * 8)'], {}), '(rect.x, rect.y + rect.height / 10 * 8)\n', (77631, 77670), False, 'import wx\n'), ((77697, 77761), 'wx.Point', 'wx.Point', (['(rect.x + rect.width - 2)', '(rect.y + rect.height / 10 * 8)'], {}), '(rect.x + rect.width - 2, rect.y + rect.height / 10 * 8)\n', (77705, 77761), False, 'import wx\n'), ((77805, 77852), 'wx.Point', 'wx.Point', (['rect.x', '(rect.y + rect.height / 10 * 5)'], {}), '(rect.x, rect.y + rect.height / 10 * 5)\n', (77813, 77852), False, 'import wx\n'), ((77879, 77943), 'wx.Point', 'wx.Point', (['(rect.x + rect.width - 2)', '(rect.y + rect.height / 10 * 5)'], {}), '(rect.x + rect.width - 2, rect.y + rect.height / 10 * 5)\n', (77887, 77943), False, 'import wx\n'), ((78350, 78399), 'wx.SystemSettings_GetColour', 'wx.SystemSettings_GetColour', (['wx.SYS_COLOUR_3DFACE'], {}), '(wx.SYS_COLOUR_3DFACE)\n', (78377, 78399), False, 'import wx\n'), ((85136, 85188), 'wx.SystemSettings.GetColour', 'wx.SystemSettings.GetColour', (['wx.SYS_COLOUR_BTNSHADOW'], {}), '(wx.SYS_COLOUR_BTNSHADOW)\n', (85163, 85188), False, 'import wx\n'), ((85998, 86049), 'wx.SystemSettings.GetColour', 'wx.SystemSettings.GetColour', (['wx.SYS_COLOUR_GRAYTEXT'], {}), '(wx.SYS_COLOUR_GRAYTEXT)\n', (86025, 86049), False, 'import wx\n'), ((87865, 87896), 'wx.Point', 'wx.Point', (['(text_offset + 1)', 'ypos'], {}), '(text_offset + 1, ypos)\n', (87873, 87896), False, 'import wx\n'), ((100873, 100924), 'wx.SystemSettings.GetColour', 'wx.SystemSettings.GetColour', (['wx.SYS_COLOUR_GRAYTEXT'], {}), '(wx.SYS_COLOUR_GRAYTEXT)\n', (100900, 100924), False, 'import wx\n'), ((102595, 102626), 'wx.Point', 'wx.Point', (['(text_offset + 1)', 'ypos'], {}), '(text_offset + 1, ypos)\n', (102603, 102626), False, 'import wx\n'), ((21255, 21286), 'wx.Point', 'wx.Point', (['(text_offset + 1)', 'ypos'], {}), '(text_offset + 1, ypos)\n', (21263, 21286), False, 'import wx\n'), ((30713, 30736), 'wx.Rect', 'wx.Rect', (['*focusRectText'], {}), '(*focusRectText)\n', (30720, 30736), False, 'import wx\n'), ((47593, 47624), 'wx.Point', 'wx.Point', (['(text_offset + 1)', 'ypos'], {}), '(text_offset + 1, ypos)\n', (47601, 47624), False, 'import wx\n'), ((50349, 50374), 'aui_utilities.StepColour', 'StepColour', (['bkcolour', '(120)'], {}), '(bkcolour, 120)\n', (50359, 50374), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), 
((50406, 50430), 'aui_utilities.StepColour', 'StepColour', (['bkcolour', '(75)'], {}), '(bkcolour, 75)\n', (50416, 50430), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((62216, 62267), 'wx.SystemSettings.GetColour', 'wx.SystemSettings.GetColour', (['wx.SYS_COLOUR_3DSHADOW'], {}), '(wx.SYS_COLOUR_3DSHADOW)\n', (62243, 62267), False, 'import wx\n'), ((62567, 62620), 'wx.SystemSettings.GetColour', 'wx.SystemSettings.GetColour', (['wx.SYS_COLOUR_3DDKSHADOW'], {}), '(wx.SYS_COLOUR_3DDKSHADOW)\n', (62594, 62620), False, 'import wx\n'), ((65943, 65974), 'wx.Point', 'wx.Point', (['(text_offset + 1)', 'ypos'], {}), '(text_offset + 1, ypos)\n', (65951, 65974), False, 'import wx\n'), ((74767, 74798), 'wx.Point', 'wx.Point', (['(text_offset + 1)', 'ypos'], {}), '(text_offset + 1, ypos)\n', (74775, 74798), False, 'import wx\n'), ((87932, 87963), 'wx.Point', 'wx.Point', (['(text_offset + 1)', 'ypos'], {}), '(text_offset + 1, ypos)\n', (87940, 87963), False, 'import wx\n'), ((102662, 102693), 'wx.Point', 'wx.Point', (['(text_offset + 1)', 'ypos'], {}), '(text_offset + 1, ypos)\n', (102670, 102693), False, 'import wx\n'), ((19411, 19445), 'aui_utilities.StepColour', 'StepColour', (['self._base_colour', '(170)'], {}), '(self._base_colour, 170)\n', (19421, 19445), False, 'from aui_utilities import BitmapFromBits, StepColour, IndentPressedBitmap, ChopText\n'), ((61667, 61718), 'wx.SystemSettings.GetColour', 'wx.SystemSettings.GetColour', (['wx.SYS_COLOUR_3DSHADOW'], {}), '(wx.SYS_COLOUR_3DSHADOW)\n', (61694, 61718), False, 'import wx\n'), ((61578, 61632), 'wx.SystemSettings.GetColour', 'wx.SystemSettings.GetColour', (['wx.SYS_COLOUR_3DHIGHLIGHT'], {}), '(wx.SYS_COLOUR_3DHIGHLIGHT)\n', (61605, 61632), False, 'import wx\n'), ((61773, 61822), 'wx.SystemSettings.GetColour', 'wx.SystemSettings.GetColour', (['wx.SYS_COLOUR_3DFACE'], {}), '(wx.SYS_COLOUR_3DFACE)\n', (61800, 61822), False, 'import wx\n')]
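The record above (a wxPython AUI tab-art module, judging by its API list) derives hover and pressed shades from one base colour via aui_utilities.StepColour. As a rough standalone approximation of what such a percentage-stepping helper does, not the AGW implementation itself: values below 100 darken toward black, values above 100 lighten toward white.

def step_colour(rgb, amount):
    """Approximate StepColour(colour, amount): 100 leaves the colour
    unchanged, 0 is black, 200 is white. rgb is an (r, g, b) tuple."""
    def blend(channel):
        if amount >= 100:
            # interpolate the channel toward 255 (white)
            return int(channel + (255 - channel) * (amount - 100) / 100.0)
        # interpolate the channel toward 0 (black)
        return int(channel * amount / 100.0)
    return tuple(blend(c) for c in rgb)

print(step_colour((200, 120, 40), 90))   # slightly darker, like the tab borders above
print(step_colour((200, 120, 40), 170))  # much lighter, like the background fills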
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Various retriever utilities. """ import regex import unicodedata import numpy as np import scipy.sparse as sp from sklearn.utils import murmurhash3_32 try: import torch except ImportError: raise ImportError('Need to install Pytorch: go to pytorch.org') # ------------------------------------------------------------------------------ # Sparse matrix saving/loading helpers. # ------------------------------------------------------------------------------ def save_sparse_csr(filename, matrix, metadata=None): data = { 'data': matrix.data, 'indices': matrix.indices, 'indptr': matrix.indptr, 'shape': matrix.shape, 'metadata': metadata, } np.savez(filename, **data) def save_sparse_tensor(filename, matrix, metadata=None): data = { 'indices': matrix._indices(), 'values': matrix._values(), 'size': matrix.size(), 'metadata': metadata, } torch.save(data, filename) def load_sparse_csr(filename): loader = np.load(filename + '.npz', allow_pickle=True) matrix = sp.csr_matrix( (loader['data'], loader['indices'], loader['indptr']), shape=loader['shape'] ) return matrix, loader['metadata'].item(0) if 'metadata' in loader else None def load_sparse_tensor(filename): loader = torch.load(filename) matrix = torch.sparse.FloatTensor( loader['indices'], loader['values'], loader['size'] ) return matrix, loader['metadata'] if 'metadata' in loader else None # ------------------------------------------------------------------------------ # Token hashing. # ------------------------------------------------------------------------------ def hash(token, num_buckets): """ Unsigned 32 bit murmurhash for feature hashing. """ return murmurhash3_32(token, positive=True) % num_buckets # ------------------------------------------------------------------------------ # Text cleaning. # ------------------------------------------------------------------------------ STOPWORDS = { 'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', 'couldn', 'didn', 'doesn', 'hadn', 'hasn', 'haven', 'isn', 'ma', 'mightn', 'mustn', 'needn', 'shan', 'shouldn', 'wasn', 'weren', 'won', 'wouldn', "'ll", "'re", "'ve", "n't", "'s", "'d", "'m", "''", "``", } def normalize(text): """ Resolve different type of unicode encodings. 
""" if type(text) != str: return text return unicodedata.normalize('NFD', text) def filter_word(text): """ Take out english stopwords, punctuation, and compound endings. """ text = normalize(text) if regex.match(r'^\p{P}+$', text): return True if text.lower() in STOPWORDS: return True return False def filter_ngram(gram, mode='any'): """ Decide whether to keep or discard an n-gram. Args: gram: list of tokens (length N) mode: Option to throw out ngram if 'any': any single token passes filter_word 'all': all tokens pass filter_word 'ends': book-ended by filterable tokens """ filtered = [filter_word(w) for w in gram] if mode == 'any': return any(filtered) elif mode == 'all': return all(filtered) elif mode == 'ends': return filtered[0] or filtered[-1] else: raise ValueError('Invalid mode: %s' % mode)
[ "unicodedata.normalize", "numpy.load", "torch.load", "torch.save", "scipy.sparse.csr_matrix", "regex.match", "numpy.savez", "torch.sparse.FloatTensor", "sklearn.utils.murmurhash3_32" ]
[((908, 934), 'numpy.savez', 'np.savez', (['filename'], {}), '(filename, **data)\n', (916, 934), True, 'import numpy as np\n'), ((1152, 1178), 'torch.save', 'torch.save', (['data', 'filename'], {}), '(data, filename)\n', (1162, 1178), False, 'import torch\n'), ((1225, 1270), 'numpy.load', 'np.load', (["(filename + '.npz')"], {'allow_pickle': '(True)'}), "(filename + '.npz', allow_pickle=True)\n", (1232, 1270), True, 'import numpy as np\n'), ((1284, 1380), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (["(loader['data'], loader['indices'], loader['indptr'])"], {'shape': "loader['shape']"}), "((loader['data'], loader['indices'], loader['indptr']), shape=\n loader['shape'])\n", (1297, 1380), True, 'import scipy.sparse as sp\n'), ((1519, 1539), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (1529, 1539), False, 'import torch\n'), ((1553, 1630), 'torch.sparse.FloatTensor', 'torch.sparse.FloatTensor', (["loader['indices']", "loader['values']", "loader['size']"], {}), "(loader['indices'], loader['values'], loader['size'])\n", (1577, 1630), False, 'import torch\n'), ((4312, 4346), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 'text'], {}), "('NFD', text)\n", (4333, 4346), False, 'import unicodedata\n'), ((4489, 4519), 'regex.match', 'regex.match', (['"""^\\\\p{P}+$"""', 'text'], {}), "('^\\\\p{P}+$', text)\n", (4500, 4519), False, 'import regex\n'), ((2009, 2045), 'sklearn.utils.murmurhash3_32', 'murmurhash3_32', (['token'], {'positive': '(True)'}), '(token, positive=True)\n', (2023, 2045), False, 'from sklearn.utils import murmurhash3_32\n')]
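The retriever module above buckets tokens with sklearn.utils.murmurhash3_32. A minimal sketch of using that same helper to build a hashed bag-of-words; the bucket count (2 ** 24) is an arbitrary choice for illustration.

from collections import Counter
from sklearn.utils import murmurhash3_32

def hash_token(token, num_buckets):
    # Same scheme as the hash() helper above: unsigned murmurhash
    # reduced modulo the bucket count.
    return murmurhash3_32(token, positive=True) % num_buckets

num_buckets = 2 ** 24  # arbitrary size for this example
counts = Counter(hash_token(tok, num_buckets)
                for tok in "the quick brown fox jumps over the lazy dog".split())
print(counts.most_common(3))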
import logging, traceback, sys, threading
try:
    import Queue
except ImportError:
    import queue as Queue

from ..log import set_logging
from ..utils import test_connect
from ..storage import templates

logger = logging.getLogger('itchat')

def load_register(core):
    core.auto_login = auto_login
    core.configured_reply = configured_reply
    core.msg_register = msg_register
    core.run = run

def auto_login(self, hotReload=False, statusStorageDir='itchat.pkl',
        enableCmdQR=False, picDir=None, qrCallback=None,
        loginCallback=None, exitCallback=None):
    if not test_connect():
        logger.info("You can't get access to internet or wechat domain, so exit.")
        sys.exit()
    self.useHotReload = hotReload
    if hotReload:
        if self.load_login_status(statusStorageDir,
                loginCallback=loginCallback, exitCallback=exitCallback):
            return
        self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback,
            loginCallback=loginCallback, exitCallback=exitCallback)
        self.dump_login_status(statusStorageDir)
        self.hotReloadDir = statusStorageDir
    else:
        self.login(enableCmdQR=enableCmdQR, picDir=picDir, qrCallback=qrCallback,
            loginCallback=loginCallback, exitCallback=exitCallback)

def configured_reply(self):
    ''' determine the type of message and reply if its method is defined
        however, I use a strange way to determine whether a msg comes from a massive platform
        and I haven't found a better solution here
        The main problem I'm worried about is the mismatching of new friends added on the phone
        If you have a better idea, please report an issue. I will be more than grateful.
    '''
    try:
        msg = self.msgList.get(timeout=1)
    except Queue.Empty:
        pass
    else:
        replyFn = None  # guard: msg['User'] may match none of the types below
        if isinstance(msg['User'], templates.User):
            replyFn = self.functionDict['FriendChat'].get(msg['Type'])
        elif isinstance(msg['User'], templates.MassivePlatform):
            replyFn = self.functionDict['MpChat'].get(msg['Type'])
        elif isinstance(msg['User'], templates.Chatroom):
            replyFn = self.functionDict['GroupChat'].get(msg['Type'])
        if replyFn is None:
            r = None
        else:
            try:
                r = replyFn(msg)
                if r is not None:
                    self.send(r, msg.get('FromUserName'))
            except:
                logger.warning(traceback.format_exc())

def msg_register(self, msgType, isFriendChat=False, isGroupChat=False, isMpChat=False):
    ''' a decorator constructor
        return a specific decorator based on information given
    '''
    if not (isinstance(msgType, list) or isinstance(msgType, tuple)):
        msgType = [msgType]
    def _msg_register(fn):
        for _msgType in msgType:
            if isFriendChat:
                self.functionDict['FriendChat'][_msgType] = fn
            if isGroupChat:
                self.functionDict['GroupChat'][_msgType] = fn
            if isMpChat:
                self.functionDict['MpChat'][_msgType] = fn
            if not any((isFriendChat, isGroupChat, isMpChat)):
                self.functionDict['FriendChat'][_msgType] = fn
        return fn
    return _msg_register

def run(self, debug=False, blockThread=True):
    logger.info('Start auto replying.')
    if debug:
        set_logging(loggingLevel=logging.DEBUG)
    def reply_fn():
        try:
            while self.alive:
                self.configured_reply()
        except KeyboardInterrupt:
            if self.useHotReload:
                self.dump_login_status()
            self.alive = False
            logger.debug('itchat received an ^C and exit.')
            logger.info('Bye~')
    if blockThread:
        reply_fn()
    else:
        replyThread = threading.Thread(target=reply_fn)
        replyThread.setDaemon(True)
        replyThread.start()
[ "threading.Thread", "sys.exit", "traceback.format_exc", "logging.getLogger" ]
[((226, 253), 'logging.getLogger', 'logging.getLogger', (['"""itchat"""'], {}), "('itchat')\n", (243, 253), False, 'import logging, traceback, sys, threading\n'), ((743, 753), 'sys.exit', 'sys.exit', ([], {}), '()\n', (751, 753), False, 'import logging, traceback, sys, threading\n'), ((3959, 3992), 'threading.Thread', 'threading.Thread', ([], {'target': 'reply_fn'}), '(target=reply_fn)\n', (3975, 3992), False, 'import logging, traceback, sys, threading\n'), ((2562, 2584), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2582, 2584), False, 'import logging, traceback, sys, threading\n')]
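msg_register above is a decorator constructor: it returns a decorator that files the wrapped handler into one or more dispatch tables, which configured_reply later consults by message type. A stripped-down standalone sketch of the same pattern, with simplified names and no itchat dependency:

function_dict = {'FriendChat': {}, 'GroupChat': {}, 'MpChat': {}}

def msg_register(msg_types, isFriendChat=False, isGroupChat=False, isMpChat=False):
    if not isinstance(msg_types, (list, tuple)):
        msg_types = [msg_types]
    def _msg_register(fn):
        for msg_type in msg_types:
            if isFriendChat:
                function_dict['FriendChat'][msg_type] = fn
            if isGroupChat:
                function_dict['GroupChat'][msg_type] = fn
            if isMpChat:
                function_dict['MpChat'][msg_type] = fn
            if not any((isFriendChat, isGroupChat, isMpChat)):
                function_dict['FriendChat'][msg_type] = fn  # default table
        return fn
    return _msg_register

@msg_register('Text', isFriendChat=True)
def echo(msg):
    return 'echo: ' + msg['Text']

handler = function_dict['FriendChat'].get('Text')
print(handler({'Text': 'hello'}))  # -> echo: hello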
#!/usr/bin/env python3 import requests from lxml import html from unidecode import unidecode DEX_BASE_URL = 'https://dexonline.ro' DEX_API_URL_FORMAT = '{}/{}'.format(DEX_BASE_URL, 'definitie/{}/json') query = 'fir' dex_api_url = DEX_API_URL_FORMAT.format(query) dex_api_request = requests.get(dex_api_url) dex_raw_response = dex_api_request.json() dex_raw_definitions = dex_raw_response['definitions'] dex_definitions = [] for dex_raw_definition in dex_raw_definitions: dex_definition_html_rep = dex_raw_definition['htmlRep'] html_fragments = html.fragments_fromstring(dex_definition_html_rep) root = html.Element('root') for html_fragment in html_fragments: root.append(html_fragment) dex_definition_text = root.text_content() dex_definition_text = unidecode(dex_definition_text) dex_definitions.append(dex_definition_text) print(dex_definitions)
[ "lxml.html.Element", "lxml.html.fragments_fromstring", "unidecode.unidecode", "requests.get" ]
[((287, 312), 'requests.get', 'requests.get', (['dex_api_url'], {}), '(dex_api_url)\n', (299, 312), False, 'import requests\n'), ((562, 612), 'lxml.html.fragments_fromstring', 'html.fragments_fromstring', (['dex_definition_html_rep'], {}), '(dex_definition_html_rep)\n', (587, 612), False, 'from lxml import html\n'), ((625, 645), 'lxml.html.Element', 'html.Element', (['"""root"""'], {}), "('root')\n", (637, 645), False, 'from lxml import html\n'), ((796, 826), 'unidecode.unidecode', 'unidecode', (['dex_definition_text'], {}), '(dex_definition_text)\n', (805, 826), False, 'from unidecode import unidecode\n')]
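The scraper above flattens each htmlRep definition by parsing it into fragments, gathering them under a synthetic root, and ASCII-folding the text with unidecode. One subtlety: lxml's fragments_fromstring can return leading bare text as a plain string, which Element.append would reject. A small helper that handles that case; the sample markup is invented for illustration.

from lxml import html
from unidecode import unidecode

def fragments_to_ascii(markup):
    root = html.Element('root')
    for fragment in html.fragments_fromstring(markup):
        if isinstance(fragment, str):
            # bare leading text has no element to append; attach it to the root
            root.text = (root.text or '') + fragment
        else:
            root.append(fragment)
    return unidecode(root.text_content())

print(fragments_to_ascii('fir, <b>fire</b> <i>(ață de cusut)</i>'))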
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.http import FileResponse

# Create your views here.
from posts import urls
from .models import Post


def posts_list(request):
    print("------>")
    # instance = Post.objects.get(id=100)
    queryset = Post.objects.all()
    result = ""
    for obj in queryset:
        result += obj.title + "\n"
    context = {}
    if request.user.is_authenticated():
        context = {
            "Detail": result,
            "myqueryset": queryset,
            "Name": "Vinoth",
            "Designation": "Software Engineer"
        }
    else:
        context = {
            "Detail": "Authentication failed for List",
            "Name": "Vinoth",
            "Designation": "Software Engineer"
        }
    return render(request, "index.html", context)
    # return HttpResponse("<h1>List</h1>")


def posts_create(request):
    if request.user.is_authenticated():
        context = {
            "Detail": "Create",
        }
    else:
        context = {
            "Detail": "Create",
        }
    print("------>")
    # return HttpResponse("<h1>Create</h1>")
    return render(request, "index.html", context)


def posts_update(request):
    print("------>")
    context = {}  # default, so unauthenticated requests don't raise NameError below
    if request.user.is_authenticated():
        context = {
            "Detail": "Update",
        }
    # return HttpResponse("<h1>Update</h1>")
    return render(request, "index.html", context)


def posts_detail(request, id=None):
    instance = get_object_or_404(Post, id=id)
    print("------>", instance.id)
    context = {
        "Detail": "Detail",
        "myqueryset": instance,
    }
    # return HttpResponse("<h1>Detail</h1>")
    return render(request, "post_detail.html", context)


def posts_delete(request):
    print("------>")
    context = {
        "Detail": "Delete",
    }
    # return HttpResponse("<h1>Delete</h1>")
    return render(request, "index.html", context)
[ "django.shortcuts.render", "django.shortcuts.get_object_or_404" ]
[((817, 855), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', 'context'], {}), "(request, 'index.html', context)\n", (823, 855), False, 'from django.shortcuts import render, get_object_or_404\n'), ((1179, 1217), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', 'context'], {}), "(request, 'index.html', context)\n", (1185, 1217), False, 'from django.shortcuts import render, get_object_or_404\n'), ((1426, 1464), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', 'context'], {}), "(request, 'index.html', context)\n", (1432, 1464), False, 'from django.shortcuts import render, get_object_or_404\n'), ((1518, 1548), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Post'], {'id': 'id'}), '(Post, id=id)\n', (1535, 1548), False, 'from django.shortcuts import render, get_object_or_404\n'), ((1722, 1766), 'django.shortcuts.render', 'render', (['request', '"""post_detail.html"""', 'context'], {}), "(request, 'post_detail.html', context)\n", (1728, 1766), False, 'from django.shortcuts import render, get_object_or_404\n'), ((1923, 1961), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', 'context'], {}), "(request, 'index.html', context)\n", (1929, 1961), False, 'from django.shortcuts import render, get_object_or_404\n')]
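For context, views like the ones above are normally wired up through a URLconf. The module below is purely hypothetical (the app's real posts/urls.py is not shown) and uses the Django 1.x-era url() helper, matching the callable request.user.is_authenticated() style of the code; only posts_detail accepts an id keyword, so only its pattern captures one.

# Hypothetical posts/urls.py sketch; route names and paths are assumptions.
from django.conf.urls import url
from . import views

urlpatterns = [
    url(r'^$', views.posts_list, name='list'),
    url(r'^create/$', views.posts_create, name='create'),
    url(r'^(?P<id>\d+)/$', views.posts_detail, name='detail'),
    url(r'^update/$', views.posts_update, name='update'),
    url(r'^delete/$', views.posts_delete, name='delete'),
]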
#!/usr/bin/env python3 ########################################## # Duino-Coin Python PC Miner (v2.2) # https://github.com/revoxhere/duino-coin # Distributed under MIT license # © Duino-Coin Community 2019-2021 ########################################## import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os # Import libraries from pathlib import Path from signal import signal, SIGINT import locale, json def install(package): subprocess.check_call([sys.executable, "-m", "pip", "install", package]) os.execl(sys.executable, sys.executable, *sys.argv) def now(): return datetime.datetime.now() try: # Check if cpuinfo is installed import cpuinfo from multiprocessing import freeze_support except: print( now().strftime("%H:%M:%S ") + 'Cpuinfo is not installed. Miner will try to install it. If it fails, please manually install "py-cpuinfo" python3 package.\nIf you can\'t install it, use the Minimal-PC_Miner.' ) install("py-cpuinfo") try: # Check if colorama is installed from colorama import init, Fore, Back, Style except: print( now().strftime("%H:%M:%S ") + 'Colorama is not installed. Miner will try to install it. If it fails, please manually install "colorama" python3 package.\nIf you can\'t install it, use the Minimal-PC_Miner.' ) install("colorama") try: # Check if requests is installed import requests except: print( now().strftime("%H:%M:%S ") + 'Requests is not installed. Miner will try to install it. If it fails, please manually install "requests" python3 package.\nIf you can\'t install it, use the Minimal-PC_Miner.' ) install("requests") try: from pypresence import Presence except: print( 'Pypresence is not installed. Wallet will try to install it. If it fails, please manually install "pypresence" python3 package.' ) install("pypresence") # Global variables minerVersion = "2.2" # Version number timeout = 30 # Socket timeout resourcesFolder = "PCMiner_" + str(minerVersion) + "_resources" hash_mean = [] donatorrunning = False debug = "n" rigIdentifier = "None" useLowerDiff = "n" serveripfile = "https://raw.githubusercontent.com/revoxhere/duino-coin/gh-pages/serverip.txt" # Serverip file config = configparser.ConfigParser() donationlevel = 0 locale = locale.getdefaultlocale()[0] with open(f"{resourcesFolder}/langs.json") as lang_file: lang_file = json.load(lang_file) if locale == 'es_ES': lang_file = lang_file["spanish"] else: lang_file = lang_file["english"] if not os.path.exists(resourcesFolder): os.mkdir(resourcesFolder) # Create resources folder if it doesn't exist def debugOutput(text): if debug == "y": print(now().strftime(Style.DIM + "%H:%M:%S.%f ") + "DEBUG: " + text) def title(title): if os.name == "nt": os.system("title " + title) else: print("\33]0;" + title + "\a", end="") sys.stdout.flush() def handler( signal_received, frame ): # If CTRL+C or SIGINT received, send CLOSE request to server in order to exit gracefully. 
print( now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ") + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys0 " + Back.RESET + Fore.YELLOW + lang_file["sigint_detected"] + Style.NORMAL + Fore.WHITE + lang_file["goodbye"] ) handlerShown = True try: soc.close() except: pass os._exit(0) signal(SIGINT, handler) # Enable signal handler def Greeting(): # Greeting message depending on time global greeting print(Style.RESET_ALL) current_hour = time.strptime(time.ctime(time.time())).tm_hour if current_hour < 12: greeting = lang_file["greeting_morning"] elif current_hour == 12: greeting = lang_file["greeting_noon"] elif current_hour > 12 and current_hour < 18: greeting = lang_file["greeting_afternoon"] elif current_hour >= 18: greeting = lang_file["greeting_evening"] else: greeting = lang_file["greeting_back"] print( Style.RESET_ALL + " > " + Fore.YELLOW + Style.BRIGHT + lang_file["banner"] + Style.RESET_ALL + Fore.WHITE + " (v" + str(minerVersion) + ") 2019-2021" ) # Startup message print( Style.RESET_ALL + " > " + Fore.YELLOW + "https://github.com/revoxhere/duino-coin" ) try: print( Style.RESET_ALL + " > " + Fore.WHITE + "CPU: " + Style.BRIGHT + Fore.YELLOW + str(threadcount) + "x " + str(cpu["brand_raw"]) ) except: if debug == "y": raise if os.name == "nt" or os.name == "posix": print( Style.RESET_ALL + " > " + Fore.WHITE + lang_file["donation_level"] + Style.BRIGHT + Fore.YELLOW + str(donationlevel) ) if useLowerDiff == "y": diffName = lang_file["medium_diff"] else: diffName = lang_file["net_diff"] print( Style.RESET_ALL + " > " + Fore.WHITE + lang_file["algorithm"] + Style.BRIGHT + Fore.YELLOW + "DUCO-S1 @ " + diffName ) print( Style.RESET_ALL + " > " + Fore.WHITE + lang_file["rig_identifier"] + Style.BRIGHT + Fore.YELLOW + rigIdentifier ) print( Style.RESET_ALL + " > " + Fore.WHITE + str(greeting) + ", " + Style.BRIGHT + Fore.YELLOW + str(username) + "!\n" ) if os.name == "nt": if not Path( resourcesFolder + "/Donate_executable.exe" ).is_file(): # Initial miner executable section debugOutput("OS is Windows, downloading developer donation executable") url = "https://github.com/revoxhere/duino-coin/blob/useful-tools/DonateExecutableWindows.exe?raw=true" r = requests.get(url) with open(resourcesFolder + "/Donate_executable.exe", "wb") as f: f.write(r.content) elif os.name == "posix": if not Path( resourcesFolder + "/Donate_executable" ).is_file(): # Initial miner executable section debugOutput("OS is Windows, downloading developer donation executable") url = "https://github.com/revoxhere/duino-coin/blob/useful-tools/DonateExecutableLinux?raw=true" r = requests.get(url) with open(resourcesFolder + "/Donate_executable", "wb") as f: f.write(r.content) def hashrateCalculator(hashcount, khashcount): # Hashes/sec calculation while True: hash_mean.append( hashcount.value / 1000 ) # Append last hashcount in kH to the list hashcount.value = 0 # Reset the counter khashcount.value = int( statistics.mean(hash_mean[-50:]) ) # Calculate average hashrate from last 20 hashrate measurements time.sleep(1) def loadConfig(): # Config loading section global username, efficiency, donationlevel, debug, threadcount, useLowerDiff, rigIdentifier if not Path( resourcesFolder + "/Miner_config.cfg" ).is_file(): # Initial configuration section print( Style.BRIGHT + lang_file["basic_config_tool"] + resourcesFolder + lang_file["edit_config_file_warning"] ) print( Style.RESET_ALL + lang_file["dont_have_account"] + Fore.YELLOW + lang_file["wallet"] + Fore.WHITE + lang_file["register_warning"] ) username = input( 
            Style.RESET_ALL + Fore.YELLOW + lang_file["ask_username"] + Style.BRIGHT
        )
        efficiency = input(
            Style.RESET_ALL + Fore.YELLOW + lang_file["ask_intensity"] + Style.BRIGHT
        )
        threadcount = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + lang_file["ask_threads"]
            + str(multiprocessing.cpu_count())
            + "): "
            + Style.BRIGHT
        )
        useLowerDiff = input(
            Style.RESET_ALL + Fore.YELLOW + lang_file["ask_lower_difficulty"] + Style.BRIGHT
        )
        rigIdentifier = input(
            Style.RESET_ALL + Fore.YELLOW + lang_file["ask_rig_identifier"] + Style.BRIGHT
        )
        if rigIdentifier == "y" or rigIdentifier == "Y":
            rigIdentifier = input(
                Style.RESET_ALL + Fore.YELLOW + lang_file["ask_rig_name"] + Style.BRIGHT
            )
        else:
            rigIdentifier = "None"
        donationlevel = "0"
        if os.name == "nt" or os.name == "posix":
            donationlevel = input(
                Style.RESET_ALL + Fore.YELLOW + lang_file["ask_donation_level"] + Style.BRIGHT
            )
        efficiency = re.sub(r"\D", "", efficiency)  # Check whether efficiency is correct
        if float(efficiency) > int(100):
            efficiency = 100
        if float(efficiency) < int(1):
            efficiency = 1
        threadcount = re.sub(
            r"\D", "", threadcount
        )  # Check whether threadcount is correct
        if int(threadcount) > int(8):
            threadcount = 8
        if int(threadcount) < int(1):
            threadcount = 1
        if useLowerDiff == "y" or useLowerDiff == "Y":
            useLowerDiff = "y"
        else:
            useLowerDiff = "n"
        donationlevel = re.sub(
            r"\D", "", donationlevel
        )  # Check whether donationlevel is correct
        if float(donationlevel) > int(5):
            donationlevel = 5
        if float(donationlevel) < int(0):
            donationlevel = 0
        config["miner"] = {  # Format data
            "username": username,
            "efficiency": efficiency,
            "threads": threadcount,
            "useLowerDiff": useLowerDiff,
            "donate": donationlevel,
            "identifier": rigIdentifier,
            "debug": "n",
        }
        with open(
            resourcesFolder + "/Miner_config.cfg", "w"
        ) as configfile:  # Write data to file
            config.write(configfile)
        efficiency = (
            100 - float(efficiency)
        ) * 0.01  # Calculate efficiency for use with the sleep function
        print(Style.RESET_ALL + lang_file["config_saved"])
    else:  # If config already exists, load from it
        config.read(resourcesFolder + "/Miner_config.cfg")
        username = config["miner"]["username"]
        efficiency = config["miner"]["efficiency"]
        efficiency = (
            100 - float(efficiency)
        ) * 0.01  # Calculate efficiency for use with the sleep function
        threadcount = config["miner"]["threads"]
        useLowerDiff = config["miner"]["useLowerDiff"]
        donationlevel = config["miner"]["donate"]
        rigIdentifier = config["miner"]["identifier"]
        debug = config["miner"]["debug"]


def Donate():
    global donationlevel, donatorrunning, donateExecutable
    if os.name == "nt":
        cmd = (
            "cd "
            + resourcesFolder
            + "& Donate_executable.exe -o stratum+tcp://blockmasters.co:6033 -u 9RTb3ikRrWExsF6fis85g7vKqU1tQYVFuR -p PCmW,c=XMG,d=16 -s 4 -e "
        )
    elif os.name == "posix":
        cmd = (
            "cd "
            + resourcesFolder
            + "&& chmod +x Donate_executable && ./Donate_executable -o stratum+tcp://blockmasters.co:6033 -u 9RTb3ikRrWExsF6fis85g7vKqU1tQYVFuR -p PCmL,c=XMG,d=16 -s 4 -e "
        )
    if int(donationlevel) <= 0:
        print(
            now().strftime(Style.DIM + "%H:%M:%S ")
            + Style.RESET_ALL
            + Style.BRIGHT
            + Back.GREEN
            + Fore.WHITE
            + " sys0 "
            + Back.RESET
            + Fore.YELLOW
            + lang_file["free_network_warning"]
            + Style.BRIGHT
            + Fore.YELLOW
            + lang_file["donate_warning"]
            + Style.RESET_ALL
            + Fore.GREEN
            + "https://duinocoin.com/donate"
            + Style.BRIGHT
            + Fore.YELLOW
            + lang_file["learn_more_donate"]
        )
        time.sleep(10)
    if donatorrunning == False:
        if int(donationlevel) == 5:
            cmd += "100"
        elif int(donationlevel) == 4:
            cmd += "85"
        elif int(donationlevel) == 3:
            cmd += "60"
        elif int(donationlevel) == 2:
            cmd += "30"
        elif
int(donationlevel) == 1: cmd += "15" if int(donationlevel) > 0: # Launch CMD as subprocess debugOutput("Starting donation process") donatorrunning = True donateExecutable = subprocess.Popen( cmd, shell=True, stderr=subprocess.DEVNULL ) print( now().strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys0 " + Back.RESET + Fore.RED + lang_file["thanks_donation"] ) def Thread( threadid, hashcount, accepted, rejected, useLowerDiff, khashcount, username, efficiency, rigIdentifier, ): while True: while True: try: res = requests.get( serveripfile, data=None ) # Use request to grab data from raw github file if res.status_code == 200: # Check for response content = ( res.content.decode().splitlines() ) # Read content and split into lines masterServer_address = content[0] # Line 1 = pool address masterServer_port = content[1] # Line 2 = pool port debugOutput( "Retrieved pool IP: " + masterServer_address + ":" + str(masterServer_port) ) break except: # If it wasn't, display a message print( now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ") + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net" + str(threadid) + " " + Back.RESET + Fore.RED + lang_file["data_error"] ) if debug == "y": raise time.sleep(10) while True: # This section connects to the server try: soc = socket.socket() soc.connect( (str(masterServer_address), int(masterServer_port)) ) # Connect to the server serverVersion = soc.recv(3).decode() # Get server version debugOutput("Server version: " + serverVersion) if ( float(serverVersion) <= float(minerVersion) and len(serverVersion) == 3 ): # If miner is up-to-date, display a message and continue print( now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ") + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net" + str(threadid) + " " + Back.RESET + Fore.YELLOW + lang_file["connected"] + Style.RESET_ALL + Fore.WHITE + lang_file["connected_server"] + str(serverVersion) + ")" ) else: print( now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ") + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys" + str(threadid) + " " + Back.RESET + Fore.RED + lang_file["outdated_miner"] + minerVersion + ")," + Style.RESET_ALL + Fore.RED + lang_file["server_is_on_version"] + serverVersion + lang_file["update_warning"] ) break except: print( now().strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net" + str(threadid) + " " + Style.RESET_ALL + Style.BRIGHT + Fore.RED + lang_file["connecting_error"] + Style.RESET_ALL ) if debug == "y": raise print( now().strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys" + str(threadid) + " " + Back.RESET + Fore.YELLOW + lang_file["mining_thread"] + str(threadid) + lang_file["mining_thread_starting"] + Style.RESET_ALL + Fore.WHITE + lang_file["using_algo"] + Fore.YELLOW + str(int(100 - efficiency * 100)) + f"% {lang_file['efficiency']}" ) while True: # Mining section try: if float(100 - efficiency * 100) < 100: time.sleep( float(efficiency * 5) ) # Sleep to achieve lower efficiency if less than 100 selected while True: if useLowerDiff == "n": soc.send( bytes(f"JOB,{str(username)}", encoding="utf8") ) # Send job request else: soc.send( bytes(f"JOB,{str(username)},MEDIUM", encoding="utf8") ) # Send job request with lower diff job = soc.recv(128).decode() # Get work from pool job = job.split(",") # Split received data to job and difficulty diff = int(job[2]) if job[0] and job[1] and job[2]: debugOutput(str(threadid) + "Job received: " + 
str(job)) break # If job received, continue to hashing algo threadhashcount = 0 # Reset hash counter for this thread for ducos1res in range( 100 * int(diff) + 1 ): # Loop from 1 too 100*diff) ducos1 = hashlib.sha1( str(job[0] + str(ducos1res)).encode("utf-8") ).hexdigest() # Generate hash threadhashcount += ( 1 # Increment hash counter for hashrate calculator ) if job[1] == ducos1: # If result is even with job, send the result hashcount.value += threadhashcount # Add this thread hash counter to the global counter debugOutput(str(threadid) + "Result found: " + str(ducos1res)) while True: soc.send( bytes( f"{str(ducos1res)},{str(threadhashcount)},Official Python Miner v{str(minerVersion)},{str(rigIdentifier)}", encoding="utf8", ) ) # Send result of hashing algorithm to the server responsetimetart = now() feedback = soc.recv(4).decode() # Get feedback debugOutput( str(threadid) + "Feedback received: " + str(feedback) ) responsetimestop = now() # Measure server ping ping = responsetimestop - responsetimetart # Calculate ping ping = str(int(ping.microseconds / 1000)) # Convert to ms if khashcount.value > 1000: formattedhashcount = ( str(round(khashcount.value / 1000, 2)) + " MH/s" ) else: formattedhashcount = str(khashcount.value) + " kH/s" debugOutput("Ping: " + ping) if feedback == "GOOD": # If result was good accepted.value += 1 # Share accepted = increment feedback shares counter by 1 title( lang_file["duco_python_miner"] + str(minerVersion) + ") - " + str(accepted.value) + "/" + str(accepted.value + rejected.value) + lang_file["accepted_shares"] ) print( now().strftime( Style.RESET_ALL + Style.DIM + "%H:%M:%S " ) + Style.BRIGHT + Back.YELLOW + Fore.WHITE + " cpu" + str(threadid) + " " + Back.RESET + Fore.GREEN + lang_file["accepted"] + Fore.WHITE + str(accepted.value) + "/" + str(accepted.value + rejected.value) + Back.RESET + Fore.YELLOW + " (" + str( int( ( accepted.value / (accepted.value + rejected.value) * 100 ) ) ) + "%)" + Style.NORMAL + Fore.WHITE + " ∙ " + Style.BRIGHT + Fore.WHITE + str(formattedhashcount) + Style.NORMAL + " @ diff " + str(diff) + " ∙ " + Fore.BLUE + "ping " + ping + "ms" ) break # Repeat elif feedback == "BLOCK": # If block was found accepted.value += 1 # Share accepted = increment feedback shares counter by 1 title( lang_file["duco_python_miner"] + str(minerVersion) + ") - " + str(accepted.value) + "/" + str(accepted.value + rejected.value) + lang_file["accepted_shares"] ) print( now().strftime( Style.RESET_ALL + Style.DIM + "%H:%M:%S " ) + Style.BRIGHT + Back.YELLOW + Fore.WHITE + " cpu" + str(threadid) + " " + Back.RESET + Fore.CYAN + lang_file["block_found"] + Fore.WHITE + str(accepted.value) + "/" + str(accepted.value + rejected.value) + Back.RESET + Fore.YELLOW + " (" + str( int( ( accepted.value / (accepted.value + rejected.value) * 100 ) ) ) + "%)" + Style.NORMAL + Fore.WHITE + " ∙ " + Style.BRIGHT + Fore.WHITE + str(formattedhashcount) + Style.NORMAL + " @ diff " + str(diff) + " ∙ " + Fore.BLUE + "ping " + ping + "ms" ) break # Repeat elif feedback == "ERR": # If server reports internal error print( now().strftime( Style.RESET_ALL + Style.DIM + "%H:%M:%S " ) + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net" + str(threadid) + " " + Back.RESET + Fore.RED + lang_file["internal_server_error"] + Style.RESET_ALL + Fore.RED + lang_file["retrying"] ) time.sleep(10) else: # If result was bad rejected.value += 1 # Share rejected = increment bad shares counter by 1 title( lang_file["duco_python_miner"] + str(minerVersion) + ") - " + str(accepted.value) + "/" + 
str(accepted.value + rejected.value) + lang_file["accepted_shares"] ) print( now().strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.YELLOW + Fore.WHITE + " cpu" + str(threadid) + " " + Back.RESET + Fore.RED + lang_file["rejected"] + Fore.WHITE + str(accepted.value) + "/" + str(accepted.value + rejected.value) + Back.RESET + Fore.YELLOW + " (" + str( int( ( accepted.value / (accepted.value + rejected.value) * 100 ) ) ) + "%)" + Style.NORMAL + Fore.WHITE + " ∙ " + Style.BRIGHT + Fore.WHITE + str(formattedhashcount) + Style.NORMAL + " @ diff " + str(diff) + " ∙ " + Fore.BLUE + "ping " + ping + "ms" ) break # Repeat break # Repeat except Exception as e: print( now().strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net" + str(threadid) + " " + Style.RESET_ALL + Style.BRIGHT + Fore.MAGENTA + lang_file["error_while_mining"] + Style.RESET_ALL ) if debug == "y": raise time.sleep(5) break def initRichPresence(): global RPC try: RPC = Presence(808045598447632384) RPC.connect() except: # Discord not launched if debug == "y": raise def updateRichPresence(): startTime = int(time.time()) while True: try: hashcount = statistics.mean(hash_mean[-50:]) if hashcount > 1000: hashcount = str(round(hashcount / 1000, 2)) + " MH/s" else: hashcount = str(int(hashcount)) + " kH/s" RPC.update( details="Hashrate: " + str(hashcount), start=startTime, state="Acc. shares: " + str(accepted.value) + "/" + str(rejected.value + accepted.value), large_image="ducol", large_text="Duino-Coin, a cryptocurrency that can be mined with Arduino boards", buttons=[ {"label": "Learn more", "url": "https://duinocoin.com"}, {"label": "Discord Server", "url": "https://discord.gg/k48Ht5y"}, ], ) debugOutput("Rich presence updated") except: # Discord not launched if debug == "y": raise time.sleep(15) # 15 seconds to respect Discord rate limit if __name__ == "__main__": multiprocessing.freeze_support() cpu = cpuinfo.get_cpu_info() # Processor info init(autoreset=True) # Enable colorama title(lang_file["duco_python_miner"] + str(minerVersion) + ")") try: loadConfig() # Load config file or create new one debugOutput("Config file loaded") except: print( now().strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys0 " + Style.RESET_ALL + Style.BRIGHT + Fore.RED + lang_file["load_config_error"] + resourcesFolder + lang_file["load_config_error_warning"] + Style.RESET_ALL ) if debug == "y": raise time.sleep(10) os._exit(1) try: Greeting() # Display greeting message debugOutput("Greeting displayed") except: if debug == "y": raise try: Donate() # Start donation thread except: if debug == "y": raise hashcount = multiprocessing.Value("i", 0) khashcount = multiprocessing.Value("i", 0) accepted = multiprocessing.Value("i", 0) rejected = multiprocessing.Value("i", 0) threading.Thread( target=hashrateCalculator, args=(hashcount, khashcount) ).start() # Start hashrate calculator thread = [] for x in range(int(threadcount)): # Launch duco mining threads thread.append(x) thread[x] = multiprocessing.Process( target=Thread, args=( x, hashcount, accepted, rejected, useLowerDiff, khashcount, username, efficiency, rigIdentifier, ), ) thread[x].start() time.sleep(0.1) initRichPresence() threading.Thread(target=updateRichPresence).start()
[ "os.mkdir", "cpuinfo.get_cpu_info", "os.execl", "socket.socket", "multiprocessing.Value", "pathlib.Path", "sys.stdout.flush", "subprocess.check_call", "multiprocessing.cpu_count", "colorama.init", "pypresence.Presence", "os.path.exists", "requests.get", "configparser.ConfigParser", "re.sub", "datetime.datetime.now", "threading.Thread", "subprocess.Popen", "os.system", "time.sleep", "statistics.mean", "signal.signal", "locale.getdefaultlocale", "json.load", "time.time", "os._exit", "multiprocessing.Process", "multiprocessing.freeze_support" ]
[((2416, 2443), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (2441, 2443), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((3716, 3739), 'signal.signal', 'signal', (['SIGINT', 'handler'], {}), '(SIGINT, handler)\n', (3722, 3739), False, 'from signal import signal, SIGINT\n'), ((513, 585), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 'install', package]"], {}), "([sys.executable, '-m', 'pip', 'install', package])\n", (534, 585), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((591, 642), 'os.execl', 'os.execl', (['sys.executable', 'sys.executable', '*sys.argv'], {}), '(sys.executable, sys.executable, *sys.argv)\n', (599, 642), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((671, 694), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (692, 694), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((2473, 2498), 'locale.getdefaultlocale', 'locale.getdefaultlocale', ([], {}), '()\n', (2496, 2498), False, 'import locale, json\n'), ((2579, 2599), 'json.load', 'json.load', (['lang_file'], {}), '(lang_file)\n', (2588, 2599), False, 'import locale, json\n'), ((2722, 2753), 'os.path.exists', 'os.path.exists', (['resourcesFolder'], {}), '(resourcesFolder)\n', (2736, 2753), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((2760, 2785), 'os.mkdir', 'os.mkdir', (['resourcesFolder'], {}), '(resourcesFolder)\n', (2768, 2785), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((3699, 3710), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (3707, 3710), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((34707, 34739), 'multiprocessing.freeze_support', 'multiprocessing.freeze_support', ([], {}), '()\n', (34737, 34739), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((34751, 34773), 'cpuinfo.get_cpu_info', 'cpuinfo.get_cpu_info', ([], {}), '()\n', (34771, 34773), False, 'import cpuinfo\n'), ((34797, 34817), 'colorama.init', 'init', ([], {'autoreset': '(True)'}), '(autoreset=True)\n', (34801, 34817), False, 'from colorama import init, Fore, Back, Style\n'), ((35873, 35902), 'multiprocessing.Value', 'multiprocessing.Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (35894, 35902), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((35921, 35950), 'multiprocessing.Value', 'multiprocessing.Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (35942, 35950), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((35967, 35996), 'multiprocessing.Value', 'multiprocessing.Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (35988, 35996), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((36013, 36042), 'multiprocessing.Value', 'multiprocessing.Value', 
(['"""i"""', '(0)'], {}), "('i', 0)\n", (36034, 36042), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((3018, 3045), 'os.system', 'os.system', (["('title ' + title)"], {}), "('title ' + title)\n", (3027, 3045), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((3114, 3132), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3130, 3132), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((7561, 7574), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7571, 7574), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((9786, 9815), 're.sub', 're.sub', (['"""\\\\D"""', '""""""', 'efficiency'], {}), "('\\\\D', '', efficiency)\n", (9792, 9815), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((10018, 10048), 're.sub', 're.sub', (['"""\\\\D"""', '""""""', 'threadcount'], {}), "('\\\\D', '', threadcount)\n", (10024, 10048), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((10411, 10443), 're.sub', 're.sub', (['"""\\\\D"""', '""""""', 'donationlevel'], {}), "('\\\\D', '', donationlevel)\n", (10417, 10443), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((13199, 13213), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (13209, 13213), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((33351, 33379), 'pypresence.Presence', 'Presence', (['(808045598447632384)'], {}), '(808045598447632384)\n', (33359, 33379), False, 'from pypresence import Presence\n'), ((33537, 33548), 'time.time', 'time.time', ([], {}), '()\n', (33546, 33548), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((34611, 34625), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (34621, 34625), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((36312, 36458), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'Thread', 'args': '(x, hashcount, accepted, rejected, useLowerDiff, khashcount, username,\n efficiency, rigIdentifier)'}), '(target=Thread, args=(x, hashcount, accepted,\n rejected, useLowerDiff, khashcount, username, efficiency, rigIdentifier))\n', (36335, 36458), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((36698, 36713), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (36708, 36713), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((6499, 6516), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6511, 6516), False, 'import requests\n'), ((7443, 7475), 'statistics.mean', 'statistics.mean', (['hash_mean[-50:]'], {}), '(hash_mean[-50:])\n', (7458, 7475), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((13751, 13811), 
'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stderr': 'subprocess.DEVNULL'}), '(cmd, shell=True, stderr=subprocess.DEVNULL)\n', (13767, 13811), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((33606, 33638), 'statistics.mean', 'statistics.mean', (['hash_mean[-50:]'], {}), '(hash_mean[-50:])\n', (33621, 33638), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((35548, 35562), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (35558, 35562), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((35572, 35583), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (35580, 35583), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((36050, 36123), 'threading.Thread', 'threading.Thread', ([], {'target': 'hashrateCalculator', 'args': '(hashcount, khashcount)'}), '(target=hashrateCalculator, args=(hashcount, khashcount))\n', (36066, 36123), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((36743, 36786), 'threading.Thread', 'threading.Thread', ([], {'target': 'updateRichPresence'}), '(target=updateRichPresence)\n', (36759, 36786), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((3920, 3931), 'time.time', 'time.time', ([], {}), '()\n', (3929, 3931), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((7006, 7023), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (7018, 7023), False, 'import requests\n'), ((7735, 7778), 'pathlib.Path', 'Path', (["(resourcesFolder + '/Miner_config.cfg')"], {}), "(resourcesFolder + '/Miner_config.cfg')\n", (7739, 7778), False, 'from pathlib import Path\n'), ((14446, 14483), 'requests.get', 'requests.get', (['serveripfile'], {'data': 'None'}), '(serveripfile, data=None)\n', (14458, 14483), False, 'import requests\n'), ((15887, 15902), 'socket.socket', 'socket.socket', ([], {}), '()\n', (15900, 15902), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((6161, 6209), 'pathlib.Path', 'Path', (["(resourcesFolder + '/Donate_executable.exe')"], {}), "(resourcesFolder + '/Donate_executable.exe')\n", (6165, 6209), False, 'from pathlib import Path\n'), ((15771, 15785), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (15781, 15785), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((33244, 33257), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (33254, 33257), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((6678, 6722), 'pathlib.Path', 'Path', (["(resourcesFolder + '/Donate_executable')"], {}), "(resourcesFolder + '/Donate_executable')\n", (6682, 6722), False, 'from pathlib import Path\n'), ((8739, 8766), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (8764, 8766), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n'), ((29583, 
29597), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (29593, 29597), False, 'import socket, statistics, threading, multiprocessing, time, re, subprocess, hashlib, configparser, sys, datetime, os\n')]
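Stripped of networking and display, the mining loop above implements the DUCO-S1 job: given a previous hash, an expected SHA-1 digest, and a difficulty, brute-force nonces in [0, 100 * difficulty] until sha1(prev_hash + nonce) matches. A self-contained restatement of that inner loop; the sample job below is fabricated for the demo.

import hashlib

def solve_ducos1(prev_hash, expected_hex, difficulty):
    # Mirrors the miner's inner loop: try every nonce up to 100 * difficulty.
    for nonce in range(100 * difficulty + 1):
        digest = hashlib.sha1((prev_hash + str(nonce)).encode('utf-8')).hexdigest()
        if digest == expected_hex:
            return nonce
    return None

# Fabricated job: pick a nonce, derive the target digest, then recover it.
prev_hash = 'a' * 40
target = hashlib.sha1((prev_hash + '4242').encode('utf-8')).hexdigest()
print(solve_ducos1(prev_hash, target, difficulty=43))  # -> 4242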
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import globalFunctions
import json
import re
import os
import glob


class MangaRock():
    def __init__(self, manga_url, download_directory, chapter_range, **kwargs):
        current_directory = kwargs.get("current_directory")
        conversion = kwargs.get("conversion")
        keep_files = kwargs.get("keep_files")
        self.logging = kwargs.get("log_flag")
        self.sorting = kwargs.get("sorting_order")
        self.manga_url = manga_url
        self.print_index = kwargs.get("print_index")

        if len(str(manga_url).split("/")) == 5:  # '==', not 'is': identity checks on ints are unreliable
            self.comic_id = str(str(manga_url).split("/")[-1])
            self.full_series(comic_id=self.comic_id, sorting=self.sorting,
                             download_directory=download_directory, chapter_range=chapter_range,
                             conversion=conversion, keep_files=keep_files)
        else:
            self.chapter_id = str(str(manga_url).split("/")[-1])
            self.comic_name, self.chapter_number = self.name_cleaner(url=manga_url,
                                                                     chapter_id=self.chapter_id)
            self.single_chapter(chapter_id=self.chapter_id, comic_name=self.comic_name,
                                chapter_number=self.chapter_number,
                                download_directory=download_directory,
                                conversion=conversion, keep_files=keep_files)

    def name_cleaner(self, url, chapter_id):
        print("Fetching The Chapter Data...")
        info_json_url = "https://api.mangarockhd.com/query/web401/info?oid=" + str(str(url).split("/")[4])
        source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=info_json_url)
        json_parse = json.loads(str(source))
        comic_name = str(json_parse["data"]["name"])
        chapter_number_value = ""
        print("")
        for chapter_number in json_parse["data"]["chapters"]:
            if str(chapter_id) in str(chapter_number.values()):
                chapter_number_value = chapter_number["name"]
            else:
                pass
        return comic_name, re.sub(r'[^A-Za-z0-9.\-\+\' ]+', '', chapter_number_value.replace(":", " -"))

    def file_decryption(self, path_to_files):
        """
        A REALLY BIG THANKS TO 'dradzenglor' for decrypting the files! Amazing work!
Follow The Thread On Reddit : https://www.reddit.com/r/codes/comments/7mdx70/need_help_decrypting_this_string/ """ for mri_file in glob.glob(os.path.abspath(path_to_files) + os.sep + "*.mri"): data = open(mri_file, "rb").read() n = len(data) + 7 header = [82, 73, 70, 70, 255 & n, n >> 8 & 255, n >> 16 & 255, n >> 24 & 255, 87, 69, 66, 80, 86, 80, 56] data = [x ^ 101 for x in data] open(str(mri_file).replace(".mri", ".jpg"), 'wb').write(bytes(header + data)) # Let's delete the .mri file os.remove(mri_file) def single_chapter(self, chapter_id, comic_name, chapter_number, download_directory, conversion, keep_files): image_api_link = "https://api.mangarockhd.com/query/web401/pages?oid=" + str(chapter_id) source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=image_api_link) json_parse = json.loads(str(source)) file_directory = globalFunctions.GlobalFunctions().create_file_directory(chapter_number, comic_name) directory_path = os.path.realpath(str(download_directory) + os.sep + str(file_directory)) if not os.path.exists(directory_path): os.makedirs(directory_path) links = [] file_names = [] for current_chapter, image_link in enumerate(json_parse["data"]): # file_name = str(json_parse["data"].index(image_link)) + ".mri" current_chapter += 1 file_name = str(globalFunctions.GlobalFunctions().prepend_zeroes(current_chapter, len(json_parse["data"]))) + ".mri" file_names.append(file_name) links.append(image_link) globalFunctions.GlobalFunctions().multithread_download(chapter_number, comic_name, None, directory_path, file_names, links, self.logging) print("Decrypting Files...") self.file_decryption(path_to_files=directory_path) # Calling the method that does the magic! globalFunctions.GlobalFunctions().conversion(directory_path, conversion, keep_files, comic_name, chapter_number) return 0 def full_series(self, comic_id, sorting, download_directory, chapter_range, conversion, keep_files): chapters_dict = {} api_url = "https://api.mangarockhd.com/query/web401/info?oid=" + str(comic_id) source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=api_url) json_parse = json.loads(str(source)) comic_name = str(json_parse["data"]["name"]) if chapter_range != "All": # -1 to shift the episode number accordingly to the INDEX of it. List starts from 0 xD! 
starting = int(str(chapter_range).split("-")[0]) total_chapters = int(json_parse["data"]["total_chapters"]) if str(chapter_range).split("-")[1].isdigit(): ending = int(str(chapter_range).split("-")[1]) else: ending = total_chapters if ending > total_chapters: ending = total_chapters for range_value in range(starting, ending + 1): chapters_dict[str(json_parse["data"]["chapters"][int(range_value) - 1]["oid"])] = re.sub('[^A-Za-z0-9.\-\+\' ]+', '', json_parse["data"]["chapters"][int(range_value) - 1]["name"].replace(":", " -")) else: for chapter in json_parse["data"]["chapters"]: chapters_dict[str(chapter["oid"])] = re.sub('[^A-Za-z0-9.\-\+\' ]+', '', chapter["name"].replace(":", " -")) if self.print_index: chapters_ = json_parse["data"]["chapters"] for chapter in chapters_: print(str(chapter["order"] + 1) + ": " + chapter["name"].encode('utf-8')) return for single_chapter in chapters_dict: try: self.single_chapter(chapter_id=str(single_chapter), comic_name=comic_name, chapter_number=str(chapters_dict[single_chapter]).strip().title(), download_directory=download_directory, conversion=conversion, keep_files=keep_files) except Exception as ex: break # break to continue processing other mangas # @Chr1st-oo - modified condition due to some changes on automatic download and config. if chapter_range != "All" and (chapter_range.split("-")[1] == "__EnD__" or len(chapter_range.split("-")) == 3): globalFunctions.GlobalFunctions().addOne(self.manga_url) return 0
[ "os.remove", "os.path.abspath", "os.makedirs", "os.path.exists", "globalFunctions.GlobalFunctions" ]
[((2874, 2893), 'os.remove', 'os.remove', (['mri_file'], {}), '(mri_file)\n', (2883, 2893), False, 'import os\n'), ((3478, 3508), 'os.path.exists', 'os.path.exists', (['directory_path'], {}), '(directory_path)\n', (3492, 3508), False, 'import os\n'), ((3522, 3549), 'os.makedirs', 'os.makedirs', (['directory_path'], {}), '(directory_path)\n', (3533, 3549), False, 'import os\n'), ((1569, 1602), 'globalFunctions.GlobalFunctions', 'globalFunctions.GlobalFunctions', ([], {}), '()\n', (1600, 1602), False, 'import globalFunctions\n'), ((3132, 3165), 'globalFunctions.GlobalFunctions', 'globalFunctions.GlobalFunctions', ([], {}), '()\n', (3163, 3165), False, 'import globalFunctions\n'), ((3279, 3312), 'globalFunctions.GlobalFunctions', 'globalFunctions.GlobalFunctions', ([], {}), '()\n', (3310, 3312), False, 'import globalFunctions\n'), ((3998, 4031), 'globalFunctions.GlobalFunctions', 'globalFunctions.GlobalFunctions', ([], {}), '()\n', (4029, 4031), False, 'import globalFunctions\n'), ((4359, 4392), 'globalFunctions.GlobalFunctions', 'globalFunctions.GlobalFunctions', ([], {}), '()\n', (4390, 4392), False, 'import globalFunctions\n'), ((4790, 4823), 'globalFunctions.GlobalFunctions', 'globalFunctions.GlobalFunctions', ([], {}), '()\n', (4821, 4823), False, 'import globalFunctions\n'), ((2436, 2466), 'os.path.abspath', 'os.path.abspath', (['path_to_files'], {}), '(path_to_files)\n', (2451, 2466), False, 'import os\n'), ((3806, 3839), 'globalFunctions.GlobalFunctions', 'globalFunctions.GlobalFunctions', ([], {}), '()\n', (3837, 3839), False, 'import globalFunctions\n'), ((6961, 6994), 'globalFunctions.GlobalFunctions', 'globalFunctions.GlobalFunctions', ([], {}), '()\n', (6992, 6994), False, 'import globalFunctions\n')]
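The .mri decryption above is a fixed transform: XOR every byte with 101 and prepend a 15-byte RIFF/WEBP header whose size field is the payload length plus 7. The same logic as a pure function over bytes, equivalent to file_decryption minus the file I/O:

def decrypt_mri(payload: bytes) -> bytes:
    n = len(payload) + 7
    header = [82, 73, 70, 70,                                   # 'RIFF'
              n & 255, (n >> 8) & 255, (n >> 16) & 255, (n >> 24) & 255,
              87, 69, 66, 80, 86, 80, 56]                       # 'WEBPVP8'
    return bytes(header + [b ^ 101 for b in payload])

# Round-trip check on dummy data: XOR with 101 is its own inverse.
encrypted = bytes(b ^ 101 for b in b'not really webp data')
print(decrypt_mri(encrypted)[:4], decrypt_mri(encrypted)[15:])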
import itertools
import logging

from bs4 import BeautifulSoup
from chibi.file.temp import Chibi_temp_path

from .episode import Episode
from chibi_dl.site.base.site import Site


logger = logging.getLogger( "chibi_dl.sites.tmo_fans.serie" )


class Serie( Site ):
    def __init__( self, url, *args, **kw ):
        super().__init__( url, *args, **kw )

    def download( self, path ):
        serie_path = ( path + self.name ).made_safe()
        serie_path.mkdir()
        logger.info( "starting download of the series '{}' from {}".format(
            self.name, self.url ) )
        for episode in self.episodes:
            episode_path = serie_path + episode.file_name
            if ( episode_path.exists ):
                logger.info( (
                    "skipping episode {}: it already exists "
                    "at the destination" ).format( episode.title ) )
                continue
            download_folder = Chibi_temp_path()
            try:
                episode.download( download_folder )
            except Exception as e:
                logger.exception( "something went wrong while trying to "
                                  "download the episode" )
                continue
            episode.compress( serie_path, download_folder )

    @property
    def name( self ):
        try:
            return self._title
        except AttributeError:
            self.load_soup()
            return self._title

    @property
    def episodes( self ):
        try:
            return self._episodes
        except AttributeError:
            self.load_soup()
            return self._episodes

    def load_soup( self ):
        page = self.get( self.url, )
        soup = BeautifulSoup( page.content, 'html.parser' )
        page.close()
        try:
            self._title = "".join( soup.select(
                ".element-title.my-2" )[0].find_all(
                    text=True, recursive=False ) ).strip()
            self.load_episodes( soup )
        except Exception as e:
            import pdb
            pdb.post_mortem( e.__traceback__ )
            raise

    def load_episodes( self, soup ):
        self._episodes = []
        if "one_shot" in self.url:
            self.load_one_shot( soup )
            return
        else:
            chapter_container = soup.find( "div", { 'class': "card chapters" } )
            chapter_container_hidden = chapter_container.find(
                "div", id="chapters-collapsed" )
            if not chapter_container_hidden:
                chapters = chapter_container.ul.find_all( "li", recursive=False )
            else:
                chapters = itertools.chain(
                    chapter_container.ul.find_all( "li", recursive=False ),
                    chapter_container_hidden.find_all( "li", recursive=False ) )
        for chapter in chapters:
            links = chapter.select( "div.card.chapter-list-element" )[0].find_all( 'a' )
            fansub = links[0].text
            title = chapter.find( "h4" ).a.text
            url = links[0].find_next( "i", { "class": "fas fa-play fa-2x"} ).parent.get( 'href' )
            self._episodes.append( Episode.from_site(
                site=self, url=url, fansub=fansub, title=title ) )

    def load_one_shot( self, soup ):
        chapters = soup.find( "div", { "class": "card chapter-list-element" } )
        chapters = chapters.ul.find_all( "li", recursive=False )
        for i, chapter in enumerate( chapters ):
            parts = chapter.div.find_all( "div", recursive=False )
            url = parts[-1].a.get( "href" ).strip()
            self._episodes.append( Episode.from_site(
                site=self, url=url, fansub=parts[0].text.strip(),
                title=str( i ) ) )
[ "bs4.BeautifulSoup", "pdb.post_mortem", "logging.getLogger", "chibi.file.temp.Chibi_temp_path" ]
[((189, 239), 'logging.getLogger', 'logging.getLogger', (['"""chibi_dl.sites.tmo_fans.serie"""'], {}), "('chibi_dl.sites.tmo_fans.serie')\n", (206, 239), False, 'import logging\n'), ((1703, 1745), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (1716, 1745), False, 'from bs4 import BeautifulSoup\n'), ((924, 941), 'chibi.file.temp.Chibi_temp_path', 'Chibi_temp_path', ([], {}), '()\n', (939, 941), False, 'from chibi.file.temp import Chibi_temp_path\n'), ((2047, 2079), 'pdb.post_mortem', 'pdb.post_mortem', (['e.__traceback__'], {}), '(e.__traceback__)\n', (2062, 2079), False, 'import pdb\n')]
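A minimal, self-contained sketch (not from the project above) of the BeautifulSoup pattern the scraper relies on: find the chapter container and walk its direct <li> children. The HTML string is invented for illustration and only bs4 is assumed to be installed.

from bs4 import BeautifulSoup

html = """
<div class="card chapters"><ul>
  <li><h4><a href="/chap/2">Chapter 2</a></h4></li>
  <li><h4><a href="/chap/1">Chapter 1</a></h4></li>
</ul></div>
"""

soup = BeautifulSoup(html, 'html.parser')
container = soup.find("div", {"class": "card chapters"})
for li in container.ul.find_all("li", recursive=False):
    link = li.find("a")
    print(link.get("href"), link.text)  # e.g. "/chap/2 Chapter 2"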
import random as rd

# Random K-SAT instance generator in DIMACS CNF format:
# K literals per clause, N variables, and M = x*N/10 clauses.
K = int(input())
N = int(input())
x = int(input())
M = int(x * N / 10)

print("p cnf {0} {1}".format(N, M))
for i in range(M):
    l = []
    # Pick K distinct variables for this clause, each one negated
    # with probability 1/2.
    avail = [i + 1 for i in range(N)]
    rd.shuffle(avail)
    for j in range(K):
        s = rd.randint(0, 1)
        c = avail[j]
        if s == 0:
            l += [c]
        else:
            l += [-c]
    l += [0]  # DIMACS clauses are terminated by 0
    print(' '.join(map(str, l)))
[ "random.shuffle", "random.randint" ]
[((197, 214), 'random.shuffle', 'rd.shuffle', (['avail'], {}), '(avail)\n', (207, 214), True, 'import random as rd\n'), ((250, 266), 'random.randint', 'rd.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (260, 266), True, 'import random as rd\n')]
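A small companion sketch (standard library only, not part of the script above): evaluating an assignment against clauses in the DIMACS-style lines the generator prints. A clause is satisfied when at least one of its literals agrees with the assignment.

def satisfies(clause_lines, assignment):
    # assignment maps a 1-based variable index to True/False
    for line in clause_lines:
        lits = [int(tok) for tok in line.split()[:-1]]  # drop the trailing 0
        if not any(assignment[abs(l)] == (l > 0) for l in lits):
            return False
    return True

print(satisfies(["1 -2 0", "-1 3 0"], {1: True, 2: True, 3: True}))  # True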
# Copyright The Cloud Custodian Authors. # SPDX-License-Identifier: Apache-2.0 import json import logging import mock import os from .common import BaseTest from c7n.exceptions import PolicyExecutionError from c7n.policy import Policy from c7n import handler class HandleTest(BaseTest): def test_init_config_exec_option_merge(self): policy_config = { 'execution-options': { 'region': 'us-east-1', 'assume_role': 'arn:::', 'profile': 'dev', 'tracer': 'xray', 'account_id': '004', 'dryrun': True, 'cache': '/foobar.cache'}, 'policies': [ {'mode': { 'type': 'period', 'schedule': "rate(1 minute)", 'execution-options': { 'metrics_enabled': True, 'assume_role': 'arn::::007:foo', 'output_dir': 's3://mybucket/output'}}, 'resource': 'aws.ec2', 'name': 'check-dev'} ]} self.assertEqual( dict(handler.init_config(policy_config)), {'assume_role': 'arn::::007:foo', 'metrics_enabled': 'aws', 'tracer': 'xray', 'account_id': '007', 'region': 'us-east-1', 'output_dir': 's3://mybucket/output', # defaults 'external_id': None, 'dryrun': False, 'profile': None, 'authorization_file': None, 'cache': '', 'regions': (), 'cache_period': 0, 'log_group': None, 'metrics': None}) def setupLambdaEnv( self, policy_data, environment=None, err_execs=(), log_level=logging.INFO): work_dir = self.change_cwd() self.patch(handler, 'policy_data', None) self.patch(handler, 'policy_config', None) # don't require api creds to resolve account id if 'execution-options' not in policy_data: policy_data['execution-options'] = {'account_id': '007'} elif 'account_id' not in policy_data['execution-options']: policy_data['execution-options']['account_id'] = '007' with open(os.path.join(work_dir, 'config.json'), 'w') as fh: json.dump(policy_data, fh, indent=2) output = self.capture_logging('custodian.lambda', level=log_level) if environment: self.change_environment(**environment) policy_execution = [] validation_called = [] def validate(self): validation_called.append(True) def push(self, event, context): policy_execution.append((event, context)) if err_execs: raise err_execs.pop(0) self.patch(Policy, "push", push) self.patch(Policy, "validate", validate) return output, policy_execution def test_dispatch_log_event(self): output, executions = self.setupLambdaEnv( {'policies': [{'name': 'ec2', 'resource': 'ec2'}]}, {'C7N_DEBUG_EVENT': None}, log_level=logging.DEBUG) handler.dispatch_event({'detail': {'resource': 'xyz'}}, {}) self.assertTrue('xyz' in output.getvalue()) self.patch(handler, 'C7N_DEBUG_EVENT', False) handler.dispatch_event({'detail': {'resource': 'abc'}}, {}) self.assertFalse('abc' in output.getvalue()) self.assertTrue(executions) @mock.patch('c7n.handler.PolicyCollection') def test_dispatch_err_event(self, mock_collection): output, executions = self.setupLambdaEnv({ 'execution-options': { 'output_dir': 's3://xyz', 'account_id': '004'}, 'policies': [{'resource': 'ec2', 'name': 'xyz'}]}, log_level=logging.DEBUG) mock_collection.from_data.return_value = [] handler.dispatch_event({'detail': {'errorCode': 'unauthorized'}}, None) self.assertTrue('Skipping failed operation: unauthorized' in output.getvalue()) self.patch(handler, 'C7N_SKIP_EVTERR', False) handler.dispatch_event({'detail': {'errorCode': 'foi'}}, None) self.assertFalse('Skipping failed operation: foi' in output.getvalue()) mock_collection.from_data.assert_called_once() def test_dispatch_err_handle(self): output, executions = self.setupLambdaEnv({ 'execution-options': {'output_dir': 's3://xyz', 'account_id': '004'}, 'policies': [{'resource': 'ec2', 'name': 'xyz'}]}, 
err_execs=[PolicyExecutionError("foo")] * 2) self.assertRaises( PolicyExecutionError, handler.dispatch_event, {'detail': {'xyz': 'oui'}}, None) self.patch(handler, 'C7N_CATCH_ERR', True) handler.dispatch_event({'detail': {'xyz': 'oui'}}, None) self.assertEqual(output.getvalue().count('error during'), 2) def test_handler(self): output, executions = self.setupLambdaEnv({ 'policies': [{ 'resource': 'asg', 'name': 'auto'}]}, ) self.assertEqual( handler.dispatch_event({"detail": {"errorCode": "404"}}, None), None ) self.assertEqual(handler.dispatch_event({"detail": {}}, None), True) self.assertEqual(executions, [({"detail": {}, "debug": True}, None)])
[ "json.dump", "c7n.exceptions.PolicyExecutionError", "mock.patch", "c7n.handler.dispatch_event", "c7n.handler.init_config", "os.path.join" ]
[((3571, 3613), 'mock.patch', 'mock.patch', (['"""c7n.handler.PolicyCollection"""'], {}), "('c7n.handler.PolicyCollection')\n", (3581, 3613), False, 'import mock\n'), ((3241, 3300), 'c7n.handler.dispatch_event', 'handler.dispatch_event', (["{'detail': {'resource': 'xyz'}}", '{}'], {}), "({'detail': {'resource': 'xyz'}}, {})\n", (3263, 3300), False, 'from c7n import handler\n'), ((3416, 3475), 'c7n.handler.dispatch_event', 'handler.dispatch_event', (["{'detail': {'resource': 'abc'}}", '{}'], {}), "({'detail': {'resource': 'abc'}}, {})\n", (3438, 3475), False, 'from c7n import handler\n'), ((3997, 4068), 'c7n.handler.dispatch_event', 'handler.dispatch_event', (["{'detail': {'errorCode': 'unauthorized'}}", 'None'], {}), "({'detail': {'errorCode': 'unauthorized'}}, None)\n", (4019, 4068), False, 'from c7n import handler\n'), ((4219, 4281), 'c7n.handler.dispatch_event', 'handler.dispatch_event', (["{'detail': {'errorCode': 'foi'}}", 'None'], {}), "({'detail': {'errorCode': 'foi'}}, None)\n", (4241, 4281), False, 'from c7n import handler\n'), ((4915, 4971), 'c7n.handler.dispatch_event', 'handler.dispatch_event', (["{'detail': {'xyz': 'oui'}}", 'None'], {}), "({'detail': {'xyz': 'oui'}}, None)\n", (4937, 4971), False, 'from c7n import handler\n'), ((2391, 2427), 'json.dump', 'json.dump', (['policy_data', 'fh'], {'indent': '(2)'}), '(policy_data, fh, indent=2)\n', (2400, 2427), False, 'import json\n'), ((5251, 5313), 'c7n.handler.dispatch_event', 'handler.dispatch_event', (["{'detail': {'errorCode': '404'}}", 'None'], {}), "({'detail': {'errorCode': '404'}}, None)\n", (5273, 5313), False, 'from c7n import handler\n'), ((5355, 5399), 'c7n.handler.dispatch_event', 'handler.dispatch_event', (["{'detail': {}}", 'None'], {}), "({'detail': {}}, None)\n", (5377, 5399), False, 'from c7n import handler\n'), ((1152, 1186), 'c7n.handler.init_config', 'handler.init_config', (['policy_config'], {}), '(policy_config)\n', (1171, 1186), False, 'from c7n import handler\n'), ((2328, 2365), 'os.path.join', 'os.path.join', (['work_dir', '"""config.json"""'], {}), "(work_dir, 'config.json')\n", (2340, 2365), False, 'import os\n'), ((4677, 4704), 'c7n.exceptions.PolicyExecutionError', 'PolicyExecutionError', (['"""foo"""'], {}), "('foo')\n", (4697, 4704), False, 'from c7n.exceptions import PolicyExecutionError\n')]
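An illustrative, stdlib-only sketch of the monkey-patching idea these tests build on: temporarily replace a method on a class so every instance records its calls instead of doing real work. The Policy class below is a stand-in, not the real c7n one.

from unittest import mock

class Policy:
    def push(self, event, context):
        raise RuntimeError("real work")

calls = []
with mock.patch.object(Policy, "push", lambda self, e, c: calls.append((e, c))):
    Policy().push({"detail": {}}, None)

print(calls)  # [({'detail': {}}, None)]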
################################################################
# smallGraph.py
#
# Simpler graph class for use with structure confusion
# histograms.
#
# Author: <NAME>
# Copyright (c) 2013-2014 <NAME> and <NAME>
################################################################
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 10 04:13:40 2013

@author: Harold
"""
import itertools
import cmath
from math import sqrt

import compareTools

class SmallGraph(object):
    """Class for small graphs. The individual nodes and edges have one
    associated label. Only for small graphs, because the algorithms are not
    optimized; see the igraph or graph_tool modules for larger graphs."""

    # Define graph data elements ('data members' for an object in the class)
    __slots__ = ('nodes','edges', 'rednodes', 'rededges')

    ##################################
    # Constructors (in __init__)
    ##################################
    def __init__(self,*args):
        """ init the small graph with 2 lists:
            - list of nodes (id, label)
            - list of edges (id,id, label)
        """
        self.nodes = {}
        self.edges = {}
        self.rednodes = set()
        self.rededges = set()
        if(len(args) == 2 and isinstance(args[0],list) and isinstance(args[1],list)):
            for (i,l) in args[0]:
                self.nodes[i] = l
            for (i1,i2,l) in args[1]:
                self.edges[(i1,i2)] = l

    def printLG(self):
        for k in self.nodes.keys():
            print ("N,"+str(k)+","+(",".join(self.nodes[k])) + ",1.0")
        for (e1,e2) in self.edges.keys():
            print ("E,"+str(e1)+","+str(e2)+","+(",".join(self.edges[(e1,e2)])) + ",1.0")

    def __str__(self):
        """returns a string with nodes and edges. Format:
        nbNodes,id1,lab1,id2,lab2... ,nbedges,from1,to1,label1, from2, to2, label2,..."""
        out = str(len(self.nodes.keys()))
        for k in self.nodes.keys():
            out = out + ","+str(k)+","+str(self.nodes[k])
        out = out + ","+str(len(self.edges.keys()))
        for (e1,e2) in self.edges.keys():
            out = out + ","+str(e1)+","+str(e2)+","+str(self.edges[(e1,e2)])
        return out

    def fromStr(self, inStr):
        tab = inStr.split(',')
        nnode = int(tab[0])
        i = nnode*2+1
        for n in range(1,i,2):
            self.nodes[str(tab[n])] = str(tab[n+1])
        nedg = int(tab[i])
        for n in range(i+1, i+1+nedg*3,3 ):
            a = str(tab[n])
            b = str(tab[n+1])
            self.edges[(a,b)] = str(tab[n+2])

    def iso(self,osg):
        """true if the two graphs are isomorphic"""
        if(len(self.nodes.keys()) != len(osg.nodes.keys())):# or \
            # problem with '_' edges which exist but should be ignored
            #len(self.edges.keys()) != len(osg.edges.keys())):
            return False
        myLabels = self.nodes.values() + self.edges.values() + ['_'] # add no edge label
        hisLabels = osg.nodes.values() + osg.edges.values() + ['_'] # add no edge label
        #myLabels.sort()
        #hisLabels.sort()
        #if(myLabels != hisLabels):
        #print myLabels
        myLabelsFlat = [item for sublist in myLabels for item in sublist]
        hisLabelsFlat = [item for sublist in hisLabels for item in sublist]
        if compareTools.cmpNodes(myLabelsFlat, hisLabelsFlat) != (0,[]):
            return False
        # So they seem to be isomorphic (same label counts);
        # let's try all permutations of nodes
        for m in itertools.permutations(self.nodes.keys()):
            if self.equal(osg,m):
                return True
        return False

    def equal(self,osg, mapping):
        """using the mapping list, check if the nodes and edges have the
        same labels. The mapping is a list of self.nodes keys; the order
        gives the mapping. The numbers of nodes have to be the same."""
        mynodes = self.nodes.keys()
        nb = len(mynodes)
        onodes = osg.nodes.keys()
        if(nb != len(onodes) and nb != len(mapping)):
            return False
        hisNode = dict(zip(mapping, onodes))
        #print "Map : " + str(hisNode)
        # first check the node labels
        for (my,his) in hisNode.iteritems():
            #if(self.nodes[my] != osg.nodes[his]):
            if(compareTools.cmpNodes(self.nodes[my] ,osg.nodes[his]) != (0,[])):
                #print str((self.nodes[my] ,osg.nodes[his])) + ' are diff'
                return False
        # then check the edges, from self to other and reverse
        checkedEdg = set()
        # from self to other
        for (a,b) in self.edges.iterkeys():
            # id from the other through the mapping
            (oa,ob) = (str(hisNode[a]),str(hisNode[b]))
            checkedEdg.add((oa,ob))
            #print str((a,b)) + " <=> " + str((oa,ob))
            # if the edge does not exist or has a different label => mismatch
            if not (oa,ob) in osg.edges.keys():
                if compareTools.cmpEdges(self.edges[(a,b)], {'_' : 1.0}) != (0,[]):
                    #print str((oa,ob)) + " not in osg"
                    return False
            else:
                #if self.edges[(a,b)] != osg.edges[(oa,ob)]:
                if compareTools.cmpEdges(self.edges[(a,b)], osg.edges[(oa,ob)]) != (0,[]):
                    #print self.edges[(a,b)] + " != " + osg.edges[(oa,ob)]
                    return False
        # from other to self except checkedEdg; normally, only '_' edges are remaining
        for (oa,ob) in (set(osg.edges.iterkeys()) - checkedEdg):
            if compareTools.cmpEdges(osg.edges[(oa,ob)], {'_' : 1.0}) != (0,[]):
                return False
        return True

    def __eq__(self,o):
        return self.iso(o)

    def toSVG(self, size = 200, withDef = True, nodeShape='circle'):
        """ Generate a SVG XML string which draws the nodes (spread on a
        circle) and the edges, with all labels.
        Param size : the size of the svg image (square)
        Param withDef : if True generate the definition of the arrow
        (needed only once in a HTML file)"""
        svg = '<svg xmlns="http://www.w3.org/2000/svg" width="'+str(size)+'" height="'+str(size)+'">\n'
        n = len(self.nodes)
        r = size / 10
        R = (size - 2*r) /2
        if withDef:
            svg = svg + '<defs><marker id="Triangle"\
 viewBox="0 0 10 10" refX="10" refY="5" \
 markerUnits="strokeWidth"\
 fill="lightgray" \
 stroke="black" markerWidth="'+str(r/2.75)+'" markerHeight="'+str(r/2.5)+'"\
 orient="auto">\
 <path d="M 0 0 L 10 5 L 0 10 z" /> </marker> </defs>'
        # Draw nodes on a circle.
        xy = [ (cmath.rect(R,2 * x* cmath.pi/n).real + size/2,cmath.rect(R,2 * x* cmath.pi/n).imag + size/2) for x in range(n)]
        i = 0
        parentCount = {}
        childCount = {}
        findXY = {}
        # Determine the number of times each node is a parent or child.
        for (a,b) in self.edges.keys():
            if a in parentCount.keys():
                parentCount[a] += 1
            else:
                parentCount[a] = 1
            if b in childCount.keys():
                childCount[b] += 1
            else:
                childCount[b] = 1
        # Construct list of parent nodes (in order of parent role freq.),
        # add any missing nodes.
        childPairs = childCount.items()
        sortedPairs = sorted(childPairs, key=lambda tuple: tuple[1])
        nodes = self.nodes.keys()
        if len(sortedPairs) > 0:
            [nodes, counts] = zip(*sortedPairs)
        nodeList = list(nodes)
        for selfNode in self.nodes.keys():
            if not selfNode in nodeList:
                nodeList.append(selfNode)
        #for k in self.nodes.keys():
        for k in nodeList:
            color = 'blue'
            fillcolor= 'yellow'
            if(k in self.rednodes):
                color = 'red'
                fillcolor='pink'
            if nodeShape == 'circle':
                svg = svg + '<circle cx="'+str(xy[i][0]) + '" cy="'+str(xy[i][1]) + '"r="'+str(r)+'" fill=' + fillcolor + ' stroke-width="2" stroke="'+color+'"/>\n'
            else:
                svg = svg + '<rect x="'+str(xy[i][0] - r) + '" y="'+str(xy[i][1] - r) + '"width="'+str(2*r)+'" height="' + str(2*r) + '" fill=' + fillcolor + ' stroke-width="2" stroke="'+color+'"/>\n'
            #lab = ",".join(self.nodes[k])
            lab = "".join(self.nodes[k])
            svg = svg + '<text x="'+str(xy[i][0]-0.5*r) + '" y="'+str(xy[i][1]+r/2) + '" font-family="Times"'+'font-size="'+str(1.5*r / sqrt(max([len(lab),1])))+'"'+'>'
            svg = svg + lab + '</text>\n'
            findXY[k] = i
            i = i +1
        # Draw edges on a (smaller) circle
        R = R - r
        xy = [ (cmath.rect(R,2 * x* cmath.pi/n).real + size/2,cmath.rect(R,2 * x* cmath.pi/n).imag + size/2) for x in range(n)]
        for (a,b) in self.edges.keys():
            ai = findXY[a]
            bi = findXY[b]
            color = 'blue'
            swidth='1.5'
            useMarker=True
            if((a,b) in self.rededges):
                color = 'red'
                swidth='2'
            if((a,b) in self.edges and (b,a) in self.edges):
                useMarker=False # Avoid using arrows for bi-directional edges (segment edges)
            if useMarker:
                svg = svg + '<line stroke-width=' + swidth + ' x1="'+str(xy[ai][0]) + '" y1="'+str(xy[ai][1]) + '" x2="'+str(xy[bi][0]) + '" y2="'+str(xy[bi][1]) + '" stroke="'+color+'" marker-end="url(#Triangle)" />\n'
            else:
                svg = svg + '<line stroke-width=' + swidth + ' x1="'+str(xy[ai][0]) + '" y1="'+str(xy[ai][1]) + '" x2="'+str(xy[bi][0]) + '" y2="'+str(xy[bi][1]) + '" stroke="'+color+'" stroke-dasharray="1,5" />\n'
            lab = ",".join(self.edges[(a,b)])
            svg = svg + '<text x="'+str((xy[ai][0] + xy[bi][0] + 4)/2) + '" y="'+str((xy[ai][1]+ xy[bi][1])/2 - 4) + '" font-family="Times"'+'font-size="'+str(1.5*r / sqrt(max([int(0.6 * len(lab)),1])))+'"'+'>'
            svg = svg + lab + '</text>\n'
        return svg + '</svg>\n'

def test():
    sg=SmallGraph()
    sg.nodes["1"] = "A"
    sg.nodes["2"] = "B"
    sg.nodes["3"] = "C"
    sg.edges[("1","2")] = "R"
    sg.edges[("1","3")] = "U"
    sg.printLG()
    line = str(sg)
    print (line)
    sg2 = SmallGraph()
    sg2.fromStr(line)
    sg2.printLG()
    print ("Are they Iso (Y) : " + (str(sg == sg2)))
    sg2.edges[('2','3')] = 'R'
    print ("Add an edge (2,3,R) on right side ")
    print ("Are they Iso (N) : " + (str(sg == sg2)))
    sg.edges[('2','3')] = 'U'
    print ("Add an edge (2,3,U) on left side ")
    print ("Are they Iso (N) : " + (str(sg == sg2)))
    print ("change edge (2,3) to R on left side ")
    sg.edges[('2','3')] = 'R'
    print ("Are they Iso (Y) : " + (str(sg == sg2)))
    print ("New graph : ")
    sg2 = SmallGraph([("1","B"),("2","C"), ("3","A")],
                     [("3", "1", "R"), ("3", "2", "U")])
    sg2.printLG()
    print ("Are they Iso (N) : " + (str(sg.iso(sg2))))
    sg2.edges[('1','2')] = 'U'
    print ("Add an edge (2,1,U) on right side ")
    print ("Are they Iso (N) : " + (str(sg.iso(sg2))) + (str(sg2.iso(sg))))
    sg2.edges[('1','2')] = 'R'
    print ("Change edge (2,1) to R on right side ")
    print ("Are they Iso (Y) : " + (str(sg.iso(sg2)))+ (str(sg2.iso(sg))))
    print (" SVG test : ")
    sg.nodes["1"] = "Test"
    print (sg.toSVG())
[ "cmath.rect", "compareTools.cmpNodes", "compareTools.cmpEdges" ]
[((2934, 2984), 'compareTools.cmpNodes', 'compareTools.cmpNodes', (['myLabelsFlat', 'hisLabelsFlat'], {}), '(myLabelsFlat, hisLabelsFlat)\n', (2955, 2984), False, 'import compareTools\n'), ((3772, 3825), 'compareTools.cmpNodes', 'compareTools.cmpNodes', (['self.nodes[my]', 'osg.nodes[his]'], {}), '(self.nodes[my], osg.nodes[his])\n', (3793, 3825), False, 'import compareTools\n'), ((4814, 4866), 'compareTools.cmpEdges', 'compareTools.cmpEdges', (['osg.edges[oa, ob]', "{'_': 1.0}"], {}), "(osg.edges[oa, ob], {'_': 1.0})\n", (4835, 4866), False, 'import compareTools\n'), ((4332, 4383), 'compareTools.cmpEdges', 'compareTools.cmpEdges', (['self.edges[a, b]', "{'_': 1.0}"], {}), "(self.edges[a, b], {'_': 1.0})\n", (4353, 4383), False, 'import compareTools\n'), ((4520, 4578), 'compareTools.cmpEdges', 'compareTools.cmpEdges', (['self.edges[a, b]', 'osg.edges[oa, ob]'], {}), '(self.edges[a, b], osg.edges[oa, ob])\n', (4541, 4578), False, 'import compareTools\n'), ((5817, 5852), 'cmath.rect', 'cmath.rect', (['R', '(2 * x * cmath.pi / n)'], {}), '(R, 2 * x * cmath.pi / n)\n', (5827, 5852), False, 'import cmath\n'), ((5863, 5898), 'cmath.rect', 'cmath.rect', (['R', '(2 * x * cmath.pi / n)'], {}), '(R, 2 * x * cmath.pi / n)\n', (5873, 5898), False, 'import cmath\n'), ((7568, 7603), 'cmath.rect', 'cmath.rect', (['R', '(2 * x * cmath.pi / n)'], {}), '(R, 2 * x * cmath.pi / n)\n', (7578, 7603), False, 'import cmath\n'), ((7614, 7649), 'cmath.rect', 'cmath.rect', (['R', '(2 * x * cmath.pi / n)'], {}), '(R, 2 * x * cmath.pi / n)\n', (7624, 7649), False, 'import cmath\n')]
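A stand-alone sketch of the brute-force isomorphism idea behind iso()/equal() above, reduced to unlabeled edge sets: try every node permutation and compare the relabeled edges. As the class docstring warns, this is only sensible for very small graphs (the loop is O(n!)).

import itertools

def brute_force_iso(nodes_a, edges_a, nodes_b, edges_b):
    if len(nodes_a) != len(nodes_b):
        return False
    for perm in itertools.permutations(nodes_b):
        mapping = dict(zip(nodes_a, perm))
        if {(mapping[u], mapping[v]) for (u, v) in edges_a} == set(edges_b):
            return True
    return False

print(brute_force_iso(["1", "2"], [("1", "2")], ["a", "b"], [("b", "a")]))  # True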
# Dogs vs Cats Image Classification Without Image Augmentation
# In this dataset the images are not all the same size.
# The images are also in color, so each one is height X width X depth (3 RGB channels).
# That means we have to resize the images to the same dimensions
# (and flatten before the fully connected layers).

from __future__ import absolute_import, division, print_function

# Import Tensorflow
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator  # Reads data from disk

# Helper Libraries
import os
import math
import numpy as np
import matplotlib.pyplot as plt
import logging
from tensorflow.python.keras.backend import sparse_categorical_crossentropy

logger = tf.get_logger()
logger.setLevel(logging.ERROR)

# Data Loading
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True)
zip_dir_base = os.path.dirname(zip_dir)

# Set up the file paths and sets
base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered')
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')

train_cats_dir = os.path.join(train_dir, 'cats')  # Dir with train cat imgs
train_dogs_dir = os.path.join(train_dir, 'dogs')  # Dir with train dog imgs
validation_cats_dir = os.path.join(validation_dir, 'cats')  # Dir with validation cat pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')  # Dir with validation dog pictures

# Check how many images (cats and dogs) we have in the train and validation dirs
num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))

total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val

print("Total cats images are: ", num_cats_tr)  # 1000
print("Total dogs images are: ", num_dogs_tr)  # 1000
print("Total Validation cats images are: ", num_cats_val)  # 500
print("Total Validation dogs images are: ", num_dogs_val)  # 500
print("--")
print("Total TRAINING images are: ", total_train)  # 2000
print("Total VALIDATION images are: ", total_val)  # 1000

# Set up variables that will be used in preprocessing
BATCH_SIZE = 100  # Number of training examples to process before updating model variables
IMG_SHAPE = 150   # Training data consists of 150X150 pixel images

# Data Preparation
# (remember: read img, decode and convert to grid as per RGB, float tensor, rescale to [0,1])
train_image_generator = ImageDataGenerator(rescale = 1. / 255)       # Generator for training data
validation_image_generator = ImageDataGenerator(rescale = 1. / 255)  # Generator for validation data

# Load images and apply rescaling
train_data_gen = train_image_generator.flow_from_directory(batch_size = BATCH_SIZE,
                                                           directory = train_dir,
                                                           shuffle = True,
                                                           target_size = (IMG_SHAPE, IMG_SHAPE),  # (150,150)
                                                           class_mode = 'binary')

val_data_gen = validation_image_generator.flow_from_directory(batch_size = BATCH_SIZE,
                                                              directory = validation_dir,
                                                              shuffle = False,
                                                              target_size = (IMG_SHAPE, IMG_SHAPE),  # (150,150)
                                                              class_mode = 'binary')

# Visualizing Training Images
sample_training_images, _ = next(train_data_gen)  # next returns a batch from the dataset as a tuple
# We disregard the labels; we only look at the images

# This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column.
def plotImages(images_arr):
    fig, axes = plt.subplots(1, 5, figsize = (20,20))
    axes = axes.flatten()
    for img, ax in zip(images_arr, axes):
        ax.imshow(img)
    plt.tight_layout()
    plt.show()

plotImages(sample_training_images[:5])  # Plot img 0-4

# Build the model
# FOUR Convolution blocks, with one Max Pool layer each
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), padding = 'same', activation = tf.nn.relu,
                           input_shape = (150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),

    tf.keras.layers.Conv2D(64, (3, 3), activation = 'relu'),  # can be written as such
    tf.keras.layers.MaxPooling2D(2, 2),

    tf.keras.layers.Conv2D(128, (3, 3), activation = 'relu'),
    tf.keras.layers.MaxPooling2D(2, 2),

    tf.keras.layers.Conv2D(128, (3, 3), activation = 'relu'),  # can be written as such
    tf.keras.layers.MaxPooling2D(2, 2),

    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation = tf.nn.relu),  # Layer with 512 fully connected units
    tf.keras.layers.Dense(2)  # No softmax activation needed: the loss below works on raw logits
])

# 2 - Compile the model
model.compile(optimizer = 'adam',
              loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits = True),  # can be written like so
              metrics = ['accuracy'])  # fraction of images that are correctly classified

model.summary()

# Train the model
EPOCHS = 15
history = model.fit_generator(
    train_data_gen,
    steps_per_epoch = int(np.ceil(total_train / float(BATCH_SIZE))),
    epochs = EPOCHS,
    validation_data = val_data_gen,
    validation_steps = int(np.ceil(total_val / float(BATCH_SIZE)))
)

# Visualizing results after training the network
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(EPOCHS)

plt.figure(figsize = (8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label = 'Training Accuracy')
plt.plot(epochs_range, val_acc, label = 'Validation Accuracy')
plt.legend(loc = 'lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label = 'Training Loss')
plt.plot(epochs_range, val_loss, label = 'Validation Loss')
plt.legend(loc = 'upper right')
plt.title('Training and Validation Loss')
plt.savefig('./foo.png')
plt.show()
[ "matplotlib.pyplot.title", "tensorflow.keras.preprocessing.image.ImageDataGenerator", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Dense", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "os.path.join", "tensorflow.keras.layers.Flatten", "tensorflow.get_logger", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "os.path.dirname", "tensorflow.keras.utils.get_file", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "matplotlib.pyplot.legend", "os.listdir", "matplotlib.pyplot.subplot", "tensorflow.keras.layers.Conv2D", "matplotlib.pyplot.plot", "matplotlib.pyplot.savefig" ]
[((712, 727), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (725, 727), True, 'import tensorflow as tf\n'), ((872, 958), 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', (['"""cats_and_dogs_filterted.zip"""'], {'origin': '_URL', 'extract': '(True)'}), "('cats_and_dogs_filterted.zip', origin=_URL, extract\n =True)\n", (895, 958), True, 'import tensorflow as tf\n'), ((970, 994), 'os.path.dirname', 'os.path.dirname', (['zip_dir'], {}), '(zip_dir)\n', (985, 994), False, 'import os\n'), ((1120, 1151), 'os.path.join', 'os.path.join', (['base_dir', '"""train"""'], {}), "(base_dir, 'train')\n", (1132, 1151), False, 'import os\n'), ((1170, 1206), 'os.path.join', 'os.path.join', (['base_dir', '"""validation"""'], {}), "(base_dir, 'validation')\n", (1182, 1206), False, 'import os\n'), ((1227, 1258), 'os.path.join', 'os.path.join', (['train_dir', '"""cats"""'], {}), "(train_dir, 'cats')\n", (1239, 1258), False, 'import os\n'), ((1303, 1334), 'os.path.join', 'os.path.join', (['train_dir', '"""dogs"""'], {}), "(train_dir, 'dogs')\n", (1315, 1334), False, 'import os\n'), ((1384, 1420), 'os.path.join', 'os.path.join', (['validation_dir', '"""cats"""'], {}), "(validation_dir, 'cats')\n", (1396, 1420), False, 'import os\n'), ((1479, 1515), 'os.path.join', 'os.path.join', (['validation_dir', '"""dogs"""'], {}), "(validation_dir, 'dogs')\n", (1491, 1515), False, 'import os\n'), ((2651, 2688), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (2669, 2688), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2750, 2787), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (2768, 2787), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((6081, 6107), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (6091, 6107), True, 'import matplotlib.pyplot as plt\n'), ((6111, 6131), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (6122, 6131), True, 'import matplotlib.pyplot as plt\n'), ((6133, 6187), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'acc'], {'label': '"""Training Accuracy"""'}), "(epochs_range, acc, label='Training Accuracy')\n", (6141, 6187), True, 'import matplotlib.pyplot as plt\n'), ((6191, 6251), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_acc'], {'label': '"""Validation Accuracy"""'}), "(epochs_range, val_acc, label='Validation Accuracy')\n", (6199, 6251), True, 'import matplotlib.pyplot as plt\n'), ((6255, 6284), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (6265, 6284), True, 'import matplotlib.pyplot as plt\n'), ((6288, 6333), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Accuracy"""'], {}), "('Training and Validation Accuracy')\n", (6297, 6333), True, 'import matplotlib.pyplot as plt\n'), ((6337, 6357), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (6348, 6357), True, 'import matplotlib.pyplot as plt\n'), ((6359, 6410), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'loss'], {'label': '"""Training Loss"""'}), "(epochs_range, loss, label='Training Loss')\n", (6367, 6410), True, 'import matplotlib.pyplot as plt\n'), ((6414, 6471), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_loss'], 
{'label': '"""Validation Loss"""'}), "(epochs_range, val_loss, label='Validation Loss')\n", (6422, 6471), True, 'import matplotlib.pyplot as plt\n'), ((6475, 6504), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (6485, 6504), True, 'import matplotlib.pyplot as plt\n'), ((6508, 6549), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Loss"""'], {}), "('Training and Validation Loss')\n", (6517, 6549), True, 'import matplotlib.pyplot as plt\n'), ((6551, 6575), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./foo.png"""'], {}), "('./foo.png')\n", (6562, 6575), True, 'import matplotlib.pyplot as plt\n'), ((6577, 6587), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6585, 6587), True, 'import matplotlib.pyplot as plt\n'), ((1055, 1079), 'os.path.dirname', 'os.path.dirname', (['zip_dir'], {}), '(zip_dir)\n', (1070, 1079), False, 'import os\n'), ((1648, 1674), 'os.listdir', 'os.listdir', (['train_cats_dir'], {}), '(train_cats_dir)\n', (1658, 1674), False, 'import os\n'), ((1695, 1721), 'os.listdir', 'os.listdir', (['train_dogs_dir'], {}), '(train_dogs_dir)\n', (1705, 1721), False, 'import os\n'), ((1745, 1776), 'os.listdir', 'os.listdir', (['validation_cats_dir'], {}), '(validation_cats_dir)\n', (1755, 1776), False, 'import os\n'), ((1798, 1829), 'os.listdir', 'os.listdir', (['validation_dogs_dir'], {}), '(validation_dogs_dir)\n', (1808, 1829), False, 'import os\n'), ((4081, 4117), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(5)'], {'figsize': '(20, 20)'}), '(1, 5, figsize=(20, 20))\n', (4093, 4117), True, 'import matplotlib.pyplot as plt\n'), ((4220, 4238), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4236, 4238), True, 'import matplotlib.pyplot as plt\n'), ((4244, 4254), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4252, 4254), True, 'import matplotlib.pyplot as plt\n'), ((4432, 4536), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'activation': 'tf.nn.relu', 'input_shape': '(150, 150, 3)'}), "(32, (3, 3), padding='same', activation=tf.nn.relu,\n input_shape=(150, 150, 3))\n", (4454, 4536), True, 'import tensorflow as tf\n'), ((4549, 4583), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (4577, 4583), True, 'import tensorflow as tf\n'), ((4597, 4650), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (4619, 4650), True, 'import tensorflow as tf\n'), ((4689, 4723), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (4717, 4723), True, 'import tensorflow as tf\n'), ((4737, 4791), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""'}), "(128, (3, 3), activation='relu')\n", (4759, 4791), True, 'import tensorflow as tf\n'), ((4804, 4838), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (4832, 4838), True, 'import tensorflow as tf\n'), ((4852, 4906), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""'}), "(128, (3, 3), activation='relu')\n", (4874, 4906), True, 'import tensorflow as tf\n'), ((4945, 4979), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2)', '(2)'], {}), '(2, 2)\n', (4973, 4979), 
True, 'import tensorflow as tf\n'), ((4993, 5018), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (5016, 5018), True, 'import tensorflow as tf\n'), ((5029, 5078), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(512)'], {'activation': 'tf.nn.relu'}), '(512, activation=tf.nn.relu)\n', (5050, 5078), True, 'import tensorflow as tf\n'), ((5129, 5153), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {}), '(2)\n', (5150, 5153), True, 'import tensorflow as tf\n'), ((5322, 5385), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (5367, 5385), True, 'import tensorflow as tf\n')]
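A quick numeric check (standard library only) of the steps-per-epoch arithmetic passed to fit_generator above: with 2000 training and 1000 validation images in batches of 100, an epoch consumes 20 training and 10 validation batches.

import math

total_train, total_val, batch_size = 2000, 1000, 100
print(math.ceil(total_train / batch_size))  # 20 training steps per epoch
print(math.ceil(total_val / batch_size))    # 10 validation steps per epoch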
#!/usr/bin/python # encoding: utf-8 #pylint: disable=R0904 """ The annotation test class """ # upconvert.py - A universal hardware design file format converter using # Format: upverter.com/resources/open-json-format/ # Development: github.com/upverter/schematic-file-converter # # Copyright 2011 Upverter, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from upconvert.core.annotation import Annotation import unittest class AnnotationTests(unittest.TestCase): """ The tests of the core module annotation feature """ def setUp(self): """ Setup the test case. """ pass def tearDown(self): """ Teardown the test case. """ pass def test_create_new_annotation(self): """ Test the creation of a new empty annotation. """ anno = Annotation('abc', 0, 1, 2, False) assert anno.value == 'abc' assert anno.x == 0 assert anno.y == 1 assert anno.rotation == 2 assert anno.visible != True def test_annotation_bounds(self): '''Test .bounds()''' annot = Annotation('foo', 3, 6, 0, True) top_left, bottom_right = annot.bounds() # bounds() will give a square with sides 20 units long, centered on the # annotation self.assertEqual(top_left.x, 3 - 10) self.assertEqual(top_left.y, 6 - 10) self.assertEqual(bottom_right.x, 3 + 10) self.assertEqual(bottom_right.y, 6 + 10)
[ "upconvert.core.annotation.Annotation" ]
[((1303, 1336), 'upconvert.core.annotation.Annotation', 'Annotation', (['"""abc"""', '(0)', '(1)', '(2)', '(False)'], {}), "('abc', 0, 1, 2, False)\n", (1313, 1336), False, 'from upconvert.core.annotation import Annotation\n'), ((1580, 1612), 'upconvert.core.annotation.Annotation', 'Annotation', (['"""foo"""', '(3)', '(6)', '(0)', '(True)'], {}), "('foo', 3, 6, 0, True)\n", (1590, 1612), False, 'from upconvert.core.annotation import Annotation\n')]
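A minimal re-implementation sketch (invented names, not the upconvert API) of the behavior the bounds test asserts: a point annotation reports a 20x20 box centered on its (x, y).

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

def bounds(x, y, half=10):
    # square of side 2*half centered on the annotation position
    return Point(x - half, y - half), Point(x + half, y + half)

top_left, bottom_right = bounds(3, 6)
print((top_left.x, top_left.y), (bottom_right.x, bottom_right.y))  # (-7, -4) (13, 16)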
import re

from django.conf import settings
from wagtail.images import get_image_model
from wagtail.core.models import Collection
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search

from cms.tagging import TRENDING

pattern = re.compile(r"tat+o+")


class SearchQuery:
    @staticmethod
    def search(text, operator='or', filter_args=None, exclude_args=None):
        if filter_args is None:
            filter_args = {}
        if exclude_args is None:
            exclude_args = {}

        # Strip "tattoo" and its misspellings from the query
        # (since everything here is a tattoo ;) )
        text = pattern.sub("", text)
        if not text:
            text = TRENDING

        collection_id = int(settings.SERVICE_COLLECTION_ID
                            if settings.SERVICE_COLLECTION_ID
                            else Collection.objects.get(name="services").id)
        searched = get_image_model().objects.filter(
            collection=collection_id,
            **filter_args
        ).exclude(
            tags__name__in=['nicht in Suche'],  # tag name meaning "not in search"
            **exclude_args
        ).search(
            text,
            operator=operator
        )
        return searched

    @staticmethod
    def suggestions(text):
        """
        Build the string for 'did you mean "XY"?':
        for each word, take the top suggester option if there is one,
        otherwise keep the original word.
        """
        s = Search(using=Elasticsearch(settings.ELASTIC_URL))
        res = s.suggest('suggestion', text, term={'field': 'all_tags_str'}).execute()
        suggested_words = []
        suggestions = res.suggest['suggestion']
        for ou in suggestions:
            options = ou['options']
            if options:
                suggested_words.append(options[0].text)
            else:
                suggested_words.append(ou['text'])
        suggested = ' '.join(suggested_words)
        if suggested.lower() != text.lower():
            return suggested
[ "elasticsearch.Elasticsearch", "wagtail.core.models.Collection.objects.get", "wagtail.images.get_image_model", "re.compile" ]
[((252, 272), 're.compile', 're.compile', (['"""tat+o+"""'], {}), "('tat+o+')\n", (262, 272), False, 'import re\n'), ((1321, 1356), 'elasticsearch.Elasticsearch', 'Elasticsearch', (['settings.ELASTIC_URL'], {}), '(settings.ELASTIC_URL)\n', (1334, 1356), False, 'from elasticsearch import Elasticsearch\n'), ((765, 804), 'wagtail.core.models.Collection.objects.get', 'Collection.objects.get', ([], {'name': '"""services"""'}), "(name='services')\n", (787, 804), False, 'from wagtail.core.models import Collection\n'), ((842, 859), 'wagtail.images.get_image_model', 'get_image_model', ([], {}), '()\n', (857, 859), False, 'from wagtail.images import get_image_model\n')]
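A server-free sketch of the "did you mean" merging logic in suggestions() above; the response shape below is a hand-written stand-in for what the Elasticsearch term suggester returns, so no live cluster is needed.

def merge_suggestions(suggestions, original):
    words = []
    for entry in suggestions:
        options = entry["options"]
        # top option if present, otherwise keep the original token
        words.append(options[0]["text"] if options else entry["text"])
    suggested = " ".join(words)
    return suggested if suggested.lower() != original.lower() else None

fake = [{"text": "tatoo", "options": [{"text": "tattoo"}]},
        {"text": "rose", "options": []}]
print(merge_suggestions(fake, "tatoo rose"))  # tattoo rose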
# MIT License
# Copyright (c) 2022 christiandimaio
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from torch.utils.data import DataLoader
from NeuralModels.FactoryModels import *
from NeuralModels.Dataset import MyDataset
from NeuralModels.Vocabulary import Vocabulary
import argparse
import sys, os
from PIL import Image


def parse_command_line_arguments():
    parser = argparse.ArgumentParser(description='CLI for C[aA]RNet, some static definition are placed in the VARIABLE.py file')
    parser.add_argument('decoder', type=Decoder.argparse, choices=list(Decoder),
                        help="What type of decoder do you want to use?")
    parser.add_argument('mode', choices=['train', 'eval'],
                        help='train or evaluate C[aA]RNet.')
    parser.add_argument('encoder_dim', type=int,
                        help='Size of the encoder output. IF Attention is True, fixed at 2048. IF CaRNetvI is the net, encoder_dim == |vocabulary|.')
    parser.add_argument('hidden_dim', type=int,
                        help='Capacity of the LSTM Cell.')
    parser.add_argument('--attention', default=False, type=bool,
                        help='Use the attention model. IF True, the vHCAttention decoder and CResNet50Attention encoder are mandatory. (default: False)')
    parser.add_argument('--attention_dim', type=int, default=0,
                        help="The attention capacity. Valid only if attention is true. (default: 0)")
    parser.add_argument('--dataset_folder', type=str, default="./dataset",
                        help='Data set folder. Used only if mode = train (Default: "./dataset")')
    parser.add_argument('--image_path', type=str, default="",
                        help="The absolute path of the image for which we want to retrieve the caption. Used only if mode = eval (Default: '')")
    parser.add_argument('--splits', type=int, nargs="+", default=[60,30,10],
                        help='Fraction of data to be used in the train set, val set and test set (default: 60 30 10)')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='mini-batch size (default: 32)')
    parser.add_argument('--epochs', type=int, default=500,
                        help='number of training epochs (default: 500)')
    parser.add_argument('--lr', type=float, default=1e-3,
                        help='learning rate (Adam) (default: 1e-3)')
    parser.add_argument('--workers', type=int, default=4,
                        help='number of worker processes used to load the data (default: 4)')
    parser.add_argument('--device', default='cpu', type=str,
                        help='device to be used for computations (in {cpu, cuda:0, cuda:1, ...}, default: cpu)')

    parsed_arguments = parser.parse_args()
    return parsed_arguments


if __name__ == "__main__":
    print("Coded with love by christiandimaio aka gnekt :* \n ")
    args = parse_command_line_arguments()
    for k, v in args.__dict__.items():
        print(k + '=' + str(v))

    #################################### Define Encoder/Decoder
    encoder = None
    decoder = None
    attention = None
    if args.attention == True:
        # Attention is true, encoder = CResNet50Attention, decoder = RNetvHCAttention
        encoder = FactoryEncoder(Encoder.CResNet50Attention)
        decoder = FactoryDecoder(Decoder.RNetvHCAttention)
        attention = FactoryAttention(Attention.Attention)
        args.net_name = "CARNetvHCAttention"
    if args.attention == False:
        args.net_name = f"Ca{args.decoder.name}"
        encoder = FactoryEncoder(Encoder.CResNet50)
        decoder = FactoryDecoder(args.decoder)
    ####################################

    #################################### Construct Data
    print("Construct data..")
    if args.mode == "train":
        print("Define dataset..")
        dataset = MyDataset(args.dataset_folder, percentage=8)  # Percentage is fixed because the dataset is HUGE; 8% is enough for experimental tests.
        print("OK.")
        print("Define vocabulary..")
        vocabulary = Vocabulary(dataset)
        print("OK.")

        # Obtain train, validation and test set
        print("Obtain train, validation and test set..")
        train_set = dataset.get_fraction_of_dataset(percentage=args.splits[0], delete_transfered_from_source=True)
        validation_set = dataset.get_fraction_of_dataset(percentage=args.splits[1], delete_transfered_from_source=True)
        test_set = dataset.get_fraction_of_dataset(percentage=args.splits[2], delete_transfered_from_source=True)
        print("OK.")

        # Define the associated dataloaders
        print("Define the associated dataloaders")
        dataloader_training = DataLoader(train_set, batch_size=args.batch_size,
                                         shuffle=True, num_workers=args.workers,
                                         collate_fn = lambda data: dataset.pack_minibatch_training(data,vocabulary))
        dataloader_validation = DataLoader(validation_set, batch_size=args.batch_size,
                                           shuffle=True, num_workers=args.workers,
                                           collate_fn = lambda data: dataset.pack_minibatch_evaluation(data,vocabulary))
        dataloader_test = DataLoader(test_set, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers,
                                     collate_fn = lambda data: dataset.pack_minibatch_evaluation(data,vocabulary))
        print("OK.")

    if args.mode == "eval":
        print("Define vocabulary..")
        vocabulary = Vocabulary()
        print("Ok.")
        print("Load the image..")
        if not os.path.exists(args.image_path) or os.path.isdir(args.image_path):
            raise ValueError(f"Got {args.image_path} as file path, error!")
        image: Image.Image = Image.open(args.image_path).convert('RGB')
        print("Ok.")
    ####################################

    #################################### Define Net
    print("Create the net..")
    net = FactoryNeuralNet(NeuralNet.CaRNet)(
        encoder=encoder,
        decoder=decoder,
        attention=attention,  # != None only if Attention is requested
        attention_dim = args.attention_dim,  # != 0 only if Attention is True
        net_name=args.net_name,
        encoder_dim = args.encoder_dim if args.decoder is not Decoder.RNetvI else vocabulary.embeddings.shape[1],  # if Attention is True, encoder_dim has no meaning because it is 2048 internally by construction.
        hidden_dim= args.hidden_dim,
        padding_index= vocabulary.predefined_token_idx()["<PAD>"],
        vocab_size= len(vocabulary.word2id.keys()),
        embedding_dim = vocabulary.embeddings.shape[1],
        device=args.device
    )
    print("OK.")

    #################################### Load a previously trained net, if it exists
    print("Check if a previous version of the Net is present..")
    try:
        net.load("./.saved")
        print("Found.")
    except Exception as ex:
        print("An exception has occurred.")
        print(ex)
        if args.mode == "eval":
            # If the mode is eval the script cannot continue
            print("Since you want an evaluation, the script cannot continue; please retrain the network.")
            sys.exit(0)
        # In training mode new files are created instead.
        print("Not Found.")
        print("Since the selected mode is training, a new instance of the net will be saved during the training activity.")

    #################################### Training or Evaluate
    if args.mode == "train":
        print("Start training..")
        net.train(
            train_set=dataloader_training,
            validation_set=dataloader_validation,
            lr=args.lr,
            epochs=args.epochs,
            vocabulary=vocabulary
        )
        # Evaluate Test set
        print("Done")
        print(f"Test set Accuracy: {net.eval_net(dataloader_test, vocabulary):.4f}")

    if args.mode == "eval":
        print("Start evaluation..")
        net.eval(image, vocabulary)
        print("OK.")
    ####################################
[ "NeuralModels.Dataset.MyDataset", "argparse.ArgumentParser", "NeuralModels.Vocabulary.Vocabulary", "os.path.isdir", "os.path.exists", "PIL.Image.open", "sys.exit" ]
[((1382, 1507), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CLI for C[aA]RNet, some static definition are placed in the VARIABLE.py file"""'}), "(description=\n 'CLI for C[aA]RNet, some static definition are placed in the VARIABLE.py file'\n )\n", (1405, 1507), False, 'import argparse\n'), ((4966, 5010), 'NeuralModels.Dataset.MyDataset', 'MyDataset', (['args.dataset_folder'], {'percentage': '(8)'}), '(args.dataset_folder, percentage=8)\n', (4975, 5010), False, 'from NeuralModels.Dataset import MyDataset\n'), ((5183, 5202), 'NeuralModels.Vocabulary.Vocabulary', 'Vocabulary', (['dataset'], {}), '(dataset)\n', (5193, 5202), False, 'from NeuralModels.Vocabulary import Vocabulary\n'), ((6591, 6603), 'NeuralModels.Vocabulary.Vocabulary', 'Vocabulary', ([], {}), '()\n', (6601, 6603), False, 'from NeuralModels.Vocabulary import Vocabulary\n'), ((6710, 6740), 'os.path.isdir', 'os.path.isdir', (['args.image_path'], {}), '(args.image_path)\n', (6723, 6740), False, 'import sys, os\n'), ((6675, 6706), 'os.path.exists', 'os.path.exists', (['args.image_path'], {}), '(args.image_path)\n', (6689, 6706), False, 'import sys, os\n'), ((6847, 6874), 'PIL.Image.open', 'Image.open', (['args.image_path'], {}), '(args.image_path)\n', (6857, 6874), False, 'from PIL import Image\n'), ((8295, 8306), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8303, 8306), False, 'import sys, os\n')]
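A minimal sketch (with invented enum values) of the type=Decoder.argparse trick used in the CLI above: an Enum that argparse can both convert to and validate against via choices=list(Decoder).

import argparse
import enum

class Decoder(enum.Enum):
    RNetvI = "RNetvI"
    RNetvH = "RNetvH"

    def __str__(self):
        return self.value

    @staticmethod
    def argparse(s):
        # convert the raw CLI string into an enum member
        return Decoder(s)

parser = argparse.ArgumentParser()
parser.add_argument("decoder", type=Decoder.argparse, choices=list(Decoder))
print(parser.parse_args(["RNetvI"]).decoder)  # prints "RNetvI"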
""" Jump search algorithm iterates through a sorted list with a step of n^(1/2), until the element compared is bigger than the one searched.If the item is not in the particular step, it shifts the entire step. It will then perform a linear search on the step until it matches the target. If not found, it returns -1. Time Complexity: O(√n) Space Complexity: O(1) """ import math arr = [0, 1, 2, 8, 13, 17, 19, 25, 31, 32, 42] target = 25 def jump_search(arr: list, x: int) -> int: """ >>> jump_search(arr, target) == (arr.index(target) if target in arr else -1) True """ n = len(arr) step = int(math.floor(math.sqrt(n))) prev = 0 while arr[min(step, n) - 1] < x: prev = step step += int(math.floor(math.sqrt(n))) if prev >= n: return -1 while arr[prev] < x: prev = prev + 1 if prev == min(step, n): return -1 if arr[prev] == x: return prev return -1 def check_sort(test: list) -> bool: """checks whether the given list is sorted or not.""" if sorted(test) == test: return True else: return False if __name__ == "__main__": if check_sort(arr): res = jump_search(arr, target) if res == -1: print("Number not found!") else: print(f"Number {target} is at index {res}") else: print("Given list is not sorted!")
[ "math.sqrt" ]
[((633, 645), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (642, 645), False, 'import math\n'), ((749, 761), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (758, 761), False, 'import math\n')]
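A worked, stdlib-only trace of the jump phase for the module's own inputs: with n = 11 the block size is floor(sqrt(11)) = 3, so the probes land on indices 2, 5, 8, ... until the probed value reaches the target.

import math

arr = [0, 1, 2, 8, 13, 17, 19, 25, 31, 32, 42]
step = int(math.floor(math.sqrt(len(arr))))
probe = step
while arr[min(probe, len(arr)) - 1] < 25:
    print("jumped past index", min(probe, len(arr)) - 1,
          "value", arr[min(probe, len(arr)) - 1])
    probe += step
print("linear scan starts in the block ending at index", min(probe, len(arr)) - 1)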
#!/usr/bin/python # Copyright (c) 2014-2016, Intel Corporation All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED # TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import sys import time import pymic benchmark = sys.argv[0][2:][:-3] nrepeat = 10000 if len(sys.argv) > 1: nrepeat = int(sys.argv[1]) device = pymic.devices[0] library = device.load_library("libbenchmark_kernels.so") stream = device.get_default_stream() timings = [] ts = time.time() for i in xrange(nrepeat): stream.invoke(library.empty_kernel) stream.sync() te = time.time() try: csv = open(benchmark + ".csv", "w") print >> csv, "benchmark;latency" print >> csv, "{0};{1}".format(benchmark, (te - ts) / float(nrepeat)) finally: csv.close()
[ "time.time" ]
[((1869, 1880), 'time.time', 'time.time', ([], {}), '()\n', (1878, 1880), False, 'import time\n'), ((1970, 1981), 'time.time', 'time.time', ([], {}), '()\n', (1979, 1981), False, 'import time\n')]
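A device-free sketch of the same timing pattern with a plain Python function; time.perf_counter is used here instead of time.time for better resolution, which is a deliberate deviation from the script above.

import time

def empty_kernel():
    pass

nrepeat = 10000
ts = time.perf_counter()
for _ in range(nrepeat):
    empty_kernel()
te = time.perf_counter()
print("latency per call: %.3e s" % ((te - ts) / nrepeat))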
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import xarray as xr
from scipy import optimize

from pyrfu.pyrf import gradient, histogram2d, optimize_nbins_2d


def calc_vph_current(b_xyz, j_xyz):
    """Estimates the phase speed of the oscillating current sheet using
    oscillations of J_N.

    Parameters
    ----------
    b_xyz : xarray.DataArray
        Time series of the magnetic field.

    j_xyz : xarray.DataArray
        Time series of the current density.

    Returns
    -------
    disprel : xarray.Dataset
        Dataset with the fitted phase speed ("fit_db_dt_jn"), the 2D
        histogram of dB_l/dt vs J_N ("hist"), the correlation coefficient
        ("rho"), the fit uncertainty ("sigma"), and the predicted J_N with
        upper and lower confidence bounds on a high-resolution dB_l/dt axis.

    """

    # Time derivative of Bl
    dbl_dt = gradient(b_xyz[:, 0])

    n_bins = optimize_nbins_2d(dbl_dt, j_xyz[:, 2])
    hist_dbl_dt_jn = histogram2d(dbl_dt, j_xyz[:, 2], bins=n_bins)

    # Linear model for jn vs dBdt
    def model_jn(x, a):
        return a * x

    v_phase_j, sigma_dbl_dt_jn = optimize.curve_fit(model_jn, dbl_dt.data,
                                                    j_xyz[:, 2].data)
    v_phase_j = v_phase_j[0]

    corr_coeffs = np.corrcoef(dbl_dt.data, j_xyz[:, 2].data)
    rho = corr_coeffs[0, 1]
    # v_phase_j = -3.12

    sigma_dbl_dt_jn = np.sqrt(float(sigma_dbl_dt_jn))

    dbl_dt_min = -1.2 * np.max(dbl_dt)
    dbl_dt_max = 1.2 * np.max(dbl_dt)

    disprel = {"fit_db_dt_jn": v_phase_j, "hist": hist_dbl_dt_jn,
               "rho": rho, "sigma": sigma_dbl_dt_jn,
               "hires_dBdt": np.linspace(dbl_dt_min, dbl_dt_max, 100),
               "pred_Jn": (["hires_dBdt"],
                           model_jn(np.linspace(dbl_dt_min, dbl_dt_max, 100),
                                    v_phase_j)),
               "bound_upper": (["hires_dBdt"],
                               model_jn(np.linspace(dbl_dt_min, dbl_dt_max, 100),
                                        v_phase_j + 1.92 * sigma_dbl_dt_jn)),
               "bound_lower": (["hires_dBdt"],
                               model_jn(np.linspace(dbl_dt_min, dbl_dt_max, 100),
                                        v_phase_j - 1.92 * sigma_dbl_dt_jn))}

    disprel = xr.Dataset(disprel)

    return disprel
[ "pyrfu.pyrf.optimize_nbins_2d", "numpy.corrcoef", "pyrfu.pyrf.gradient", "scipy.optimize.curve_fit", "xarray.Dataset", "numpy.max", "pyrfu.pyrf.histogram2d", "numpy.linspace" ]
[((1150, 1171), 'pyrfu.pyrf.gradient', 'gradient', (['b_xyz[:, 0]'], {}), '(b_xyz[:, 0])\n', (1158, 1171), False, 'from pyrfu.pyrf import gradient, histogram2d, optimize_nbins_2d\n'), ((1186, 1224), 'pyrfu.pyrf.optimize_nbins_2d', 'optimize_nbins_2d', (['dbl_dt', 'j_xyz[:, 2]'], {}), '(dbl_dt, j_xyz[:, 2])\n', (1203, 1224), False, 'from pyrfu.pyrf import gradient, histogram2d, optimize_nbins_2d\n'), ((1246, 1291), 'pyrfu.pyrf.histogram2d', 'histogram2d', (['dbl_dt', 'j_xyz[:, 2]'], {'bins': 'n_bins'}), '(dbl_dt, j_xyz[:, 2], bins=n_bins)\n', (1257, 1291), False, 'from pyrfu.pyrf import gradient, histogram2d, optimize_nbins_2d\n'), ((1406, 1465), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['model_jn', 'dbl_dt.data', 'j_xyz[:, 2].data'], {}), '(model_jn, dbl_dt.data, j_xyz[:, 2].data)\n', (1424, 1465), False, 'from scipy import optimize\n'), ((1565, 1607), 'numpy.corrcoef', 'np.corrcoef', (['dbl_dt.data', 'j_xyz[:, 2].data'], {}), '(dbl_dt.data, j_xyz[:, 2].data)\n', (1576, 1607), True, 'import numpy as np\n'), ((2686, 2705), 'xarray.Dataset', 'xr.Dataset', (['disprel'], {}), '(disprel)\n', (2696, 2705), True, 'import xarray as xr\n'), ((1739, 1753), 'numpy.max', 'np.max', (['dbl_dt'], {}), '(dbl_dt)\n', (1745, 1753), True, 'import numpy as np\n'), ((1777, 1791), 'numpy.max', 'np.max', (['dbl_dt'], {}), '(dbl_dt)\n', (1783, 1791), True, 'import numpy as np\n'), ((1941, 1981), 'numpy.linspace', 'np.linspace', (['dbl_dt_min', 'dbl_dt_max', '(100)'], {}), '(dbl_dt_min, dbl_dt_max, 100)\n', (1952, 1981), True, 'import numpy as np\n'), ((2062, 2102), 'numpy.linspace', 'np.linspace', (['dbl_dt_min', 'dbl_dt_max', '(100)'], {}), '(dbl_dt_min, dbl_dt_max, 100)\n', (2073, 2102), True, 'import numpy as np\n'), ((2240, 2280), 'numpy.linspace', 'np.linspace', (['dbl_dt_min', 'dbl_dt_max', '(100)'], {}), '(dbl_dt_min, dbl_dt_max, 100)\n', (2251, 2280), True, 'import numpy as np\n'), ((2499, 2539), 'numpy.linspace', 'np.linspace', (['dbl_dt_min', 'dbl_dt_max', '(100)'], {}), '(dbl_dt_min, dbl_dt_max, 100)\n', (2510, 2539), True, 'import numpy as np\n')]
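A numpy-only sketch of the through-origin linear fit that curve_fit performs above for the model jn = a * dBl/dt: the least-squares slope has the closed form a = sum(x*y) / sum(x*x). The data below is synthetic.

import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(-1, 1, 200)
y = -3.1 * x + 0.05 * rng.standard_normal(x.size)
slope = np.sum(x * y) / np.sum(x * x)  # closed-form least squares through the origin
print(float(slope))  # close to -3.1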
#!/usr/bin/python
# Read the encode format files
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL

# There are dictionaries of nonterminals and sequencers. The
# sequencers are ordered lists of nonterminals. The nonterminals
# consist of rule_t's. Each rule has conditions and actions. An
# action can be a simple bit encoding or it can be a binding of a
# value to a field. A condition contains a list of or'ed condition_t's
# and a list of and'ed condition_t's. When all the and'ed conditions
# are satisfied and one of the or'ed conditions (if any) is
# satisfied, then the action should occur. The conditions are field
# values that are = or != to a right hand side. The right hand side
# could be a value or a nonterminal (NTLUF really). Also the field
# name could be bound to some bits that are used in the action, using
# square brackets after the name.

import re
import sys
import os
import optparse
import stat
import copy

def find_dir(d):
    directory = os.getcwd()
    last = ''
    while directory != last:
        target_directory = os.path.join(directory,d)
        if os.path.exists(target_directory):
            return target_directory
        last = directory
        directory = os.path.split(directory)[0]
    return None

mbuild_install_path = os.path.join(os.path.dirname(sys.argv[0]), '..', '..', 'mbuild')
if not os.path.exists(mbuild_install_path):
    mbuild_install_path = find_dir('mbuild')
sys.path = [mbuild_install_path] + sys.path
try:
    import mbuild
except:
    sys.stderr.write("\nERROR(read-encfile.py): Could not find mbuild."
+ " Should be a sibling of the xed2 directory.\n\n") sys.exit(1) xed2_src_path = os.path.join(os.path.dirname(sys.argv[0])) if not os.path.exists(xed2_src_path): xed2_src_path = find_dir('xed2') sys.path= [xed2_src_path] + sys.path sys.path= [ os.path.join(xed2_src_path,'pysrc') ] + sys.path try: from codegen import * from genutil import * from scatter import * from verbosity import * import slash_expand import operand_storage import nt_func_gen except: sys.stderr.write("\nERROR(read-encfile.py): Could not find " + "the xed directory for python imports.\n\n") sys.exit(1) import actions import ins_emit import encutil from patterns import * storage_fields = {} def outreg(): return operand_storage.get_op_getter_full_func('outreg', encutil.enc_strings) def error_operand(): return operand_storage.get_op_getter_full_func('error',encutil.enc_strings) xed_encoder_request = "xed_encoder_request_t" output_file_emitters = [] def _vmsgb(s,b=''): if vencode(): mbuild.msgb(s,b) def make_writable(fn): """Make the file or directory readable/writable/executable by me""" _rwx_by_me = stat.S_IWUSR| stat.S_IRUSR|stat.S_IXUSR os.chmod(fn, _rwx_by_me) def remove_file(fn): """Remove a file if it exists.""" if os.path.exists(fn): _vmsgb("Removing ", fn) make_writable(fn) os.unlink(fn) class blot_t(object): """A blot_t is a fragment of a decoder pattern""" def __init__(self,type=None): self.type = type # 'bits', 'letters', 'nt', "od" (operand decider) self.nt = None # name of a nonterminal self.value = None # integer representing this blot_t's value self.length = 0 # number of bits for this blot_t self.letters = None # sequence of substitution letters for this blot. All must be the same letter self.field_name = None # name of the operand storage field that has the values for this blot-t self.field_offset = 0 # offset within the field self.od_equals = None def make_action_string(self): """ @rtype: string or None @returns: string if the blot is something we want to make in to an action """ if vblot(): msgb("Making action for blot", str(self) ) if self.type == 'bits': binary = ''.join(decimal_to_binary(self.value)) if vblot(): msgb("CONVERT", "%s <-- %s" % ( binary, str(self))) blen = len(binary) if blen < self.length: # pad binary on the left with 0's until it is self.length bits long need_zeros = self.length - blen #msgerr("Padding with %d zeros" % need_zeros) binary = "%s%s" % ('0'*need_zeros , binary) blen = len(binary) if blen > self.length: die("bit length problem in %s --- %s" % (str(self), binary)) if self.field_name: return "%s[0b%s]" % (self.field_name,binary) return "0b%s" % binary elif self.type == 'letters': return "%s[%s]" % (self.field_name,self.letters) elif self.type == 'od': if self.od_equals == False: #return "%s!=0x%x" % (self.field_name, self.value) #EXPERIMENT 2007-08-07 if vignoreod(): msgerr("Ignoring OD != relationships in actions: %s" % str(self)) return None return "%s=0x%x" % (self.field_name, self.value) elif self.type == 'nt': return "%s()" % self.nt else: die("Unhandled type: %s" % self.type) def __str__(self): s = [] if self.type: s.append("%8s" % self.type) else: s.append("%8s" % "no-type") if self.nt: s.append("nt: %s" % self.nt) if self.field_name: s.append("field_name: %s" % self.field_name) if self.od_equals != None: if self.od_equals: v = '=' else: v = '!=' s.append(v) if self.type == 'letters': s.append( "".join(self.letters) ) if self.value != None: s.append("0x%x" % self.value) # print as HEX s.append("(raw %s)" % self.value) if self.nt == None and self.od_equals == None: 
s.append("length: %d" % self.length) s.append("field_offset: %d" % self.field_offset) return ' '.join(s) class operand_t(object): """These are main ISA (decode) operands being used for encode conditions. They are either individual tokens or X=y bindings. The tokens or RHS of bindings can have qualifiers separated by colons: (1) r/w/rw/cr/crcw/rcw/cw, (2) EXPL, IMPL, SUPP or ECOND, (3) length-code. The EXPL/IMPL/SUPP/ECOND is optional as is the length code. Memops must have the length code.""" convert_pattern = re.compile(r'TXT=(?P<rhs>[0-9A-Za-z_]+)') def __init__(self,s): pieces=s.split(':') op_or_binding = pieces[0] self.lencode = '?' self.vis = None explicit_vis = None self.rw = '?' self.type = None # 'token', 'binding', 'ntluf' if len(pieces) >= 2: nxt= pieces[1] if nxt in [ 'IMPL', 'SUPP','EXPL', 'ECOND']: explicit_vis = nxt else: self.rw = pieces[1] if len(pieces) >= 3: for p in pieces[2:]: cp=operand_t.convert_pattern.match(p) if cp: cvt = cp.group('rhs') # ignored elif p in [ 'IMPL', 'SUPP', 'EXPL', 'ECOND']: explicit_vis = p elif self.lencode == '?': self.lencode = p else: _vmsgb("Ignoring [%s] from %s" % (p,s)) #die("Unhandled operand: %s" % s) self.value = None self.ntluf = False ap = equals_pattern.match(op_or_binding) if ap: # binding (self.var,self.value) = ap.group('lhs','rhs') ntluf_match = nt_name_pattern.match(self.value) if ntluf_match: self.value = ntluf_match.group('ntname') self.ntluf = True self.type = 'ntluf' else: self.type = 'binding' else: # operand (MEM/IMM/DISP/etc.) self.var = op_or_binding self.type = 'token' if explicit_vis: self.vis = explicit_vis else: default_vis = storage_fields[self.var].default_visibility if default_vis == 'SUPPRESSED': self.vis = 'SUPP' elif default_vis == 'EXPLICIT': self.vis = 'EXPL' elif default_vis == 'ECOND': self.vis = 'ECOND' else: die("unhandled default visibility: %s for %s" % (default_vis, self.var)) def make_condition(self): """ @rtype: condition_t or None @return: list of conditions based on this operand """ # ignore suppressed operands in encode conditions if self.vis == 'SUPP': return None # make s, a string from which we manufacture a condition_t if self.type == 'binding': if letter_pattern.match(self.value): # associate the field with some letters s = "%s[%s]=*" % (self.var, self.value) else: s = "%s=%s" % (self.var, self.value) elif self.type == 'token': s = "%s=1" % (self.var) # FIXME need to specify memop widths elif self.type == 'ntluf': s = "%s=%s()" % (self.var,self.value) else: die("Unhandled condition: %s" % str(self)) #msgerr("MAKE COND %s" % s) c = condition_t(s) #msgerr("XCOND type: %s var: %s lencode: %s" % (self.type, self.var, str(self.lencode))) # FIXME: THIS IS A DISGUSTING HACK if self.type == 'token' and self.var == 'MEM0': # add a secondary condition for checking the width of the memop. # # NOTE: this MEM_WIDTH is not emitted! It is used in # xed_encoder_request_t::memop_compatible() c2 = condition_t('MEM_WIDTH',self.lencode) # MEM_WIDTH #msgerr("\tRETURNING LIST WITH MEM_WIDTH") return [c, c2] return [c] def __str__(self): if self.vis == 'EXPL': pvis = '' else: pvis = ":%s" % self.vis if self.lencode == '?': plen = '' else: plen = ":%s" % self.lencode if self.rw == '?': prw = '' else: prw = ":%s" % self.rw if self.value: if self.ntluf: parens = '()' else: parens = '' return "%s=%s%s%s%s%s" % ( self.var, self.value, parens, prw, plen, pvis) return "%s%s%s%s" % ( self.var, prw, plen, pvis) class rvalue_t(object): """The right hand side of an operand decider equation. 
It could be a value, a NTLUF, a * or an @. For thing that are bits * means any value. A @ is shorthand for ==XED_REG_INVALID""" def __init__(self, s): self.string = s m = nt_name_pattern.search(s) if m: self.nt = True self.value = m.group('ntname') else: self.nt = False if decimal_pattern.match(s) or binary_pattern.match(s): #_vmsgb("MAKING NUMERIC FOR %s" %(s)) self.value = str(make_numeric(s)) else: #_vmsgb("AVOIDING NUMERIC FOR %s" %(s)) self.value = s def nonterminal(self): """Returns True if this rvalue is a nonterminal name""" return self.nt def null(self): if self.value == '@': return True return False def any_valid(self): if self.value == '*': return True return False def __str__(self): s = self.value if self.nt: s += '()' return s class condition_t(object): """ xxx[bits]=yyyy or xxx=yyy or xxx!=yyyy. bits can be x/n where n is a repeat count. Can also be an 'otherwise' clause that is the final action for a nonterminal if no other rule applies. """ def __init__(self,s,lencode=None): #_vmsgb("examining %s" % s) self.string = s self.bits = None # bound bits self.rvalue = None self.equals = None self.lencode = lencode # for memory operands b = bit_expand_pattern.search(s) if b: expanded = b.group('bitname') * int(b.group('count')) ss = bit_expand_pattern.sub(expanded,s) else: ss = s rhs = None e= equals_pattern.search(ss) if e: #_vmsgb("examining %s --- EQUALS" % s) raw_left_side = e.group('lhs') lhs = lhs_capture_pattern.search(raw_left_side) self.equals = True rhs = e.group('rhs') self.rvalue = rvalue_t(rhs) #_vmsgb("examining %s --- EQUALS rhs = %s" % (s,str(self.rvalue))) else: ne = not_equals_pattern.search(ss) if ne: raw_left_side = ne.group('lhs') lhs = lhs_capture_pattern.search(raw_left_side) self.equals = False self.rvalue = rvalue_t(ne.group('rhs')) else: # no equals or not-equals... just a binding. assume "=*" raw_left_side = ss #msgerr("TOKEN OR BINDING %s" % (raw_left_side)) lhs = lhs_capture_pattern.search(raw_left_side) self.equals = True self.rvalue = rvalue_t('*') # the lhs is set if we capture bits for an encode action if lhs: self.field_name = lhs.group('name') self.bits = lhs.group('bits') else: #_vmsgb("examining %s --- NO LHS" % (s)) self.field_name = raw_left_side if self.is_reg_type() and self.rvalue.any_valid(): die("Not supporting 'any value' (*) for reg type in: %s" % s) if self.is_reg_type() and self.equals == False: die("Not supporting non-equal sign for reg type in: %s" % s) # Some bit bindings are done like "SIMM=iiiiiiii" instead # of "MOD[mm]=*". We must handle them as well. Modify the captured rvalue if rhs and self.equals: rhs_short = no_underscores(rhs) if letter_pattern.match(rhs_short): #msgerr("LATE LETTER BINDING %s %s" % (raw_left_side, str(self.rvalue))) self.bits = rhs_short del self.rvalue self.rvalue = rvalue_t('*') return #msgerr("NON BINDING %s" % (s)) # FIXME: what reaches here? def contains(self, s): if self.field_name == s: return True return False def capture_info(self): # FIXME: could locally bind bit fields in capture region to # avoid redundant calls to xes.operands(). 
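        # Editor's note (illustrative, not from the original source): for a
        # parsed condition like "MOD[mm]=*", self.field_name is 'MOD' and
        # self.bits is 'mm', so this returns the MOD accessor string from
        # operand_storage paired with the letters 'mm'. The exact accessor
        # name depends on the operand-storage configuration.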
        return ( operand_storage.get_op_getter_full_func(self.field_name,
                                                         encutil.enc_strings),
                 self.bits )

    def is_bit_capture(self):
        """Binding an OD to some bits"""
        if self.bits != None:
            return True
        return False

    def is_otherwise(self):
        """Return True if this condition is an 'otherwise' final condition."""
        if self.field_name == 'otherwise':
            return True
        return False

    def is_reg_type(self):
        if self.field_name not in storage_fields:
            return False
        ctype = storage_fields[self.field_name].ctype
        return ctype == 'xed_reg_enum_t'

    def __str__(self):
        s = [ self.field_name ]
        if self.memory_condition(): # MEM_WIDTH
            s.append(" (MEMOP %s)" % self.lencode)
        if self.bits:
            s.append( '[%s]' % (self.bits))
        if self.equals:
            s.append( '=' )
        else:
            s.append('!=')
        s.append(str(self.rvalue))
        return ''.join(s)

    def memory_condition(self): # MEM_WIDTH
        if self.lencode != None:
            return True
        return False

    def emit_code(self):
        #msgerr("CONDEMIT " + str(self))
        if self.is_otherwise():
            return "1"
        if self.equals:
            equals_string = '=='
        else:
            equals_string = '!='
        #FIXME: get rid of this old accessor
        op_accessor = operand_storage.get_op_getter_full_func(self.field_name,
                                                              encutil.enc_strings)
        if self.memory_condition(): # MEM_WIDTH
            s = 'xed_encoder_request__memop_compatible(xes,XED_OPERAND_WIDTH_%s)' % (self.lencode.upper())
        elif self.rvalue.nonterminal():
            s = 'xed_encode_ntluf_%s(xes,%s)' % (self.rvalue.value,op_accessor)
        elif self.rvalue.any_valid():
            if storage_fields[self.field_name].ctype == 'xed_reg_enum_t':
                # FOO=* is the same as FOO!=XED_REG_INVALID. So we
                # invert the equals sign here.
                if self.equals:
                    any_equals = "!="
                else:
                    any_equals = "=="
                s = "(%s %s XED_REG_INVALID)" % (op_accessor,any_equals)
            else:
                s = '1'
        elif self.rvalue.null():
            s = "(%s %s XED_REG_INVALID)" % (op_accessor,equals_string)
        else:
            # normal bound value test
            if self.rvalue.value == 'XED_REG_ERROR':
                s = '0'
            else:
                #msgerr("CONDEMIT2 " + str(self) + " -> " + self.rvalue.value)
                s = "(%s %s %s)" % (op_accessor,equals_string, self.rvalue.value)
        return s

class conditions_t(object):
    """Two lists of condition_t's. One gets ANDed together and one gets
    ORed together.
The OR-output gets ANDed with the rest of the AND terms.""" def __init__(self): self.and_conditions = [] def contains(self,s): for c in self.and_conditions: if c.contains(s): return True return False def and_cond(self, c): if is_stringish(c): nc = condition_t(c) else: nc = c self.and_conditions.append(nc) def has_otherwise(self): for a in self.and_conditions: if a.is_otherwise(): return True return False def __str__(self): s = [] for a in self.and_conditions: s.append(str(a)) s.append(' ') return ''.join(s) def _captures_from_list(self, clist): """ @type clist: list of condition_t @param clist: list of condition_t Return a list of tuples (fieldname, bits) for use by code generation (emit actions), by searching the conditions to see which ones are captures""" if vcapture(): msgb("CAPTURE COLLECTION USING:\n\t%s\n" % "\n\t".join( [ str(x) for x in clist] )) full_captures = list(filter(lambda x: x.is_bit_capture(), clist)) captures = [ x.capture_info() for x in full_captures] return captures def compute_field_capture_list(self): """Study the conditions and return a list of tuples (fieldname, bits) for use by code-emit actions, by searching the conditions to see which ones are captures""" captures = self._captures_from_list(self.and_conditions) return captures def field_names_from_list(self,clist): """Return a tuple of list of field names and list of NTS""" field_names = [] nt_names = [] for cond in clist: if cond.field_name: field_names.append(cond.field_name) if cond.rvalue and cond.rvalue.nonterminal(): nt_names.append(cond.rvalue.value) return (field_names, nt_names) def get_field_names(self): """Return a tuple of list of field names and list of NTS""" and_field_names = self.field_names_from_list(self.and_conditions) return ( and_field_names[0] , and_field_names[1]) def emit_code(self): if len(self.and_conditions) == 1: if self.and_conditions[0].is_otherwise(): return [ 'conditions_satisfied = 1;' ] # conditions_satisfied = f1 && f2 && f3 # # if conditions are operand deciders we just do the test. # if conditions are NTLUFs then we must see if the name is in # the set defined by the NTLUF. For example, BASE0=ArAX(). If # BASE0 is rAX then we are and the corresponding subexpression # should be True. s = ['conditions_satisfied = ' ] emitted = False if len(self.and_conditions) == 0: # no conditions. that's okay. encoder's job is simple in this case... s.append('1') emitted = True elif (len(self.and_conditions) == 1 and self.and_conditions[0].field_name == 'ENCODER_PREFERRED'): s.append('1') emitted = True else: first_and = True for and_cond in self.and_conditions: if and_cond.field_name == 'ENCODER_PREFERRED': continue try: t = and_cond.emit_code() if t != '1': if first_and: first_and = False else: s.append( ' &&\n\t\t ') emitted = True s.append( t ) except: die("Could not emit code for condition %s of %s" % (str(and_cond), str(self)) ) if not emitted: s.append('1') s.append(';') return [ ''.join(s) ] class iform_builder_t(object): def __init__(self): self.iforms = {} def remember_iforms(self,ntname): if ntname not in self.iforms: self.iforms[ntname] = True def _build(self): self.cgen = c_class_generator_t("xed_encoder_iforms_t", var_prefix="x_") for v in self.iforms.keys(): self.cgen.add_var(v, 'xed_uint32_t', accessors='none') def emit_header(self): self._build() return self.cgen.emit_decl() iform_builder = iform_builder_t() # FIXME GLOBAL class rule_t(object): """The encoder conditions -> actions. 
    These are stored in nonterminals."""
    def __init__(self, conditions, action_list, nt):
        """
        @type conditions: conditions_t
        @param conditions: a conditions_t object specifying the encode conditions

        @type action_list: list of strings/action_t
        @param action_list: list of actions; each can be a string or an action_t obj.

        @type nt: string
        @param nt: the nt to which this rule belongs
        """
        _vmsgb("MAKING RULE", "%s - > %s" % (str(conditions), str(action_list)))
        self.default = False # indicates whether this rule is a default rule
        self.nt = nt
        self.index = 0 # index is used to identify the correct emit order
        self.conditions = self.handle_enc_preferred(conditions)
        self.actions = []
        for action in action_list:
            if is_stringish(action):
                self.actions.append(actions.action_t(action))
            else:
                self.actions.append(action)

    def __str__(self):
        s = [ str(self.conditions) , " ->\t" ]
        first = True
        for a in self.actions:
            if first:
                first=False
            else:
                s.append("  \t")
            s.append(str(a))
        return ''.join(s)

    def handle_enc_preferred(self,conditions):
        ''' remove the ENCODER_PREFERRED constraint and
            replace it with an attribute '''
        for cond in conditions.and_conditions:
            if cond.field_name == "ENCODER_PREFERRED":
                self.enc_preferred = True
                conditions.and_conditions.remove(cond)
            else:
                self.enc_preferred = False
        return conditions

    def compute_field_capture_list(self):
        """Look at the conditions and return a list of tuples (fieldname,
        bits) for use by code generation, by searching the conditions
        to see which ones are captures"""
        # 2009-02-08: using the bind-phase test conditions is wrong,
        # as we do not need to test all the bindings.
        return self.conditions.compute_field_capture_list()

    def prepare_value_for_emit(self, a):
        """@return: (length-in-bits, value-as-hex)"""
        if a.emit_type == 'numeric':
            v = hex(a.int_value)
            return (a.nbits, v) # return v with the leading 0x

        s = a.value
        if hex_pattern.match(s):
            return ((len(s)-2)*4, s) # hex nibbles - 0x -> bytes
        s_short = no_underscores(s)
        if bits_pattern.match(s_short): # ones and zeros
            return (len(s_short), hex(int(s_short,2)))

        die("prepare_value_for_emit: Unhandled value [%s] for rule: [%s]" %(s,str(self)))

    def uses_bit_vector(self):
        """For encoding multiple prefixes, we need to stash multiple values
        in the IFORM. This is the key."""
        for a in self.actions:
            if a.is_field_binding():
                if a.field_name == 'NO_RETURN': # FIXME: check value ==1?
                    return True
        return False

    def has_nothing_action(self):
        for a in self.actions:
            if a.is_nothing():
                return True
        return False

    def has_error_action(self):
        for a in self.actions:
            if a.is_error():
                return True
            elif a.is_field_binding() and a.field_name == 'ERROR':
                return True
        return False

    def has_emit_action(self):
        for a in self.actions:
            if a.is_emit_action():
                return True
        return False

    def has_nonterminal_action(self):
        for a in self.actions:
            if a.is_nonterminal():
                return True
        return False

    def has_naked_bit_action(self):
        for a in self.actions:
            if a.naked_bits():
                return True
        return False

    def has_no_return_action(self):
        for a in self.actions:
            if a.is_field_binding():
                # for repeating prefixes, we have the NO_RETURN field.
                if a.field_name == 'NO_RETURN': # FIXME: check value ==1?
                    return True
        return False
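    # Editor's note (illustrative): NO_RETURN is set by repeating-prefix
    # rules. When a rule carries it, encoding does not stop after that
    # rule fires, so a single iform ordinal is not enough and the iform
    # is tracked as a bit vector instead (see uses_bit_vector() above).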
    def has_otherwise_rule(self):
        if self.conditions.has_otherwise():
            return True
        return False

    def get_nt_in_cond_list(self):
        # returns the condition with an nt, if one exists
        nts = []
        for cond in self.conditions.and_conditions:
            rvalue = cond.rvalue
            if rvalue.nonterminal():
                nts.append(cond)

        if len(nts) == 0:
            return None
        if len(nts) == 1:
            return nts[0]

        error = ("the rule %s has more than one nt in the " +
                 "condition list, we do not support it currently") % str(self)
        die(error)

    def emit_isa_rule(self, ith_rule, group):
        ''' emit code for an INSTRUCTIONS rule:
            1. conditions.
            2. set the encoder's iform index.
            3. call the field binding pattern function to set values to fields.
            4. emit the nonterminal actions.  '''

        lines = []
        # 1.
        lines.extend( self.conditions.emit_code() )
        lines.append( "if (conditions_satisfied) {")
        lines.append( "  okay=1;")

        # 2.
        obj_name = encutil.enc_strings['obj_str']
        set_iform = 'xed_encoder_request_set_iform_index'
        code = '%s(%s,iform_ids[iclass_index][%d])'
        code = code % (set_iform,obj_name,ith_rule)
        lines.append('  %s;' % code)

        # 3.
        get_fb_ptrn = ('  fb_ptrn_function = ' +
                       'xed_encoder_get_fb_ptrn(%s);' % obj_name )
        lines.append(get_fb_ptrn)
        # call the function that sets the values of the fields
        lines.append('  (*fb_ptrn_function)(%s);' % obj_name)

        # 4.
        for a in self.actions:
            if a.is_nonterminal():
                lines += a.emit_code('BIND')

        lines.append( "  if (okay) return 1;")
        lines.append( "}")
        return lines

    def emit_rule_bind(self, ith_rule, nt_name, ntluf):
        lines = []
        #
        # emit code for the conditions and if the conditions are true, do the action
        #
        lines.extend( self.conditions.emit_code() )
        lines.append( "if (conditions_satisfied) {")
        lines.append( "  okay=1;") # 2007-07-03 start okay over again...
        obj_name = encutil.enc_strings['obj_str']

        do_return = True
        use_bit_vector = self.uses_bit_vector()
        has_nothing_action = self.has_nothing_action()
        has_error_action = self.has_error_action()
        has_nonterminal_action = self.has_nonterminal_action()
        has_emit_action = self.has_emit_action()

        # NESTED FUNCTION!
        def emit_code_bind_sub(a,lines, do_return):
            #msgerr("Codegen for action %s" % str(a))
            if a.is_field_binding():
                # for repeating prefixes, we have the NO_RETURN field.
                if a.field_name == 'NO_RETURN': # FIXME: could check bound value == 1.
                    do_return = False
            lines.extend( a.emit_code('BIND') )
            return do_return

        # first do the non nonterminals
        for a in self.actions:
            if not a.is_nonterminal():
                do_return = emit_code_bind_sub(a, lines, do_return)
        # do the nonterminals after everything else
        for a in self.actions:
            if a.is_nonterminal():
                do_return = emit_code_bind_sub(a, lines, do_return)

        # here we are setting the encoder's iform ordinal
        if (has_emit_action or has_nonterminal_action) and \
           not has_error_action:
            # We do not set the iform for the "nothing" actions.
            if not has_nothing_action:
                if use_bit_vector:
                    code = 'xed_encoder_request_iforms(%s)->x_%s |=(1<<%d)'
                    code = code % (obj_name,nt_name,ith_rule)
                    lines.append( '  %s;' % code)
                else:
                    code = 'xed_encoder_request_iforms(%s)->x_%s=%d'
                    code = code % (obj_name,nt_name,ith_rule)
                    lines.append( '  %s;' % code)
                iform_builder.remember_iforms(nt_name)

        if do_return:
            # 2007-07-03 I added the conditional return to allow
            # checking other encode options in the event that a
            # sub-nonterminal (in this case SIMMz) tanks a partially
            # made "BIND" decision.
            lines.append( "  if (okay) return 1;")
        lines.append( "}")
        return lines

    def emit_rule_emit(self, ith_rule_arg, nt_name, captures):
        """Return a list of lines of code for the nonterminal function.
        @type ith_rule_arg: integer
        @param ith_rule_arg: number of the iform for which we are emitting code.

        @type ntname: string
        @param ntname: name of the nonterm

        @type captures: list
        @param captures: list of tuples (c-string,bits) (optional)
        """
        lines = []
        # emit code for the conditions and if the conditions are true, do the action
        use_bit_vector = self.uses_bit_vector()
        has_error_action = self.has_error_action()
        has_nothing_action = self.has_nothing_action()
        has_no_return_action = self.has_no_return_action()
        has_otherwise_rule = self.has_otherwise_rule()

        # complicated_nt lists the nonterminals that cannot be auto-generated
        # using hash functions and lookup tables due to their complexity,
        # so we generate them in the old if-statement structure.
        complicated_nt = nt_func_gen.get_complicated_nt()

        # the 'INSTRUCTIONS' NT and the complicated NTs emit iforms
        # using the old ordering mechanism;
        # all other NTs use the new attribute index to set the order
        ith_rule = ith_rule_arg
        if nt_name != 'INSTRUCTIONS' and nt_name not in complicated_nt:
            ith_rule = self.index
            has_otherwise_rule = self.default

        if veemit():
            msgb("EEMIT", "%d %s %s" % (ith_rule, nt_name, self.__str__()))
        if has_no_return_action:
            cond_else = '/* no return */ '
        else:
            cond_else = '/* %d */ ' % (ith_rule)

        if has_otherwise_rule:
            # 2007-07-23: otherwise rules always fire in emit. There
            # is no "else" for the otherwise rule. It is a catch-all.
            lines.append( "if (1) { /*otherwise*/")
        elif has_nothing_action:
            # Some rules have 'nothing' actions that serve as epsilon accepts.
            lines.append( "%sif (1) { /* nothing */" % (cond_else))
            for a in self.actions:
                if not a.is_nothing():
                    die("Nothing action mixed with other actions")
        elif has_error_action:
            #
            # do not check the iform on error actions -- just ignore
            # them. They are caught in the "BIND" step.
            return []
        elif use_bit_vector:
            lines.append( "%sif (iform&(1<<%d)) {" % (cond_else,ith_rule))
        else:
            lines.append( "%sif (iform==%d) {" % (cond_else,ith_rule))

        do_return = True
        for a in self.actions:
            if veemit():
                msgb("Codegen for action", str(a))
            if a.is_field_binding():
                # for repeating prefixes, we have the NO_RETURN field.
                if a.field_name == 'NO_RETURN': # FIXME: check value ==1?
                    do_return = False
                continue
            if a.is_nonterminal():
                if veemit():
                    msgb("EEMIT NT ACTION", str(a))
                t = a.emit_code('EMIT') # EMIT for NTs
                if veemit():
                    for x in t:
                        msgb("NT EMIT", x)
                lines.extend( t )
            elif a.is_emit_action():
                # emit actions require knowledge of all the conditions
if captures: list_of_tuples = captures else: list_of_tuples = self.compute_field_capture_list() if vtuples(): msgb("TUPLES", (" ,".join( [str(x) for x in list_of_tuples] ))) if len(list_of_tuples) == 0 or a.emit_type == 'numeric': # no substitutions required (length, s) = self.prepare_value_for_emit(a) if veemit(): msgb("SIMPLE EMIT", "bits=%d value=%s" % (length, s)) else: (length,s) = scatter_gen( a.value, list_of_tuples) #msgerr("SCATTERGEN %s %s -> %s %s" % (str(a.value), str(list_of_tuples), length, s)) t = " xed_encoder_request_encode_emit(xes,%s,%s);" % (length,s) if veemit(): msgb("EMITTING" , t) lines.append(t) if do_return: #lines.append( " if (okay && %s != XED_ERROR_NONE) okay=0;" % (error_operand())) lines.append( " if (%s != XED_ERROR_NONE) okay=0;" % (error_operand())) #lines.append( " if (okay) return 1;") lines.append( " return okay;") lines.append( "}") # close iform return lines def emit_rule(self, bind_or_emit, ith_rule, nt_name, captures=None): """Return a list of lines of code for the nonterminal function. @type bind_or_emit: string @param bind_or_emit: 'BIND', 'EMIT' or 'NTLUF' """ if bind_or_emit == 'NTLUF': ntluf = True else: ntluf = False if bind_or_emit == 'BIND' or bind_or_emit == 'NTLUF': return self.emit_rule_bind(ith_rule, nt_name, ntluf) elif bind_or_emit == 'EMIT': return self.emit_rule_emit(ith_rule, nt_name,captures) else: die("Need BIND or EMIT") def get_all_fbs(self): ''' collect all the actions that sets fields ''' fbs = [] for action in self.actions: if action.is_field_binding(): fbs.append(action) if action.is_emit_action() and action.emit_type == 'numeric': if action.field_name: fbs.append(action) return fbs def get_all_emits(self): ''' return a list of all emit type actions ''' emits = [] for action in self.actions: if action.is_emit_action(): emits.append(action) return emits def get_all_nts(self): ''' return a list of all nonterminal type actions ''' nts = [] for action in self.actions: if action.is_nonterminal(): nts.append(action) return nts class iform_t(object): """One form of an instruction""" def __init__(self, iclass, enc_conditions, enc_actions, modal_patterns, uname=None): self.iclass = iclass self.uname = uname self.enc_conditions = enc_conditions # [ operand_t ] self.enc_actions = enc_actions # [ blot_t ] self.modal_patterns = modal_patterns # [ string ] #the emit phase action pattern self.emit_actions = None #the FB actions pattern self.fb_ptrn = None self._fixup_vex_conditions() self.rule = self.make_rule() def _fixup_vex_conditions(self): """if action has VEXVALID=1, add modal_pattern MUST_USE_AVX512=0. The modal_patterns become conditions later on.""" for act in self.enc_actions: if act.field_name == 'VEXVALID' and act.value == 1: self.modal_patterns.append( "MUST_USE_EVEX=0" ) def make_operand_name_list(self): """Make an ordered list of operand storage field names that drives encode operand order checking. """ operand_names = [] for opnd in self.enc_conditions: if voperand2(): msg( "EOLIST iclass %s opnd %s vis %s" % (self.iclass, opnd.var, opnd.vis)) if opnd.vis == 'SUPP': continue if opnd.vis == 'ECOND': continue if self._check_encoder_input(opnd.var): operand_names.append(opnd.var) # 2007-07-05 We do not need to add MEM_WIDTH, since that does # not affect operand order. It is checked for memops by # encode. 
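        # Illustrative sketch (assumed operand syntax, not from the source):
        # for an instruction with operands "REG0=GPRv_B():rw MEM0:r:v",
        # the SUPP/ECOND operands are skipped above and this would return
        # something like ['REG0', 'MEM0'], which later drives the encoder's
        # operand-order checking.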
        return operand_names

    def compute_binding_strings_for_emit(self):
        """Gather up *all* the conditions (suppressed or not) and include
        them as possible candidates for supplying bits for the
        encoder."""
        captures = []
        for opnd in self.enc_conditions: # each is an operand_t
            if opnd.type == 'binding':
                if letter_and_underscore_pattern.match(opnd.value):
                    captures.append((opnd.var, no_underscores(opnd.value)))
                else:
                    pass #msge("SKIPPING BINDING " + str(opnd))

        # add the C decoration to the field name for emitting code.
        decorated_captures = []
        for (f,b) in captures:
            decorated_captures.append((operand_storage.get_op_getter_fn(f),b))
        return decorated_captures

    def _check_encoder_input(self,name):
        """Return True for things that are storage field encoder inputs"""
        global storage_fields
        if name in storage_fields and storage_fields[name].encoder_input:
            return True
        return False

    def find_encoder_inputs(self):
        """Return a set of encoder input field names"""
        s = set()
        ns = set()
        for mp in self.modal_patterns:
            if self._check_encoder_input(mp):
                s.add(mp)
        for op in self.enc_conditions:
            # The encoder ignores SUPP operands.
            if op.vis == 'SUPP':
                continue
            if op.type == 'token' or op.type == 'binding' or op.type == 'ntluf':
                if self._check_encoder_input(op.var):
                    s.add(op.var)
                if op.lencode != '?':
                    s.add('MEM_WIDTH')
                if op.ntluf:
                    ns.add(op.value)
        return (s,ns)

    def make_rule(self):
        """Return a rule_t based on the conditions and action_list."""
        if vrule():
            msgb("MAKE RULE","for %s" % str(self))
        action_list = [] # [ string ]
        for blot in self.enc_actions:
            a = blot.make_action_string()
            if a:
                action_list.append(a)
        cond = conditions_t()
        for mp in self.modal_patterns:
            if vrule():
                msgb("Adding MODAL_PATTERN", mp)
            c = condition_t(mp)
            cond.and_cond(c)

        for opnd in self.enc_conditions:
            # some conditions we ignore: like for SUPP registers...
            if vrule():
                msge("OPERAND: %s" % (str(opnd)))
            c = opnd.make_condition()
            if c:
                if vrule():
                    msge("\t MADE CONDITION")
                for subc in c:
                    if vrule():
                        msge("\t\tANDCOND %s" % str(subc))
                    cond.and_cond(subc)
            else:
                if vrule():
                    msge("\t SKIPPING OPERAND in the AND CONDITIONS")

        # Here we are handling only instructions. There is no need to
        # specify the nt name since the instructions have their own
        # emit function and the nt name is not used.
        rule = rule_t(cond,action_list, None)
        self._remove_overlapping_actions(rule.actions)
        return rule

    def _remove_overlapping_actions(self, action_list):
        '''
        For some actions the generated code looks exactly the same.
        For example: action1: MOD=0
                     action2: MOD[0b00]
        The generated code for both of them in the BIND phase is the same,
        and for action1 we do nothing in the EMIT phase.
        We iterate over all the field bindings to see if we have
        overlapping emit actions. This modifies the input action_list
        in place.
        '''
        emit_actions = list(filter(lambda x: x.type == 'emit', action_list))
        fb_actions = list(filter(lambda x: x.type == 'FB', action_list))

        # iterate to find overlapping actions
        action_to_remove = []
        for fb in fb_actions:
            for emit in emit_actions:
                if fb.field_name.lower() == emit.field_name and \
                   emit.emit_type == 'numeric':
                    if fb.int_value == emit.int_value:
                        # overlapping actions, record this action
                        # and remove it later
                        action_to_remove.append(fb)
                    else:
                        err = "FB and emit action for %s have different values"
                        genutil.die(err % fb.field_name)

        # remove the overlapping actions
        for action in action_to_remove:
            action_list.remove(action)

    def __str__(self):
        s = []
        s.append("ICLASS: %s" % self.iclass)
        s.append("CONDITIONS:")
        for c in self.enc_conditions:
            s.append("\t%s" % str(c))
        s.append( "ACTIONS:")
        for a in self.enc_actions:
            s.append("\t%s" % str(a))
        return '\n'.join(s)

def key_rule_tuple(x):
    (a1,a2) = x
    return a1

def rule_tuple_sort(a,b): # FIXME:2017-06-10:PY3 port, no longer used
    (a1,a2) = a
    (b1,b2) = b
    if a1 > b1:
        return 1
    elif a1 < b1:
        return -1
    return 0

class nonterminal_t(object):
    def __init__(self, name, rettype=None):
        """ The return type is for the NTLUFs only. """
        self.name = name
        self.rettype = rettype # if not None, then this is a NTLUF
        self.rules = []
        #FIXME: this will be used in the future
        #self.otherwise = actions.action_t('error=XED_ERROR_GENERAL_ERROR')
        self.otherwise = [actions.gen_return_action('0')]

    def _default_rule(self):
        ''' return a rule_t object, where the conditions are: 'otherwise'
            and the actions are taken from the otherwise attribute '''
        conds = conditions_t()
        conds.and_cond('otherwise')
        rule = rule_t(conds,self.otherwise,self.name)
        rule.default = True
        return rule

    def is_ntluf(self):
        if self.rettype:
            return True
        return False

    def add(self,rule):
        self.rules.append(rule)

    def __str__(self):
        s = [ self.name , "()::\n" ]
        for r in self.rules:
            s.extend(["\t" , str(r) , "\n"])
        return ''.join(s)

    def multiple_otherwise_rules(self):
        c = 0
        for r in self.rules:
            if r.has_otherwise_rule():
                c = c + 1
        if c > 1:
            return True
        return False

    def sort_for_size(self):
        tups = []
        # PERF: want the 'nothing' bindings to occur before the error
        # bindings because the errors are less frequent. (Only one
        # "nothing" emit will occur and "error" actions do not show up
        # in the "emit" phase.)
        for rule in self.rules:
            if rule.has_otherwise_rule():
                weight = 99999 # make it last
            elif rule.has_nothing_action():
                weight = 99997
            elif rule.has_error_action():
                weight = 99998
            else:
                weight = len(rule.actions) # try to get shortest form first...
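            # Editor's note: with these weights, ordinary rules sort first
            # (shortest action list first), then 'nothing' (99997), then
            # 'error' (99998), and 'otherwise' (99999) always sorts last.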
_vmsgb("RULE WEIGHT %d" % (weight), str(rule)) tups.append((weight,rule)) tups.sort(key=key_rule_tuple) newrules = [] for (x,y) in tups: newrules.append(y) self.rules = newrules def create_function(self, bind_or_emit): if self.is_ntluf(): # bind_or_emit should be 'NTLUF' fname = 'xed_encode_ntluf_%s' % self.name else: fname = 'xed_encode_nonterminal_%s_%s' % (self.name, bind_or_emit) if vntname(): msgb("NTNAME", self.name) fo = function_object_t(fname,"xed_uint_t") fo.add_arg("%s* xes" % xed_encoder_request) if self.is_ntluf(): fo.add_arg("xed_reg_enum_t arg_reg") # bind this to OUTREG below fo.add_comment(self.__str__()) fo.add_code_eol("xed_uint_t okay=1") if bind_or_emit == 'BIND' or bind_or_emit == 'NTLUF': fo.add_code_eol( "xed_uint_t conditions_satisfied=0" ) has_emit_action = False if bind_or_emit == 'EMIT': for r in self.rules: if r.has_emit_action(): has_emit_action = True break has_nonterminal_action = False if bind_or_emit == 'EMIT': for r in self.rules: if r.has_nonterminal_action(): has_nonterminal_action = True break # FIXME: PERF using OUTREG to hold arg_reg is the easiest way # to not change any of the condition code generation stuff. I # could easily optimize this later. 2007-04-10 nt_name = self.name if self.is_ntluf(): fo.add_code_eol( "%s = arg_reg" % (outreg())) # setup or read the IFORM variable if we are binding or emitting. if bind_or_emit == 'BIND' or bind_or_emit == 'NTLUF': if len(self.rules)>0: if self.rules[0].uses_bit_vector(): fo.add_code_eol( "xed_encoder_request_iforms(xes)->x_%s=0" % (nt_name) ) iform_builder.remember_iforms(nt_name) else: # EMIT if has_emit_action or has_nonterminal_action: fo.add_code_eol( "unsigned int iform = xed_encoder_request_iforms(xes)->x_%s" % (nt_name) ) iform_builder.remember_iforms(nt_name) else: # nothing to emit, so skip this... 
fo.add_code_eol('return 1') fo.add_code_eol('(void) okay') fo.add_code_eol('(void) xes') return fo #_vmsgb("EMITTING RULES FOR", nt_name) emitted_nothing_action=False for i,rule in enumerate(self.rules): #_vmsgb("EMITTING RULE %d" % (i+1)) emitr = True if bind_or_emit == 'EMIT' and rule.has_nothing_action(): if emitted_nothing_action: emitr = False emitted_nothing_action = True if emitr: lines = rule.emit_rule(bind_or_emit,i+1, nt_name) fo.add_lines(lines) default_rule = self._default_rule() lines = default_rule.emit_rule(bind_or_emit,0, nt_name) fo.add_lines(lines) fo.add_code('return 0; /*pacify the compiler*/') fo.add_code_eol('(void) okay') fo.add_code_eol('(void) xes') if bind_or_emit == 'EMIT': fo.add_code_eol('(void) iform') if bind_or_emit == 'BIND' or bind_or_emit == 'NTLUF': fo.add_code_eol("(void) conditions_satisfied") return fo class sequencer_t(object): def __init__(self, name): self.name = name self.nonterminals = [] def add(self,nt): t = nt_name_pattern.search(nt) if t: self.nonterminals.append(t.group('ntname')) else: self.nonterminals.append(nt) def __str__(self): s = ["SEQUENCE " , self.name , "\n"] for nt in self.nonterminals: s.extend(["\t" , str(nt) , "()\n"]) return ''.join(s) def create_function(self,sequences): fname = 'xed_encode_nonterminal_' + self.name lst = [] for x in self.nonterminals: # FIXME 2007-06-29 <NAME>: This looks odd if x in sequences: lst.append("xed_encode_nonterminal_%s" % x) else: lst.append("xed_encode_nonterminal_%s" % x) arg ='xes' fo = function_call_sequence_conditional(fname,lst,arg) fo.add_arg('%s* xes' % xed_encoder_request) return fo def group_bits_and_letter_runs(s): """ @type s: string @param s: string of the form [01a-z]+ @rtype: list of strings @return: list of binary bit strings and distinct letter runs """ out = [] run = None last_letter = None last_was_number = False # remove underscores from s for i in list(s.replace('_','')): if i=='0' or i=='1': if last_was_number: run += i else: if run: out.append(run) # end last run run = i last_was_number = True last_letter = None else: # i is a letter if last_letter and last_letter == i: run += i else: if run: out.append(run) # end last run run = i last_was_number = False last_letter = i if run: out.append(run) return out class encoder_input_files_t(object): def __init__(self, options): self.xeddir = options.xeddir self.gendir = options.gendir self.storage_fields_file = options.input_fields self.regs_input_file = options.input_regs self.decoder_input_files = options.enc_dec_patterns self.encoder_input_files = options.enc_patterns self.state_bits_file = options.input_state self.instructions_file = options.isa_input_file # dict of operand_order_t indexed by special keys stored in iform.operand_order_key self.all_operand_name_list_dict = None def input_file(self,s): """Join the xeddir and the datafiles dir to s""" return os.path.join(self.xeddir,'datafiles',s) class operand_order_t(object): def __init__(self,n,lst): self.n = n # index in to the encode_order array self.lst = lst # list of nonsuppressed operands class encoder_configuration_t(object): # decode: ipatterns -> operands # encode: conditions -> actions # normally ipatterns become actions. # normally operands become conditions. # however, # some ipatterns become conditions # and some operands become actions. # # and finally, some operands get dropped entirely. 
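    # Illustrative example of the inversion (hypothetical rule text, for
    # documentation only): a flat decode line roughly of the form
    #
    #     0b0100 wrxb | REXW[w]=w ...
    #
    # reads left-to-right for decode (pattern -> operands), but encode
    # consumes it right-to-left: the operand bindings on the right become
    # the conditions to test, and the pattern bits/letters on the left
    # become the actions that emit bits.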
def __init__(self, encoder_input_files, amd_enabled=True): self.amd_enabled = amd_enabled self.files = encoder_input_files self.gendir = self.files.gendir self.xeddir = self.files.xeddir global storage_fields lines = open(self.files.storage_fields_file,'r') operands_storage = operand_storage.operands_storage_t(lines) storage_fields = operands_storage.get_operands() self.state_bits = None self.sequences = {} self.nonterminals = {} self.decoder_nonterminals = {} self.decoder_ntlufs = {} self.functions = [] # the main ISA decode rules are stored here before conversion # to the encode rules self.iarray = {} # dictionary by iclass of [ iform_t ] self.deleted_instructions = {} # by iclass self.deleted_unames = {} # by uname cmkdir(self.gendir) def dump_output_file_names(self): global output_file_emitters ofn = os.path.join(self.gendir,"ENCGEN-OUTPUT-FILES.txt") o = open(ofn,"w") for fe in output_file_emitters: o.write(fe.full_file_name + "\n") o.close() def parse_decode_rule(self, conds,actions ,line, nt_name): # conds -- rhs, from an encode perspective (decode operands) # actions -- lhs, from an encode perspective (decode patterns) # move some special actions to the conditions new_actions = [] for a in actions: # decode patterns if veparse(): msgb("parse_decode_rule actions", str(a)) q = lhs_pattern.match(a) if q: lhs_a = q.group('name') if lhs_a in storage_fields and storage_fields[lhs_a].encoder_input == True: if veparse(): msgb("CVT TO ENCODER CONDITION", lhs_a) conds.append(a) continue opcap = lhs_capture_pattern_end.match(a) if opcap: synth_cap = "%s=%s" % (opcap.group('name'), opcap.group('bits')) conds.append( synth_cap ) if veparse(): msge("SYNTH CONDITION FOR " + a + " --> " + synth_cap ) new_actions.append(a) continue if veparse(): msge("NEWACTION " + a) new_actions.append(a) del actions # Move some special encode conditions to the encode # actions if they are not encoder inputs. This solves # a problem with encoding IMM0SIGNED on SIMMz() # nonterminals. new_conds = [] for c in conds: # were decode operands (rhs) if veparse(): msgb("parse_decode_rule conditions", str(c)) if c.find('=') == -1: trimmed_cond = c else: ep = equals_pattern.match(c) # catches "=", but not "!=" if ep: trimmed_cond = ep.group('lhs') else: die("Bad condition: %s" % c) if veparse(): msgb("TESTING COND", "%s --> %s" % (c, trimmed_cond)) keep_in_conds = True try: if storage_fields[trimmed_cond].encoder_input == False: if veparse(): msgb("DROPPING COND", c) keep_in_conds = False # 2007-08-01 except: pass # if we have the constraint: OUTREG=some_nt() and it is not the # single constraint we want to move # the nt: some_nt() to the actions side. # e.g. the constraint: MODE=3 OUTREG=GPRv_64() -> nothing # becomes: MODE=3 -> GPRv_64() if trimmed_cond == 'OUTREG': nt = nt_name_pattern.match(c.split('=')[1]) if nt and len(conds) > 1: c = "%s(OUTREG)" % nt.group('ntname') keep_in_conds = False if keep_in_conds: new_conds.append(c) else: if veparse(): msge("COND->ACTION " + c) # FIXME: REMOVEME new_actions.append(c) conds = new_conds # signal it is okay if there is no action if len(new_actions) == 0: new_actions.append('nothing') if len(conds) == 0: conds = ['otherwise'] if len(conds) > 0: conditions = conditions_t() for c in conds: #msge("COND " + c) # FIXME: REMOVEME xr = xed_reg_pattern.match(c) # FIXME: not general enough if xr: conditions.and_cond("OUTREG=%s" % (xr.group('regname'))) else: conditions.and_cond(c) # only add a rule if we have conditions for it! 
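            # Illustrative example (restating the OUTREG case above): for a
            # flat-file rule "MODE=3 OUTREG=GPRv_64() -> nothing", conds
            # ends up as ['MODE=3'] and new_actions as ['GPRv_64(OUTREG)'],
            # i.e. the OUTREG nonterminal moved from condition to action.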
            rule = rule_t(conditions, new_actions, nt_name)
            return rule
        else:
            _vmsgb("DROP DECODE LINE (NO eCONDS)", "%s\nin NT: %s" %(line,nt_name))
            return None

    def parse_decode_lines(self, lines):
        """
        Read the flat decoder files (not the ISA file). Return a tuple:
        ( dict of nonterminals, dict of nonterminal lookup functions )

        This parses the so-called flat format with the vertical bar
        used for all the non-instruction tables.

        For decode the semantics are:      preconditions | dec-actions
        However for encode, the semantics change to:
                                           enc-actions   | conditions
        And we must take some of the "enc-actions" and add them to the
        preconditions. These include the actions associated with:
        MODE,SMODE,EOSZ,EASZ
        """
        nts = {}
        ntlufs = {}
        while len(lines) > 0:
            line = lines.pop(0)
            #msge("LINEOUT:" + line)
            line = comment_pattern.sub("",line)
            line = leading_whitespace_pattern.sub("",line)
            line = line.rstrip()
            if line == '':
                continue
            line = slash_expand.expand_all_slashes(line)

            p = ntluf_pattern.match(line)
            if p:
                nt_name = p.group('ntname')
                ret_type = p.group('rettype')
                # create a new nonterminal to use
                nt = nonterminal_t(nt_name, ret_type)
                ntlufs[nt_name] = nt
                continue

            p = nt_pattern.match(line)
            if p:
                nt_name = p.group('ntname')
                # create a new nonterminal to use
                nt = nonterminal_t(nt_name)
                nts[nt_name] = nt
                continue

            p = decode_rule_pattern.match(line)
            if p:
                # rhs, from an encode perspective (decode operands)
                conds = p.group('cond').split()
                # lhs, from an encode perspective (decode patterns)
                actions = p.group('action').split()
                rule = self.parse_decode_rule(conds,actions,line,nt.name)
                if rule:
                    nt.add(rule)
                    if nt.multiple_otherwise_rules():
                        die("Multiple otherwise rules in %s -- noninvertible" % (nt_name))
                continue
            die("Unhandled line: %s" % line)
        return (nts, ntlufs)

    def parse_encode_lines(self,lines):
        """
        Returns a tuple of three dictionaries: (1) a dictionary of
        sequencer_t's, (2) a dictionary of nonterminal_t's and (3) a
        dictionary of NTLUF nonterminal_t's
        """
        nts = {}     # nonterminal_t's
        ntlufs = {}  # nonterminal_t's
        seqs = {}    # sequencer_t's
        while len(lines) > 0:
            line = lines.pop(0)
            line = comment_pattern.sub("",line)
            line = leading_whitespace_pattern.sub("",line)
            if line == '':
                continue
            line = slash_expand.expand_all_slashes(line)

            c = curly_pattern.search(line)
            if c:
                line = re.sub("{", " { ", line)
                line = re.sub("}", " } ", line)

            sequence = sequence_pattern.match(line)
            if sequence:
                seq = sequencer_t(sequence.group('seqname'))
                seqs[seq.name] = seq
                #msg("SEQ MATCH %s" % seq.name)
                nt = None
                continue

            p = ntluf_pattern.match(line)
            if p:
                nt_name = p.group('ntname')
                ret_type = p.group('rettype')
                # create a new nonterminal to use
                nt = nonterminal_t(nt_name, ret_type)
                ntlufs[nt_name] = nt
                seq = None
                continue

            m = nt_pattern.match(line)
            if m:
                nt_name = m.group('ntname')
                if nt_name in nts:
                    nt = nts[nt_name]
                else:
                    nt = nonterminal_t(nt_name)
                    nts[nt_name] = nt
                seq = None
                continue

            a = arrow_pattern.match(line)
            if a:
                conds = a.group('cond').split()
                actns = a.group('action').split()
                #msg("ARROW" + str(conds) + "=>" + str(actions))
                conditions = conditions_t()
                for c in conds:
                    conditions.and_cond(c)
                rule = rule_t(conditions, actns, nt_name)
                if seq:
                    seq.add(rule)
                else:
                    # we do not need the rules otherwise->error/nothing in the
                    # new encoding structure (hash tables).
                    # instead we are holding this info in a matching attribute
                    if rule.conditions.and_conditions[0].is_otherwise():
                        if rule.actions[0].is_nothing():
                            nt.otherwise = [actions.gen_return_action('1')]
                        elif rule.actions[0].is_error():
                            nt.otherwise = [actions.gen_return_action('0')]
                        else:
                            nt.otherwise = [ actions.action_t(x) for x in actns]
                            # in case we have a valid action for the otherwise
                            # rule we should finish it with returning 1,
                            # which is "not an error"
                            nt.otherwise.append(actions.gen_return_action('1'))
                    else:
                        nt.add(rule)
            else:
                for nt in line.split():
                    seq.add(nt)
        return (seqs,nts,ntlufs)

    def parse_state_bits(self,lines):
        d = []
        state_input_pattern = re.compile(r'(?P<key>[^\s]+)\s+(?P<value>.*)')
        while len(lines) > 0:
            line = lines.pop(0)
            line = comment_pattern.sub("",line)
            line = leading_whitespace_pattern.sub("",line)
            if line == '':
                continue
            line = slash_expand.expand_all_slashes(line)
            p = state_input_pattern.search(line)
            if p:
                #_vmsgb(p.group('key'), p.group('value'))
                #d[p.group('key')] = p.group('value')
                s = r'\b' + p.group('key') + r'\b'
                pattern = re.compile(s)
                d.append( (pattern, p.group('value')) )
            else:
                die("Bad state line: %s" % line)
        return d

    def expand_state_bits_one_line(self,line):
        new_line = line
        for k,v in self.state_bits:
            new_line = k.sub(v,new_line)
        return new_line

    def expand_state_bits(self,lines):
        new_lines = []
        # n^2 algorithm
        for line in lines:
            new_line = line
            for k,v in self.state_bits:
                new_line = k.sub(v,new_line)
            new_lines.append(new_line)
        return new_lines

    def update(self,seqs,nts,ntlufs):
        """Update the sequences and nonterminals dictionaries"""
        self.sequences.update(seqs)
        self.nonterminals.update(nts)
        self.decoder_ntlufs.update(ntlufs)

    def read_encoder_files(self):
        for f in self.files.encoder_input_files:
            lines = open(f,'r').readlines()
            lines = self.expand_state_bits(lines)
            (seqs,nts,ntlufs) = self.parse_encode_lines(lines)
            del lines
            self.update(seqs,nts,ntlufs)

    def reorder_encoder_rules(self,nts):
        """reorder rules so that any rules with ENCODER_PREFERRED come first"""
        for nt in nts.values():
            first_rules = []
            rest_of_the_rules = []
            for r in nt.rules:
                if r.conditions.contains("ENCODER_PREFERRED"):
                    first_rules.append(r)
                else:
                    rest_of_the_rules.append(r)
            nt.rules = first_rules + rest_of_the_rules

    ##################################################

    def make_nt(self,ntname):
        blot = blot_t('nt')
        blot.nt = ntname
        return blot

    def make_hex(self,s,field_name=None):
        """
        @param s: string with a 2 nibble hex number
        @rtype: blot_t
        @return: blot containing the integer value
        """
        blot = blot_t('bits')
        blot.value = int(s,16)
        blot.length = 8
        blot.field_name = field_name
        return blot

    def make_binary(self,s,field_name=None):
        """
        @param s: string with a binary number
        @rtype: blot_t
        @return: blot containing the integer value
        """
        blot = blot_t('bits')
        if re.search(r'^0b',s):
            s = re.sub('0b','',s)
        s = re.sub('_','',s)
        blot.value = int(s,2)
        blot.length = len(s)
        blot.original_bits = s # FIXME: 2007-04-20
        blot.field_name = field_name
        return blot

    def make_bits_and_letters(self,s, field_name=None):
        """
        @type s: string
        @param s: string of letters or binary digits representing the blot_t

        @type field_name: string
        @param field_name: name of the storage field (optional)

        @rtype: list of blot_t's
        @return: list of blot_t's
        """
        #_vmsgb("MBAL","%s" % s)
        blots = []
        bit_offset_in_field = 0
        runs = group_bits_and_letter_runs(s)
        _vmsgb("RUNS\t",str(runs))
        for r in runs:
            #_vmsgb("\t",str(r))
            if len(r) == 0:
                die("Bad run in " + str(s))
            blot = blot_t()
            if r[0] == '0' or r[0] == '1':
                blot.type = 'bits'
                blot.value = int(r,2)
            else:
                blot.type = 'letters'
                blot.letters = r
            blot.length = len(r)
            blot.field_name = field_name
            blot.field_offset = bit_offset_in_field
            bit_offset_in_field += blot.length
            blots.append(blot)
        return blots

    def make_decider_blot(self, lhs,rhs,equals):
        blot = blot_t('od')
        blot.field_name = lhs
        rhs = re.sub(r':.*','',rhs)
        blot.value = make_numeric(rhs,"%s %s %s" % (str(lhs),str(equals),str(rhs)))
        blot.od_equals = equals
        return blot

    def make_decode_patterns(self,s):
        """ Return one or more subpatterns of type. Sometimes we
        specify a decode pattern like MOD[mm] or MOD[11_]. The 2nd
        part of the return tuple is a list of the implied decode
        operands such as MOD=mm or MOD=11_.

        @rtype: tuple
        @returns: (list of blot_t's representing patterns,\
        a list of tuples of field bindings)
        """
        decode_patterns = []
        field_bindings = []
        while 1:
            nt = nt_name_pattern.match(s)
            if nt:
                decode_patterns.append(self.make_nt(nt.group('ntname')))
                break
            opcap = lhs_capture_pattern_end.match(s)
            if opcap: # MOD[mm]  REG[0b000]
                bits = opcap.group('bits')
                field_name = opcap.group('name')
                if binary_pattern.match(bits):
                    decode_patterns.append(self.make_binary(bits, field_name))
                elif hex_pattern.match(bits):
                    decode_patterns.append(self.make_hex(bits, field_name))
                elif letter_pattern.match(bits):
                    o = self.make_bits_and_letters( bits, field_name)
                    decode_patterns.extend(o)
                else:
                    genutil.die("Unrecognized pattern '{}' for {}".format(
                        bits, s))
                field_bindings.append( opcap.group('name','bits') )
                break
            if hex_pattern.match(s):
                decode_patterns.append(self.make_hex(s))
                break
            s_nounder = no_underscores(s)
            if binary_pattern.match(s_nounder):
                decode_patterns.append(self.make_binary(s_nounder))
                break
            if bits_and_letters_pattern.match(s_nounder):
                decode_patterns.extend(self.make_bits_and_letters(s_nounder))
                break
            if letter_pattern.match(s_nounder):
                decode_patterns.extend(self.make_bits_and_letters(s_nounder))
                break
            equals = equals_pattern.match(s)
            if equals:
                (lhs,rhs) = equals.group('lhs','rhs')
                decode_patterns.append(self.make_decider_blot(lhs,rhs,equals=True))
                break
            not_equals = not_equals_pattern.match(s)
            if not_equals:
                (lhs,rhs) = not_equals.group('lhs','rhs')
                decode_patterns.append(self.make_decider_blot(lhs,rhs,equals=False))
                break
            die("Could not process decode pattern %s" % s)
        return (decode_patterns, field_bindings)

    def parse_one_decode_rule(self, iclass, operand_str, pattern_str):
        """Read the decoder rule from the main ISA file and package it up
        for encoding. Flipping things around as necessary.

        @type operand_str: string
        @param operand_str: decode operands

        @type pattern_str: string
        @param pattern_str: decode pattern (bits, nts, ods, etc.)

        @rtype: tuple
        @return: (list decode-operands/encode-conditions as operand_t's, \
        list decode-patterns/encode-actions as blot_t's \
        list of modal patterns strings that should become encode condition_t objs)
        """
        # generally:
        #
        # decode-pattern  --become-->  encode-action
        # decode-operands --become-->  encode-condition
        #
        # but there are special cases:
        #
        # 1) Some decode-pattern stuff needs to become encode-conditions
        #    as they are encoder inputs
        # 2) Some decode-operand stuff needs to become encode-actions
        #    as they are encoder outputs

        global storage_fields
        patterns = []

        # The extra_bindings_list is a list of implied bindings deduced
        # from the decode pattern, for things like MOD[mm] (etc.) that do
        # field captures in the pattern. We use them to create
        # new (decode) operands (which then become encode conditions).
        extra_bindings = []

        # Some decode patterns become encode conditions. These are
        # the fields that are listed as "EI" (encoder inputs) in the
        # "fields description" file.
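        # Editor's note: per the parse_decode_lines() docstring earlier in
        # this file, typical encoder-input fields are things like MODE,
        # SMODE, EOSZ and EASZ; the exact set comes from the fields
        # description file, not from this code.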
        modal_patterns = []

        # decode-patterns *mostly* become encode-actions, except for
        # fields that are encoder inputs.
        for p in pattern_str.split():
            p_short = rhs_pattern.sub('', p) # grab the lhs
            # special cases
            if (p_short in storage_fields and
                storage_fields[p_short].encoder_input):
                if voperand():
                    msgb("MODAL PATTERN", p_short)
                modal_patterns.append(p)
                continue
            if p_short in storage_fields and p == 'BCRC=1':
                # FIXME: 2016-01-28: MJC: HACK TO ENCODE ROUNDC/SAE CONSTRAINTS
                if 'SAE' in pattern_str:
                    modal_patterns.append("SAE!=0")
                elif 'AVX512_ROUND' in pattern_str:
                    modal_patterns.append("ROUNDC!=0")

            # The pattern_list is a list of blot_t's covering the
            # pattern. The extra_bindings_list is a list of
            # implied bindings deduced from the decode patterns.
            ##
            # The extra bindings are for MOD[mm] (etc.) that do
            # field captures in the pattern. We use them to create
            # new operands.
            _vmsgb("PARSING DECODE PATTERN", str(p))

            # pattern_list is a list of blot_t
            # extra_bindings_list is a list of tuples (name,bits)
            (pattern_list, extra_bindings_list) = self.make_decode_patterns(p)
            s = []
            for pat in pattern_list:
                s.append(str(pat))
            _vmsgb("PATTERN LIST", ", ".join(s))
            _vmsgb("EXTRABINDING LIST", str(extra_bindings_list))
            patterns.extend(pattern_list)
            extra_bindings.extend(extra_bindings_list)

        # Decode operands are type:rw:[lencode|SUPP|IMPL|EXPL|ECOND]
        # where type could be X=y or MEM0. Most decode operands
        # become encode conditions, but some of them get converted
        # into extra encode actions.
        operands = []       # to become encoder inputs, conditions
        extra_actions = []  # to become encoder outputs
        for x in operand_str.split(): # the encode conditions (decode operands)
            x_short = rhs_pattern.sub('', x) # grab the lhs
            # Some "operands" are really side effects of decode. They
            # are also side effects of encode and so we move them to
            # the list of actions.
            special_encode_action = False
            try:
                # Move some decode operands (the ones that are not
                # encoder inputs) to the extra encode actions.
                if storage_fields[x_short].encoder_input == False:
                    if voperand():
                        msgb("ENCODER OUTPUT FIELD", x_short)
                    special_encode_action = True
            except:
                pass

            if special_encode_action:
                if voperand():
                    msgb("SPECIAL_ENCODE_ACTION ATTRIBUTE", x)
                extra_actions.append(x)
            else:
                if voperand():
                    msgb("MAKING A DECODE-OPERAND/ENC-ACTION FROM", x)
                operands.append(operand_t(x))

        # Add the extra encode conditions (decode-operands) implied
        # from the instruction decode patterns (MOD[mm] etc.). We
        # ignore the ones for constant bindings!
        for (field_name,value) in extra_bindings:
            if genutil.numeric(value):
                #msgerr("IGNORING %s %s" % (field_name, value))
                pass # we ignore things that are just bits at this point.
            else:
                extra_operand = operand_t("%s=%s:SUPP" % (field_name, value))
                _vmsgb("EXTRA BINDING", "%s=%s:SUPP" % (field_name, value))
                operands.append(extra_operand)

        # Add the extra_actions that were part of the decode operands as
        # side-effects but are really side-effects of encode too.
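        # Illustrative example (hypothetical field name, for documentation
        # only): a decode operand like "SOMEFIELD=8", where SOMEFIELD is
        # not an encoder input, lands in extra_actions and is converted
        # below into an operand-decider blot via make_decider_blot().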
for raw_action in extra_actions: okay = False equals = equals_pattern.match(raw_action) if equals: (lhs,rhs) = equals.group('lhs','rhs') new_blot = self.make_decider_blot(lhs,rhs,equals=True) okay = True not_equals = not_equals_pattern.match(raw_action) if not_equals: (lhs,rhs) = equals.group('lhs','rhs') new_blot = self.make_decider_blot(lhs,rhs,equals=False) okay = True if not okay: die("Bad extra action: %s" % raw_action) #msgerr("NEW BLOT: %s" % str(new_blot)) patterns.append(new_blot) # return: (decode-operands are encode-conditions, # decode-patterns are encode-actions [blot_t], # modal-patterns that become encode-conditions [string]) #msgerr("OPERANDS %s" % ' '.join( [str(x) for x in operands])) return (operands, patterns, modal_patterns) def print_iclass_info(self,iclass, operands, ipattern, conditions, actions, modal_patterns): msg(iclass + ':\t' + operands + '->' + ipattern) msg( "CONDITIONS:") for c in conditions: msg("\t" + str(c) ) msg("ACTIONS:") for a in actions: msg("\t" + str(a)) msg("MODAL PATTERNS:") for a in modal_patterns: msg("\t" + str(a)) def finalize_decode_conversion(self,iclass, operands, ipattern, uname=None): if ipattern == None: die("No ipattern for iclass %s and operands: %s" % (str(iclass), operands )) if iclass == None: die("No iclass for " + operands) # the encode conditions are the decode operands (as [ operand_t ]) # the encode actions are the decode patterns (as [ blot_t ]) (conditions, actions, modal_patterns) = \ self.parse_one_decode_rule(iclass, operands, ipattern) if vfinalize(): self.print_iclass_info(iclass, operands, ipattern, conditions, actions, modal_patterns) # FIXME do something with the operand/conditions and patterns/actions iform = iform_t(iclass, conditions, actions, modal_patterns, uname) if uname == 'NOP0F1F': # We have many fat NOPS, 0F1F is the preferred one so we # give it a higher priority in the iform sorting. iform.priority = 0 elif 'VEXVALID=2' in ipattern: # EVEX # FIXME: 2016-01-28: MJC: hack. 1st check patterns w/ ROUNDC/SAE. # (See other instance of BCRC=1 in this file) if 'BCRC=1' in ipattern: iform.priority = 0 else: iform.priority = 2 elif 'VEXVALID=3' in ipattern: # XOP iform.priority = 3 elif 'VEXVALID=4' in ipattern: # KNC iform.priority = 3 else: # EVERYTHING ELSE iform.priority = 1 try: self.iarray[iclass].append ( iform ) except: self.iarray[iclass] = [ iform ] def read_decoder_instruction_file(self): """Taking a slightly different tack with the ISA file because it is so large. Processing each line as we encounter it rather than buffering up the whole file. Also, just storing the parts I need. 
""" continuation_pattern = re.compile(r'\\$') _vmsgb("READING",self.files.instructions_file) lines = open(self.files.instructions_file,'r').readlines() lines = process_continuations(lines) nts = {} nt = None iclass = None uname = None unamed = None ipattern = None started = False while len(lines) > 0: line = lines.pop(0) line = comment_pattern.sub("",line) #line = leading_whitespace_pattern.sub("",line) line=line.strip() if line == '': continue line = slash_expand.expand_all_slashes(line) #_vmsgb("INPUT", line) if udelete_pattern.search(line): m = udelete_full_pattern.search(line) unamed = m.group('uname') _vmsgb("REGISTER BAD UNAME", unamed) self.deleted_unames[unamed] = True continue if delete_iclass_pattern.search(line): m = delete_iclass_full_pattern.search(line) iclass = m.group('iclass') self.deleted_instructions[iclass] = True continue line = self.expand_state_bits_one_line(line) p = nt_pattern.match(line) if p: nt_name = p.group('ntname') if nt_name in nts: nt = nts[nt_name] else: nt = nonterminal_t(nt_name) nts[nt_name] = nt continue if left_curly_pattern.match(line): if started: die("Nested instructions") started = True iclass = None uname = None continue if right_curly_pattern.match(line): if not started: die("Mis-nested instructions") started = False iclass = None uname = None continue ic = iclass_pattern.match(line) if ic: iclass = ic.group('iclass') continue un = uname_pattern.match(line) if un: uname = un.group('uname') continue ip = ipattern_pattern.match(line) if ip: ipattern = ip.group('ipattern') continue if no_operand_pattern.match(line): self.finalize_decode_conversion(iclass,'', ipattern, uname) continue op = operand_pattern.match(line) if op: operands = op.group('operands') self.finalize_decode_conversion(iclass, operands, ipattern, uname) continue return def remove_deleted(self): bad = list(self.deleted_unames.keys()) _vmsgb("BAD UNAMES", str(bad)) for ic,v in self.iarray.items(): x1 = len(v) l = [] for i in v: if i.uname not in bad: l.append(i) else: _vmsgb("PRE-DELETING IFORMS", "%s %s" % (ic, i.uname)) x2 = len(l) if x1 != x2: _vmsgb("DELETING IFORMS", "%s %d -> %d" % (ic,x1,x2)) self.iarray[ic]=l for k in list(self.deleted_instructions.keys()): if k in self.iarray: _vmsgb("DELETING", k) del self.iarray[k] def add_iform_indices(self): ''' add iform's index to all iforms. flatten all the iforms to a single list ''' all_iforms_list = [] i = 0 for iforms in self.iarray.values(): for iform in iforms: iform.rule.iform_id = i all_iforms_list.append(iform) i += 1 self.total_iforms = i return all_iforms_list def read_decoder_files(self): """Read the flat decoder input files and 'invert' them. Build two dictionaries: the NTLUFs and the NTs""" # read the main ISA tables self.read_decoder_instruction_file() # read_isa_ self.all_iforms = self.add_iform_indices() self.remove_deleted() # Read the other decoder format tables. 
nts = {} ntlufs = {} for f in self.files.decoder_input_files: lines = open(f,'r').readlines() lines = self.expand_state_bits(lines) (some_nts, some_ntlufs) = self.parse_decode_lines(lines) # read_flat_ nts.update(some_nts) ntlufs.update(some_ntlufs) del lines # reorder rules so that any rules with ENCODER_PREFERRED is first self.reorder_encoder_rules(nts) self.reorder_encoder_rules(ntlufs) if vread(): msgb("NONTERMINALS") for nt in nts.values(): msg( str(nt)) msgb("NTLUFS") for ntluf in ntlufs.values(): msg( str(ntluf)) _vmsgb("DONE","\n\n") self.decoder_nonterminals.update(nts) self.decoder_ntlufs.update(ntlufs) def make_isa_encode_group(self, group_index, ins_group): """Make the function object for encoding one group. The generated function tests operand order and type, then more detailed conditions. Once conditions_satisfied is true, we attempt to do more detailed bindings operations for the nonterminals in the pattern. @rtype: function_object_t @returns: an encoder function object that encodes group """ if vencode(): msgb("ENCODING GROUP", " %s -- %s" % (group_index, ins_group)) fname = "xed_encode_group_%d" % (group_index) fo = function_object_t(fname,'xed_bool_t') fo.add_arg("%s* xes" % xed_encoder_request) fo.add_code_eol( "xed_bool_t okay=1") fo.add_code_eol( "xed_bool_t conditions_satisfied=0" ) fo.add_code_eol( "xed_ptrn_func_ptr_t fb_ptrn_function" ) iform_ids_table = ins_group.get_iform_ids_table() iclasses_number = len(ins_group.get_iclasses()) iforms_number = len(ins_group.iforms) table_type = 'static const xed_uint16_t ' table_decl = 'iform_ids[%d][%d] = {' % (iclasses_number, iforms_number) table = table_type + table_decl fo.add_code(table) for line in iform_ids_table: fo.add_code(line) fo.add_code_eol('}') get_iclass_index = 'xed_encoder_get_iclasses_index_in_group' obj_name = encutil.enc_strings['obj_str'] code = 'xed_uint8_t iclass_index = %s(%s)' % (get_iclass_index,obj_name) fo.add_code_eol(code) # FIXME: 2014-04-17: copy to sorted_iforms still sorts ins_group.iforms sorted_iforms = ins_group.iforms sorted_iforms.sort(key=ins_emit.key_iform_by_bind_ptrn) sorted_iforms.sort(key=ins_emit.key_rule_length) sorted_iforms.sort(key=ins_emit.key_priority) for i,iform in enumerate(sorted_iforms): # FIXME:2007-07-05 emit the iform.operand_order check of # the xed_encode_order[][] array # emit code that checks the operand order # made special operand orders for 0 1 and 2 # operands. store the dictionary of operand orders, # look up the list. If there are zero entries, no # memcmp is needed. If there is one entry, replace the # memcmp with an equality check. If there are two # operands, replace the memcmp with two equality # tests. Otherwise use the memcmp. # FIXME 2007-09-11 use the static count of the values of # the number of operands rather than looking it up in # xed_encode_order_limit. Save many array derefs per # encode. 2014-04-15: xed_encode_order_limit[] does not # currently show up in the generated code so the above # fixme is moot. 
try: operand_order =\ self.all_operand_name_list_dict[iform.operand_order_key] except: operand_order = None cond1 = None nopnd = None optimized = False if operand_order: nopnd = len(operand_order.lst) if 0: msge("OPNDORDER for group %d is (%d) %s " % ( group_index, nopnd, str(operand_order.lst))) cond1 = "xes->_n_operand_order == %d" % (nopnd) if nopnd==0: optimized = True fo.add_code("if (%s) {" % (cond1)) elif nopnd ==1: optimized = True cond2 = "xes->_operand_order[0] == XED_OPERAND_%s" cond2 = cond2 % (operand_order.lst[0]) fo.add_code("if (%s && %s) {" % (cond1,cond2)) elif nopnd ==2: optimized = True cond2 = "xes->_operand_order[0] == XED_OPERAND_%s" cond2 = cond2 % (operand_order.lst[0]) cond3 = "xes->_operand_order[1] == XED_OPERAND_%s" cond3 = cond3 % (operand_order.lst[1]) fo.add_code("if (%s && %s && %s) {" % (cond1,cond2,cond3)) memcmp_type = 'xed_uint8_t' if not optimized: if cond1 == None: cond1 = "xed_encode_order_limit[%d]==xes->_n_operand_order" cond1 = cond1 % (iform.operand_order) if nopnd == None: cond2 = "memcmp(xed_encode_order[%d], "+\ "xes->_operand_order, "+\ "sizeof(%s)*xed_encode_order_limit[%d])==0" cond2 = cond2 % (iform.operand_order, memcmp_type, iform.operand_order) else: cond2 = "memcmp(xed_encode_order[%d], "+\ "xes->_operand_order, sizeof(%s)*%d)==0" cond2 = cond2 % (iform.operand_order, memcmp_type, nopnd) fo.add_code("if (%s && %s) {" % (cond1, cond2)) if viform(): msgb("IFORM", str(iform)) # For binding, this emits code that sets # conditions_satisfied based on some long expression and # then tests it and sets some operand storage fields. For # emitting, it checks the iform and emits bits. captures = None lines = iform.rule.emit_isa_rule(i,ins_group) fo.add_lines(lines) fo.add_code(' }') fo.add_code_eol('return 0') fo.add_code_eol("(void) okay") fo.add_code_eol("(void) conditions_satisfied") fo.add_code_eol("(void) xes") return fo def emit_encode_function_table_init(self): ''' emit the functions that inits encoders look up tables. ''' global output_file_emitters func_name = "xed_init_encode_table" fo = function_object_t(func_name,"void") init_table = [] template = " xed_enc_iclass2group[XED_ICLASS_%s] = %d;" iclass2group = self.ins_groups.get_iclass2group() for iclass,group_index in list(iclass2group.items()): code = template % (iclass.upper(),group_index) init_table.append(code) template = " xed_enc_iclass2index_in_group[XED_ICLASS_%s] = %d;" iclass2index_in_group = self.ins_groups.get_iclass2index_in_group() for iclass,index in list(iclass2index_in_group.items()): code = template % (iclass.upper(),index) init_table.append(code) fo.add_lines(init_table) filename = 'xed-encoder-init.c' fe = xed_file_emitter_t(self.xeddir, self.gendir, filename, shell_file=False) fe.add_header("xed-encoder.h") # FIXME confusing file name. 
fe.start() fo.emit_file_emitter(fe) fe.close() output_file_emitters.append(fe) def make_isa_encode_functions(self): # each iarray dictionary entry is a list: of iform_t objects ins_code_gen = ins_emit.instruction_codegen_t(self.all_iforms, self.iarray, self.gendir, self.amd_enabled) ins_code_gen.work() # copy stuff back to this class's members vars ins_code_gen.get_values(self) i=0 group_fos = [] for group in self.ins_groups.get_groups(): #generate the function object for the group bind function fo = self.make_isa_encode_group(i,group) group_fos.append(fo) i += 1 self.group_fos = group_fos def emit_iforms(self): global output_file_emitters s = iform_builder.emit_header() # FIXME GLOBAL filename = 'xed-encoder-iforms.h' fe = xed_file_emitter_t(self.xeddir, self.gendir, filename, shell_file=False) fe.headers.remove('xed-internal-header.h') fe.add_header("xed-types.h") fe.start() fe.write(s) fe.close() output_file_emitters.append(fe) def find_nt_by_name(self,nt_name): # returns nonterminal_t object that represents the nt name if nt_name in self.nonterminals: return self.nonterminals[nt_name] elif nt_name in self.decoder_nonterminals: return self.decoder_nonterminals[nt_name] elif nt_name in self.decoder_ntlufs: return self.decoder_ntlufs[nt_name] die('could not find nt object for nt name %s\n' % nt_name) def replace_outreg(self,cond_nt,conds_list): '''cond_nt: the condition with nt cond_list: list of conditions that replaces the nt if the field name of cond_nt is different than OUTREG, replace the field name OUTREG in the conds_list ''' if cond_nt.field_name == 'OUTREG': return for c in conds_list: if c.field_name == 'OUTREG': c.field_name = cond_nt.field_name def inline_nt(self,rule,cond_nt,dfile): ''' merges the conditions & actions in rule with the conditions & actions in the cond_nt, returns a list of merged rules rule: is a rule with nt in the conds list (called UPPER) cond_nt: is the condition with the nt that we want to inline (called N()) ''' nt = self.find_nt_by_name(cond_nt.rvalue.value) dfile.write("working rule:\n %s\n" % str(rule)) dfile.write("inlining rule: %s\n" % str(nt)) #remove the nt from the conds list rule.conditions.and_conditions.remove(cond_nt) inlined_rules = [] #add all the rules from N() to UPPER rule for r in nt.rules: #copying the conditions & actions #since we are going to modify them later conds = copy.deepcopy(r.conditions.and_conditions) actions = copy.deepcopy(r.actions) #replace field name OUTREG in the cond_nt with the original #field name in the rule self.replace_outreg(cond_nt,conds) if conds[0].is_otherwise() and actions: if actions[0].is_nothing() or actions[0].is_error(): # for otherwise -> nothing/error we do nothing. # if we have not succeeded to satisfy the lower nt ( N() ) # the UPPER rule will simply be rejected, # and we will continue to try satisfy the next rule. 
continue else: err = ("otherwise condition may get only error or"+ "nothing actions in NT: " + nt.name) die(err) new_upper_rule = copy.deepcopy(rule) new_upper_rule.conditions.and_conditions.extend(conds) if actions and actions[0].is_error(): #if we have error action in the canonical nt take it as #the only action and do not append it to other actions new_upper_rule.actions = actions elif actions and actions[0].is_nothing(): #appending nothing actions does not have any affect pass elif new_upper_rule.actions and new_upper_rule.actions[0].is_nothing(): new_upper_rule.actions = actions else: upper_actions = new_upper_rule.actions new_upper_rule.actions = actions new_upper_rule.actions.extend(upper_actions) dfile.write("new rule %s\n" % str(new_upper_rule)) inlined_rules.append(new_upper_rule) return inlined_rules def inline_conditions(self,nt_map,dfile): '''we are going to inline all the nt in the condition list example: the rule(lets call it UPPER): A=1 BASE=N() -> X=0 the nt N() is: OUTREG=EAX -> Z=1 OUTREG=RAX -> Z=2 the inlined rule are: A=1 BASE=EAX -> X=0 Z=1 A=1 BASE=RAX -> X=0 Z=2 nt_map is a map of nt name to nonterminal_t ''' for nt_name in nt_map: nt = nt_map[nt_name] rules_with_nt = [] dfile.write('nt: %s\n' % nt_name) for rule in nt.rules: cond_nt = rule.get_nt_in_cond_list() if cond_nt: #collect all the rules with nt rules_with_nt.append(rule) #we have a nt in the condtion list #create new inlined ruels inlined_rules = self.inline_nt(rule,cond_nt,dfile) nt.rules.extend(inlined_rules) #now delete all the rules with nt in the condition list for rule in rules_with_nt: nt.rules.remove(rule) def run(self): # this is the main loop # read the state bits f = self.files.state_bits_file lines = open(f,'r').readlines() self.state_bits = self.parse_state_bits(lines) del lines # writes self.sequences and self.nonterminals self.read_encoder_files() # writes self.deocoder_nonterminals and self.decoder_ntlufs self.read_decoder_files() if vdumpinput(): self.dump() ## inline all the nt in the conditions section dfile = open(mbuild.join(self.gendir,'inline_nt.txt'),'w') self.inline_conditions(self.nonterminals,dfile) self.inline_conditions(self.decoder_ntlufs,dfile) dfile.close() self.make_sequence_functions() f_gen = nt_func_gen.nt_function_gen_t(self,storage_fields) fos, operand_lu_fos = f_gen.gen_nt_functions() self.emit_lu_functions(operand_lu_fos) self.functions.extend(fos) self.make_nonterminal_functions(self.nonterminals) self.make_nonterminal_functions(self.decoder_ntlufs) self.make_nonterminal_functions(self.decoder_nonterminals) self.make_encode_order_tables()# FIXME too early? 
# emit the per instruction bind & emit functions self.make_isa_encode_functions() self.emit_group_encode_functions() self.emit_lu_tables() self.emit_encoder_iform_table() # write the dispatch table initialization function self.emit_encode_function_table_init() self.emit_function_bodies_and_header_numbered() self.emit_iforms() def look_for_encoder_inputs(self): encoder_inputs_by_iclass = {} # dictionary mapping iclass -> set of field names encoder_nts_by_iclass = {} # dictionary mapping iclass -> set of nt names for iclass,iform_list in self.iarray.items(): encoder_field_inputs = set() encoder_nts = set() for iform in iform_list: (field_set,nt_set) = iform.find_encoder_inputs() #msg("FIELDS: %s" % ' '.join(field_set)) #msg( "NTS: %s" % ' '.join(nt_set)) encoder_field_inputs |= field_set encoder_nts |= nt_set #msg("FIELDS: %s" % ' '.join(encoder_field_inputs)) #msg( "NTS: %s" % ' '.join(encoder_nts)) encoder_inputs_by_iclass[iclass] = encoder_field_inputs encoder_nts_by_iclass[iclass] = encoder_nts for iclass in list(encoder_inputs_by_iclass.keys()): fld_set = encoder_inputs_by_iclass[iclass] nt_set = encoder_nts_by_iclass[iclass] if vinputs(): msg("EINPUTS: %15s FIELDS: %s \tNTS: %s" % (iclass, ", ".join(fld_set), ", ".join(nt_set))) def make_encode_order_tables(self): global output_file_emitters self.all_operand_name_list_dict = self._collect_ordered_operands() (init_order_fo,max_entries, max_operands) = \ self._emit_operand_order_array(self.all_operand_name_list_dict) filename = 'xed-encoder-order-init.c' fe = xed_file_emitter_t(self.xeddir, self.gendir,filename, shell_file=False) fe.start() init_order_fo.emit_file_emitter(fe) fe.close() self.max_operand_order_entries = max_entries self.max_operand_order_operands = max_operands output_file_emitters.append(fe) def emit_encode_defines(self): global output_file_emitters filename = 'xed-encoder-gen-defs.h' fe = xed_file_emitter_t(self.xeddir, self.gendir, filename, shell_file=False) fe.headers.remove('xed-internal-header.h') fe.start() fe.write("#define XED_ENCODE_ORDER_MAX_ENTRIES %d\n" % self.max_operand_order_entries) fe.write("#define XED_ENCODE_ORDER_MAX_OPERANDS %d\n" % self.max_operand_order_operands) fe.write("#define XED_ENCODE_MAX_FB_PATTERNS %d\n" % self.max_fb_ptrns) fe.write("#define XED_ENCODE_MAX_EMIT_PATTERNS %d\n" % self.max_emit_ptrns) fe.write("#define XED_ENCODE_FB_VALUES_TABLE_SIZE %d\n" % self.fb_values_table_size) fe.write("#define XED_ENCODE_MAX_IFORMS %d\n" % self.total_iforms) fe.write("#define XED_ENC_GROUPS %d\n" % self.ins_groups.num_groups()) fe.close() output_file_emitters.append(fe) def _collect_ordered_operands(self): """Return a dictionary of ordered operand name lists that include just the encoder inputs. 
We denote the key to index this dictionary in each iform as iform.operand_order""" all_operand_name_list_dict = {} for iclass,iform_list in self.iarray.items(): for niform,iform in enumerate(iform_list): ordered_operand_name_list = iform.make_operand_name_list() key = "-".join(ordered_operand_name_list) if key in all_operand_name_list_dict: n = all_operand_name_list_dict[key].n else: n = len(all_operand_name_list_dict) all_operand_name_list_dict[key] = operand_order_t(n, ordered_operand_name_list) iform.operand_order = n iform.operand_order_key = key _vmsgb("TOTAL ENCODE OPERAND SEQUENCES: %d" % (len(all_operand_name_list_dict))) if vopseq(): for iclass,iform_list in self.iarray.items(): for niform,iform in enumerate(iform_list): msg("OPSEQ: %20s-%03d: %s" % (iclass, niform+1, ", ".join(all_operand_name_list_dict[iform.operand_order_key].lst))) return all_operand_name_list_dict def _emit_operand_order_array(self, all_operand_name_list_dict): """Return a function that initializes the encode order array""" fname = "xed_init_encoder_order" fo = function_object_t(fname, 'void') operands = 0 # columns entries = 0 # rows for oo in all_operand_name_list_dict.values(): # stringkeys -> operand_order_t's for j,o in enumerate(oo.lst): fo.add_code_eol("xed_encode_order[%d][%d]=XED_OPERAND_%s" % (oo.n,j,o)) t = len(oo.lst) fo.add_code_eol("xed_encode_order_limit[%d]=%d" % (oo.n,t)) if entries < oo.n+1: entries = oo.n+1 if operands < t: operands = t return (fo, entries, operands) def dump(self): msgb("NONTERMINALS") for nt in self.nonterminals.values(): msg(str(nt)) msgb("SEQUENCERS") for s in self.sequences.values(): msg(str(s)) def make_sequence_functions(self): # we pass in the list of known sequences so that we know to # call the right kind of function from the sequence function # we are creating. for s in self.sequences.values(): fo = s.create_function(self.sequences) self.functions.append(fo) def make_nonterminal_functions(self, nts): """For each nonterminal, we create two versions if it is not a NTLUF. One version does the required bindings. 
The other version emits the required bytes""" for nt in nts.values(): _vmsgb("SORTING FOR SIZE", nt.name) nt.sort_for_size() if nt.is_ntluf(): if nt.name in nt_func_gen.get_complicated_nt(): fo = nt.create_function(bind_or_emit='NTLUF') self.functions.append(fo) else: if nt.name in nt_func_gen.get_complicated_nt(): fo = nt.create_function(bind_or_emit='BIND') self.functions.append(fo) fo = nt.create_function(bind_or_emit='EMIT') self.functions.append(fo) def emit_function_headers(self,fname_prefix,fo_list): global output_file_emitters filename = fname_prefix + '.h' gendir = os.path.join(self.gendir, 'include-private') fe = xed_file_emitter_t(self.xeddir, gendir, filename, shell_file=False) fe.start() for fo in fo_list: s = fo.emit_header() fe.write(s) fe.close() output_file_emitters.append(fe) def emit_function_bodies_and_header(self,fname_prefix,headers,fo_list): global output_file_emitters filename = fname_prefix+ '.c' fe = xed_file_emitter_t(self.xeddir, self.gendir, filename, shell_file=False) fe.add_header(headers) fe.start() for fo in fo_list: s = fo.emit() fe.write(s) fe.close() self.emit_function_headers(fname_prefix,fo_list) output_file_emitters.append(fe) def emit_function_bodies_and_header_numbered(self): filename_prefix = 'xed-encoder' headers = ['xed-encode-private.h', 'xed-enc-operand-lu.h', 'xed-operand-accessors.h'] fe_list = emit_function_list(self.functions, filename_prefix, self.xeddir, self.gendir, os.path.join(self.gendir, 'include-private'), other_headers=headers) if 0: # move the generated header file to the private generated headers efile = os.path.join(self.gendir, 'include-private', 'xed-encoder.h') remove_file(efile) os.rename(os.path.join(self.gendir, 'xed-encoder.h'), efile) output_file_emitters.extend(fe_list) def emit_lu_functions(self, fos): ''' emit the list of lookup functions ''' filename_prefix = 'xed-enc-operand-lu' headers = ["xed-encode.h", "xed-operand-accessors.h"] self.emit_function_bodies_and_header(filename_prefix,headers,fos) def _emit_functions_lu_table(self,fe, type, values, table_name, size_def, per_line=1): table_def = "const %s %s[%s] = {" % (type,table_name,size_def) fe.write(table_def) indent = ' '*12 fe.write(indent) for i,val in enumerate(values): if i % per_line == 0: fe.write('\n%s' % indent) fe.write("%s," % val) fe.write('\n};\n') def emit_lu_tables(self): '''emit the function pointers tables ''' filename_prefix = 'xed-enc-patterns' headers = ['xed-encode.h','xed-encoder.h','xed-operand-accessors.h'] fos = self.fb_ptrs_fo_list + self.emit_ptrs_fo_list self.emit_function_bodies_and_header(filename_prefix,headers,fos) h_filename = "%s.h" % filename_prefix filename = 'xed-encoder-pattern-lu.c' fe = xed_file_emitter_t(self.xeddir, self.gendir, filename, shell_file=False) headers = [h_filename, 'xed-encoder-gen-defs.h', 'xed-encoder.h', 'xed-enc-groups.h'] fe.add_header(headers) fe.start() f_names = [ x.function_name for x in self.fb_ptrs_fo_list] self._emit_functions_lu_table(fe, 'xed_ptrn_func_ptr_t', f_names, 'xed_encode_fb_lu_table', 'XED_ENCODE_MAX_FB_PATTERNS') fe.write('\n\n\n') f_names = [ x.function_name for x in self.emit_ptrs_fo_list] self._emit_functions_lu_table(fe, 'xed_ptrn_func_ptr_t', f_names, 'xed_encode_emit_lu_table', 'XED_ENCODE_MAX_EMIT_PATTERNS') fe.write('\n\n\n') self._emit_functions_lu_table(fe,'xed_uint8_t', self.fb_values_list, 'xed_encode_fb_values_table', 'XED_ENCODE_FB_VALUES_TABLE_SIZE',20) fe.write('\n\n\n') f_names = [ x.function_name for x in self.group_fos] 
self._emit_functions_lu_table(fe,'xed_encode_function_pointer_t', f_names, 'xed_encode_groups', 'XED_ENC_GROUPS') fe.close() output_file_emitters.append(fe) def emit_encoder_iform_table(self): filename = 'xed-encoder-iforms-init.c' fe = xed_file_emitter_t(self.xeddir, self.gendir, filename, shell_file=False) fe.add_header('xed-ild.h') fe.start() ptrn = "/*(%4d)%20s*/ {%4d, %4d, %4s," +\ " XED_STATIC_CAST(xed_uint8_t,%15s), %4d}" iform_definitions = [] for iform in self.all_iforms:#iforms: iform_init = ptrn % (iform.rule.iform_id, iform.iclass, iform.bind_func_index, iform.emit_func_index, hex(iform.nominal_opcode), iform.map, iform.fb_index) iform_definitions.append(iform_init) self._emit_functions_lu_table(fe, 'xed_encoder_iform_t', iform_definitions, 'xed_encode_iform_db', 'XED_ENCODE_MAX_IFORMS') fe.close() output_file_emitters.append(fe) def emit_group_encode_functions(self): filename_prefix = 'xed-enc-groups' headers = ['xed-encode-private.h', 'xed-enc-operand-lu.h', 'xed-operand-accessors.h','xed-encoder.h'] self.emit_function_bodies_and_header(filename_prefix,headers, self.group_fos) ############################################################################## def setup_arg_parser(): arg_parser = optparse.OptionParser() arg_parser.add_option('--gendir', action='store', dest='gendir', default='obj', help='Directory for generated files') arg_parser.add_option('--xeddir', action='store', dest='xeddir', default='.', help='Directory for generated files') arg_parser.add_option('--input-fields', action='store', dest='input_fields', default='', help='Operand storage description input file') arg_parser.add_option('--input-state', action='store', dest='input_state', default='xed-state-bits.txt', help='state input file') arg_parser.add_option('--input-regs', action='store', dest='input_regs', default='', help='Encoder regs file') arg_parser.add_option('--enc-patterns', action='append', dest='enc_patterns', default=[], help='Encoder input files') arg_parser.add_option('--enc-dec-patterns', action='append', dest='enc_dec_patterns', default=[], help='Decoder input files used by the encoder') arg_parser.add_option('--isa', action='store', dest='isa_input_file', default='', help='Read structured input file containing the ISA INSTRUCTIONS() nonterminal') arg_parser.add_option('--no-amd', action='store_false', dest='amd_enabled', default=True, help='Omit AMD instructions') arg_parser.add_option('--verbosity', '-v', action='append', dest='verbosity', default=[], help='list of verbosity tokens, repeatable.') return arg_parser if __name__ == '__main__': arg_parser = setup_arg_parser() (options, args ) = arg_parser.parse_args() set_verbosity_options(options.verbosity) enc_inputs = encoder_input_files_t(options) enc = encoder_configuration_t(enc_inputs, options.amd_enabled) enc.run() enc.look_for_encoder_inputs() # exploratory stuff enc.emit_encode_defines() # final stuff after all tables are sized enc.dump_output_file_names() sys.exit(0)
[ "nt_func_gen.nt_function_gen_t", "ins_emit.instruction_codegen_t", "actions.gen_return_action", "os.unlink", "optparse.OptionParser", "operand_storage.operands_storage_t", "actions.action_t", "mbuild.msgb", "os.path.join", "os.path.dirname", "operand_storage.get_op_getter_full_func", "os.path.exists", "nt_func_gen.get_complicated_nt", "re.sub", "re.search", "operand_storage.get_op_getter_fn", "copy.deepcopy", "os.chmod", "sys.exit", "re.compile", "os.getcwd", "mbuild.join", "slash_expand.expand_all_slashes", "sys.stderr.write", "os.path.split" ]
[((1567, 1578), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1576, 1578), False, 'import os\n'), ((1881, 1909), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (1896, 1909), False, 'import os\n'), ((1977, 2012), 'os.path.exists', 'os.path.exists', (['mbuild_install_path'], {}), '(mbuild_install_path)\n', (1991, 2012), False, 'import os\n'), ((2326, 2354), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (2341, 2354), False, 'import os\n'), ((2363, 2392), 'os.path.exists', 'os.path.exists', (['xed2_src_path'], {}), '(xed2_src_path)\n', (2377, 2392), False, 'import os\n'), ((2995, 3065), 'operand_storage.get_op_getter_full_func', 'operand_storage.get_op_getter_full_func', (['"""outreg"""', 'encutil.enc_strings'], {}), "('outreg', encutil.enc_strings)\n", (3034, 3065), False, 'import operand_storage\n'), ((3149, 3218), 'operand_storage.get_op_getter_full_func', 'operand_storage.get_op_getter_full_func', (['"""error"""', 'encutil.enc_strings'], {}), "('error', encutil.enc_strings)\n", (3188, 3218), False, 'import operand_storage\n'), ((3522, 3546), 'os.chmod', 'os.chmod', (['fn', '_rwx_by_me'], {}), '(fn, _rwx_by_me)\n', (3530, 3546), False, 'import os\n'), ((3614, 3632), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (3628, 3632), False, 'import os\n'), ((7318, 7358), 're.compile', 're.compile', (['"""TXT=(?P<rhs>[0-9A-Za-z_]+)"""'], {}), "('TXT=(?P<rhs>[0-9A-Za-z_]+)')\n", (7328, 7358), False, 'import re\n'), ((116197, 116220), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (116218, 116220), False, 'import optparse\n'), ((118474, 118485), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (118482, 118485), False, 'import sys\n'), ((1649, 1675), 'os.path.join', 'os.path.join', (['directory', 'd'], {}), '(directory, d)\n', (1661, 1675), False, 'import os\n'), ((1686, 1718), 'os.path.exists', 'os.path.exists', (['target_directory'], {}), '(target_directory)\n', (1700, 1718), False, 'import os\n'), ((2139, 2268), 'sys.stderr.write', 'sys.stderr.write', (['("""\nERROR(read-encfile.py): Could not find mbuild.""" +\n """ Should be a sibling of the xed2 directory.\n\n""")'], {}), '("""\nERROR(read-encfile.py): Could not find mbuild.""" +\n """ Should be a sibling of the xed2 directory.\n\n""")\n', (2155, 2268), False, 'import sys\n'), ((2284, 2295), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2292, 2295), False, 'import sys\n'), ((2484, 2520), 'os.path.join', 'os.path.join', (['xed2_src_path', '"""pysrc"""'], {}), "(xed2_src_path, 'pysrc')\n", (2496, 2520), False, 'import os\n'), ((2732, 2846), 'sys.stderr.write', 'sys.stderr.write', (['("""\nERROR(read-encfile.py): Could not find """ +\n \'the xed directory for python imports.\\n\\n\')'], {}), '("""\nERROR(read-encfile.py): Could not find """ +\n \'the xed directory for python imports.\\n\\n\')\n', (2748, 2846), False, 'import sys\n'), ((2864, 2875), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2872, 2875), False, 'import sys\n'), ((3339, 3356), 'mbuild.msgb', 'mbuild.msgb', (['s', 'b'], {}), '(s, b)\n', (3350, 3356), False, 'import mbuild\n'), ((3697, 3710), 'os.unlink', 'os.unlink', (['fn'], {}), '(fn)\n', (3706, 3710), False, 'import os\n'), ((17552, 17629), 'operand_storage.get_op_getter_full_func', 'operand_storage.get_op_getter_full_func', (['self.field_name', 'encutil.enc_strings'], {}), '(self.field_name, encutil.enc_strings)\n', (17591, 17629), False, 'import operand_storage\n'), ((33710, 33742), 'nt_func_gen.get_complicated_nt', 
'nt_func_gen.get_complicated_nt', ([], {}), '()\n', (33740, 33742), False, 'import nt_func_gen\n'), ((54567, 54608), 'os.path.join', 'os.path.join', (['self.xeddir', '"""datafiles"""', 's'], {}), "(self.xeddir, 'datafiles', s)\n", (54579, 54608), False, 'import os\n'), ((55514, 55555), 'operand_storage.operands_storage_t', 'operand_storage.operands_storage_t', (['lines'], {}), '(lines)\n', (55548, 55555), False, 'import operand_storage\n'), ((56208, 56260), 'os.path.join', 'os.path.join', (['self.gendir', '"""ENCGEN-OUTPUT-FILES.txt"""'], {}), "(self.gendir, 'ENCGEN-OUTPUT-FILES.txt')\n", (56220, 56260), False, 'import os\n'), ((66168, 66215), 're.compile', 're.compile', (['"""(?P<key>[^\\\\s]+)\\\\s+(?P<value>.*)"""'], {}), "('(?P<key>[^\\\\s]+)\\\\s+(?P<value>.*)')\n", (66178, 66215), False, 'import re\n'), ((69140, 69159), 're.search', 're.search', (['"""^0b"""', 's'], {}), "('^0b', s)\n", (69149, 69159), False, 'import re\n'), ((69207, 69225), 're.sub', 're.sub', (['"""_"""', '""""""', 's'], {}), "('_', '', s)\n", (69213, 69225), False, 'import re\n'), ((70616, 70638), 're.sub', 're.sub', (['""":.*"""', '""""""', 'rhs'], {}), "(':.*', '', rhs)\n", (70622, 70638), False, 'import re\n'), ((82723, 82742), 're.compile', 're.compile', (['"""\\\\\\\\$"""'], {}), "('\\\\\\\\$')\n", (82733, 82742), False, 'import re\n'), ((95541, 95636), 'ins_emit.instruction_codegen_t', 'ins_emit.instruction_codegen_t', (['self.all_iforms', 'self.iarray', 'self.gendir', 'self.amd_enabled'], {}), '(self.all_iforms, self.iarray, self.gendir,\n self.amd_enabled)\n', (95571, 95636), False, 'import ins_emit\n'), ((102429, 102480), 'nt_func_gen.nt_function_gen_t', 'nt_func_gen.nt_function_gen_t', (['self', 'storage_fields'], {}), '(self, storage_fields)\n', (102458, 102480), False, 'import nt_func_gen\n'), ((110013, 110057), 'os.path.join', 'os.path.join', (['self.gendir', '"""include-private"""'], {}), "(self.gendir, 'include-private')\n", (110025, 110057), False, 'import os\n'), ((1801, 1825), 'os.path.split', 'os.path.split', (['directory'], {}), '(directory)\n', (1814, 1825), False, 'import os\n'), ((15978, 16055), 'operand_storage.get_op_getter_full_func', 'operand_storage.get_op_getter_full_func', (['self.field_name', 'encutil.enc_strings'], {}), '(self.field_name, encutil.enc_strings)\n', (16017, 16055), False, 'import operand_storage\n'), ((46550, 46580), 'actions.gen_return_action', 'actions.gen_return_action', (['"""0"""'], {}), "('0')\n", (46575, 46580), False, 'import actions\n'), ((61437, 61474), 'slash_expand.expand_all_slashes', 'slash_expand.expand_all_slashes', (['line'], {}), '(line)\n', (61468, 61474), False, 'import slash_expand\n'), ((63302, 63339), 'slash_expand.expand_all_slashes', 'slash_expand.expand_all_slashes', (['line'], {}), '(line)\n', (63333, 63339), False, 'import slash_expand\n'), ((66455, 66492), 'slash_expand.expand_all_slashes', 'slash_expand.expand_all_slashes', (['line'], {}), '(line)\n', (66486, 66492), False, 'import slash_expand\n'), ((69177, 69196), 're.sub', 're.sub', (['"""0b"""', '""""""', 's'], {}), "('0b', '', s)\n", (69183, 69196), False, 'import re\n'), ((83328, 83365), 'slash_expand.expand_all_slashes', 'slash_expand.expand_all_slashes', (['line'], {}), '(line)\n', (83359, 83365), False, 'import slash_expand\n'), ((98503, 98545), 'copy.deepcopy', 'copy.deepcopy', (['r.conditions.and_conditions'], {}), '(r.conditions.and_conditions)\n', (98516, 98545), False, 'import copy\n'), ((98568, 98592), 'copy.deepcopy', 'copy.deepcopy', (['r.actions'], {}), '(r.actions)\n', 
(98581, 98592), False, 'import copy\n'), ((99434, 99453), 'copy.deepcopy', 'copy.deepcopy', (['rule'], {}), '(rule)\n', (99447, 99453), False, 'import copy\n'), ((102174, 102215), 'mbuild.join', 'mbuild.join', (['self.gendir', '"""inline_nt.txt"""'], {}), "(self.gendir, 'inline_nt.txt')\n", (102185, 102215), False, 'import mbuild\n'), ((111318, 111362), 'os.path.join', 'os.path.join', (['self.gendir', '"""include-private"""'], {}), "(self.gendir, 'include-private')\n", (111330, 111362), False, 'import os\n'), ((111536, 111597), 'os.path.join', 'os.path.join', (['self.gendir', '"""include-private"""', '"""xed-encoder.h"""'], {}), "(self.gendir, 'include-private', 'xed-encoder.h')\n", (111548, 111597), False, 'import os\n'), ((63425, 63449), 're.sub', 're.sub', (['"""{"""', '""" { """', 'line'], {}), "('{', ' { ', line)\n", (63431, 63449), False, 'import re\n'), ((63473, 63497), 're.sub', 're.sub', (['"""}"""', '""" } """', 'line'], {}), "('}', ' } ', line)\n", (63479, 63497), False, 'import re\n'), ((66749, 66762), 're.compile', 're.compile', (['s'], {}), '(s)\n', (66759, 66762), False, 'import re\n'), ((111651, 111693), 'os.path.join', 'os.path.join', (['self.gendir', '"""xed-encoder.h"""'], {}), "(self.gendir, 'xed-encoder.h')\n", (111663, 111693), False, 'import os\n'), ((24714, 24738), 'actions.action_t', 'actions.action_t', (['action'], {}), '(action)\n', (24730, 24738), False, 'import actions\n'), ((41555, 41590), 'operand_storage.get_op_getter_fn', 'operand_storage.get_op_getter_fn', (['f'], {}), '(f)\n', (41587, 41590), False, 'import operand_storage\n'), ((109404, 109436), 'nt_func_gen.get_complicated_nt', 'nt_func_gen.get_complicated_nt', ([], {}), '()\n', (109434, 109436), False, 'import nt_func_gen\n'), ((109598, 109630), 'nt_func_gen.get_complicated_nt', 'nt_func_gen.get_complicated_nt', ([], {}), '()\n', (109628, 109630), False, 'import nt_func_gen\n'), ((65329, 65359), 'actions.gen_return_action', 'actions.gen_return_action', (['"""1"""'], {}), "('1')\n", (65354, 65359), False, 'import actions\n'), ((65463, 65493), 'actions.gen_return_action', 'actions.gen_return_action', (['"""0"""'], {}), "('0')\n", (65488, 65493), False, 'import actions\n'), ((65570, 65589), 'actions.action_t', 'actions.action_t', (['x'], {}), '(x)\n', (65586, 65589), False, 'import actions\n'), ((65858, 65888), 'actions.gen_return_action', 'actions.gen_return_action', (['"""1"""'], {}), "('1')\n", (65883, 65888), False, 'import actions\n')]
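Each row above pairs a raw `code` string with the calls mined from it: the `apis` list names the callables, and each `extract_api` tuple appears to begin with a character span into the code string. A minimal sketch of consuming one row under that assumption — the `row` dict and its keys are illustrative, not part of the dataset itself:

import ast

def calls_in_row(row):
    # row is assumed to be a dict with 'code', 'apis', and 'extract_api'
    # keys matching the fields shown above; 'extract_api' is assumed to be
    # a Python-literal string, so ast.literal_eval can parse it.
    entries = ast.literal_eval(row['extract_api'])
    for entry in entries:
        (start, end), api_name = entry[0], entry[1]
        # Slice the source with the recorded span to recover the call site.
        yield api_name, row['code'][start:end]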
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications copyright (C) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

import horovod.common as hvd


def mpi_env_rank_and_size():
    """Get MPI rank and size from environment variables and return them as a
    tuple of integers.

    Most MPI implementations have an `mpirun` or `mpiexec` command that will
    run an MPI executable and set up all communication necessary between the
    different processors. As part of that set up, they will set environment
    variables that contain the rank and size of the MPI_COMM_WORLD
    communicator. We can read those environment variables from Python in
    order to ensure that `hvd.rank()` and `hvd.size()` return the expected
    values.

    Since MPI is just a standard, not an implementation, implementations
    typically choose their own environment variable names. This function
    tries to support several different implementations, but really it only
    needs to support whatever implementation we want to use for the
    TensorFlow test suite.

    If this is not running under MPI, then defaults of rank zero and size
    one are returned. (This is appropriate because when you call MPI_Init
    in an application not started with mpirun, it will create a new
    independent communicator with only one process in it.)
    """
    rank_env = 'PMI_RANK OMPI_COMM_WORLD_RANK'.split()
    size_env = 'PMI_SIZE OMPI_COMM_WORLD_SIZE'.split()

    for rank_var, size_var in zip(rank_env, size_env):
        rank = os.environ.get(rank_var)
        size = os.environ.get(size_var)
        if rank is not None and size is not None:
            return int(rank), int(size)

    # Default to rank zero and size one if there are no environment variables
    return 0, 1


def test_horovod_rank():
    """Test that the rank returned by hvd.rank() is correct."""
    true_rank, _ = mpi_env_rank_and_size()
    hvd.init()
    rank = hvd.rank()
    assert true_rank == rank


def test_horovod_size():
    """Test that the size returned by hvd.size() is correct."""
    _, true_size = mpi_env_rank_and_size()
    hvd.init()
    size = hvd.size()
    assert true_size == size
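As a quick illustration of the fallback path described in the docstring above, clearing the recognized MPI variables should make the helper report rank zero and size one (a sketch, not part of the original test file):

import os

for var in ('PMI_RANK', 'PMI_SIZE',
            'OMPI_COMM_WORLD_RANK', 'OMPI_COMM_WORLD_SIZE'):
    os.environ.pop(var, None)

assert mpi_env_rank_and_size() == (0, 1)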
[ "horovod.common.rank", "os.environ.get", "horovod.common.init", "horovod.common.size" ]
[((2652, 2662), 'horovod.common.init', 'hvd.init', ([], {}), '()\n', (2660, 2662), True, 'import horovod.common as hvd\n'), ((2674, 2684), 'horovod.common.rank', 'hvd.rank', ([], {}), '()\n', (2682, 2684), True, 'import horovod.common as hvd\n'), ((2852, 2862), 'horovod.common.init', 'hvd.init', ([], {}), '()\n', (2860, 2862), True, 'import horovod.common as hvd\n'), ((2874, 2884), 'horovod.common.size', 'hvd.size', ([], {}), '()\n', (2882, 2884), True, 'import horovod.common as hvd\n'), ((2264, 2288), 'os.environ.get', 'os.environ.get', (['rank_var'], {}), '(rank_var)\n', (2278, 2288), False, 'import os\n'), ((2304, 2328), 'os.environ.get', 'os.environ.get', (['size_var'], {}), '(size_var)\n', (2318, 2328), False, 'import os\n')]
'''OpenGL extension OES.shader_io_blocks

This module customises the behaviour of the
OpenGL.raw.GLES2.OES.shader_io_blocks to provide a more
Python-friendly API

Overview (from the spec)

    This extension extends the functionality of interface blocks to
    support input and output interfaces in the OpenGL ES Shading Language.
    Input and output interface blocks are used for forming the
    interfaces between vertex, tessellation control, tessellation
    evaluation, geometry and fragment shaders. This accommodates passing
    arrays between stages, which otherwise would require multi-dimensional
    array support for tessellation control outputs and for tessellation
    control, tessellation evaluation, and geometry shader inputs.

    This extension provides support for application defined interface
    blocks which are used for passing application-specific information
    between shader stages.

    This extension moves the built-in "per-vertex" in/out variables
    to a new built-in gl_PerVertex block. This is necessary for
    tessellation and geometry shaders which require a separate instance
    for each vertex, but it can also be useful for vertex shaders.

    Finally, this extension allows the redeclaration of the gl_PerVertex
    block in order to reduce the set of variables that must be passed
    between shaders.

The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/shader_io_blocks.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.shader_io_blocks import *
from OpenGL.raw.GLES2.OES.shader_io_blocks import _EXTENSION_NAME

def glInitShaderIoBlocksOES():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )

### END AUTOGENERATED SECTION
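A hedged usage sketch: the availability check is only meaningful once a GL context is current, so it would typically run inside a context-ready callback (the callback name and print text are illustrative):

from OpenGL.GLES2.OES.shader_io_blocks import glInitShaderIoBlocksOES

def on_context_ready():
    # Query the driver for OES_shader_io_blocks support; requires a
    # current OpenGL ES context.
    if glInitShaderIoBlocksOES():
        print('in/out interface blocks are usable in shaders')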
[ "OpenGL.extensions.hasGLExtension" ]
[((1892, 1934), 'OpenGL.extensions.hasGLExtension', 'extensions.hasGLExtension', (['_EXTENSION_NAME'], {}), '(_EXTENSION_NAME)\n', (1917, 1934), False, 'from OpenGL import extensions\n')]
"""Credential definition admin routes.""" from asyncio import ensure_future, shield from aiohttp import web from aiohttp_apispec import ( docs, match_info_schema, querystring_schema, request_schema, response_schema, ) from marshmallow import fields from ...issuer.base import BaseIssuer from ...ledger.base import BaseLedger from ...storage.base import BaseStorage from ...tails.base import BaseTailsServer from ..models.openapi import OpenAPISchema from ..valid import INDY_CRED_DEF_ID, INDY_REV_REG_SIZE, INDY_SCHEMA_ID, INDY_VERSION from ...revocation.error import RevocationError, RevocationNotSupportedError from ...revocation.indy import IndyRevocation from ...ledger.error import LedgerError from .util import CredDefQueryStringSchema, CRED_DEF_TAGS, CRED_DEF_SENT_RECORD_TYPE class CredentialDefinitionSendRequestSchema(OpenAPISchema): """Request schema for schema send request.""" schema_id = fields.Str(description="Schema identifier", **INDY_SCHEMA_ID) support_revocation = fields.Boolean( required=False, description="Revocation supported flag" ) revocation_registry_size = fields.Int( description="Revocation registry size", required=False, strict=True, **INDY_REV_REG_SIZE, ) tag = fields.Str( required=False, description="Credential definition identifier tag", default="default", example="default", ) class CredentialDefinitionSendResultsSchema(OpenAPISchema): """Results schema for schema send request.""" credential_definition_id = fields.Str( description="Credential definition identifier", **INDY_CRED_DEF_ID ) class CredentialDefinitionSchema(OpenAPISchema): """Credential definition schema.""" ver = fields.Str(description="Node protocol version", **INDY_VERSION) ident = fields.Str( description="Credential definition identifier", data_key="id", **INDY_CRED_DEF_ID, ) schemaId = fields.Str( description="Schema identifier within credential definition identifier", example=":".join(INDY_CRED_DEF_ID["example"].split(":")[3:-1]), # long or short ) typ = fields.Constant( constant="CL", description="Signature type: CL for Camenisch-Lysyanskaya", data_key="type", example="CL", ) tag = fields.Str( description="Tag within credential definition identifier", example=INDY_CRED_DEF_ID["example"].split(":")[-1], ) value = fields.Dict( description="Credential definition primary and revocation values" ) class CredentialDefinitionGetResultsSchema(OpenAPISchema): """Results schema for schema get request.""" credential_definition = fields.Nested(CredentialDefinitionSchema) class CredentialDefinitionsCreatedResultsSchema(OpenAPISchema): """Results schema for cred-defs-created request.""" credential_definition_ids = fields.List( fields.Str(description="Credential definition identifiers", **INDY_CRED_DEF_ID) ) class CredDefIdMatchInfoSchema(OpenAPISchema): """Path parameters and validators for request taking cred def id.""" cred_def_id = fields.Str( description="Credential definition identifier", required=True, **INDY_CRED_DEF_ID, ) @docs( tags=["credential-definition"], summary="Sends a credential definition to the ledger", ) @request_schema(CredentialDefinitionSendRequestSchema()) @response_schema(CredentialDefinitionSendResultsSchema(), 200) async def credential_definitions_send_credential_definition(request: web.BaseRequest): """ Request handler for sending a credential definition to the ledger. 
Args: request: aiohttp request object Returns: The credential definition identifier """ context = request.app["request_context"] body = await request.json() schema_id = body.get("schema_id") support_revocation = bool(body.get("support_revocation")) tag = body.get("tag") rev_reg_size = body.get("revocation_registry_size") ledger: BaseLedger = await context.inject(BaseLedger, required=False) if not ledger: reason = "No ledger available" if not context.settings.get_value("wallet.type"): reason += ": missing wallet-type?" raise web.HTTPForbidden(reason=reason) issuer: BaseIssuer = await context.inject(BaseIssuer) try: # even if in wallet, send it and raise if erroneously so async with ledger: (cred_def_id, cred_def, novel) = await shield( ledger.create_and_send_credential_definition( issuer, schema_id, signature_type=None, tag=tag, support_revocation=support_revocation, ) ) except LedgerError as e: raise web.HTTPBadRequest(reason=e.message) from e # If revocation is requested and cred def is novel, create revocation registry if support_revocation and novel: tails_base_url = context.settings.get("tails_server_base_url") if not tails_base_url: raise web.HTTPBadRequest(reason="tails_server_base_url not configured") try: # Create registry revoc = IndyRevocation(context) registry_record = await revoc.init_issuer_registry( cred_def_id, max_cred_num=rev_reg_size, ) except RevocationNotSupportedError as e: raise web.HTTPBadRequest(reason=e.message) from e await shield(registry_record.generate_registry(context)) try: await registry_record.set_tails_file_public_uri( context, f"{tails_base_url}/{registry_record.revoc_reg_id}" ) await registry_record.send_def(context) await registry_record.send_entry(context) # stage pending registry independent of whether tails server is OK pending_registry_record = await revoc.init_issuer_registry( registry_record.cred_def_id, max_cred_num=registry_record.max_cred_num, ) ensure_future( pending_registry_record.stage_pending_registry(context, max_attempts=16) ) tails_server: BaseTailsServer = await context.inject(BaseTailsServer) (upload_success, reason) = await tails_server.upload_tails_file( context, registry_record.revoc_reg_id, registry_record.tails_local_path, interval=0.8, backoff=-0.5, max_attempts=5, # heuristic: respect HTTP timeout ) if not upload_success: raise web.HTTPInternalServerError( reason=( f"Tails file for rev reg {registry_record.revoc_reg_id} " f"failed to upload: {reason}" ) ) except RevocationError as e: raise web.HTTPBadRequest(reason=e.message) from e return web.json_response({"credential_definition_id": cred_def_id}) @docs( tags=["credential-definition"], summary="Search for matching credential definitions that agent originated", ) @querystring_schema(CredDefQueryStringSchema()) @response_schema(CredentialDefinitionsCreatedResultsSchema(), 200) async def credential_definitions_created(request: web.BaseRequest): """ Request handler for retrieving credential definitions that current agent created. Args: request: aiohttp request object Returns: The identifiers of matching credential definitions. 
""" context = request.app["request_context"] storage = await context.inject(BaseStorage) found = await storage.search_records( type_filter=CRED_DEF_SENT_RECORD_TYPE, tag_query={ tag: request.query[tag] for tag in CRED_DEF_TAGS if tag in request.query }, ).fetch_all() return web.json_response( {"credential_definition_ids": [record.value for record in found]} ) @docs( tags=["credential-definition"], summary="Gets a credential definition from the ledger", ) @match_info_schema(CredDefIdMatchInfoSchema()) @response_schema(CredentialDefinitionGetResultsSchema(), 200) async def credential_definitions_get_credential_definition(request: web.BaseRequest): """ Request handler for getting a credential definition from the ledger. Args: request: aiohttp request object Returns: The credential definition details. """ context = request.app["request_context"] cred_def_id = request.match_info["cred_def_id"] ledger: BaseLedger = await context.inject(BaseLedger, required=False) if not ledger: reason = "No ledger available" if not context.settings.get_value("wallet.type"): reason += ": missing wallet-type?" raise web.HTTPForbidden(reason=reason) async with ledger: cred_def = await ledger.get_credential_definition(cred_def_id) return web.json_response({"credential_definition": cred_def}) async def register(app: web.Application): """Register routes.""" app.add_routes( [ web.post( "/credential-definitions", credential_definitions_send_credential_definition, ), web.get( "/credential-definitions/created", credential_definitions_created, allow_head=False, ), web.get( "/credential-definitions/{cred_def_id}", credential_definitions_get_credential_definition, allow_head=False, ), ] ) def post_process_routes(app: web.Application): """Amend swagger API.""" # Add top-level tags description if "tags" not in app._state["swagger_dict"]: app._state["swagger_dict"]["tags"] = [] app._state["swagger_dict"]["tags"].append( { "name": "credential-definition", "description": "Credential definition operations", "externalDocs": { "description": "Specification", "url": ( "https://github.com/hyperledger/indy-node/blob/master/" "design/anoncreds.md#cred_def" ), }, } )
[ "marshmallow.fields.Int", "aiohttp.web.HTTPForbidden", "aiohttp.web.post", "marshmallow.fields.Constant", "aiohttp_apispec.docs", "marshmallow.fields.Dict", "aiohttp.web.HTTPInternalServerError", "marshmallow.fields.Boolean", "aiohttp.web.json_response", "aiohttp.web.HTTPBadRequest", "aiohttp.web.get", "marshmallow.fields.Str", "marshmallow.fields.Nested" ]
[((3337, 3433), 'aiohttp_apispec.docs', 'docs', ([], {'tags': "['credential-definition']", 'summary': '"""Sends a credential definition to the ledger"""'}), "(tags=['credential-definition'], summary=\n 'Sends a credential definition to the ledger')\n", (3341, 3433), False, 'from aiohttp_apispec import docs, match_info_schema, querystring_schema, request_schema, response_schema\n'), ((7252, 7369), 'aiohttp_apispec.docs', 'docs', ([], {'tags': "['credential-definition']", 'summary': '"""Search for matching credential definitions that agent originated"""'}), "(tags=['credential-definition'], summary=\n 'Search for matching credential definitions that agent originated')\n", (7256, 7369), False, 'from aiohttp_apispec import docs, match_info_schema, querystring_schema, request_schema, response_schema\n'), ((8218, 8315), 'aiohttp_apispec.docs', 'docs', ([], {'tags': "['credential-definition']", 'summary': '"""Gets a credential definition from the ledger"""'}), "(tags=['credential-definition'], summary=\n 'Gets a credential definition from the ledger')\n", (8222, 8315), False, 'from aiohttp_apispec import docs, match_info_schema, querystring_schema, request_schema, response_schema\n'), ((941, 1002), 'marshmallow.fields.Str', 'fields.Str', ([], {'description': '"""Schema identifier"""'}), "(description='Schema identifier', **INDY_SCHEMA_ID)\n", (951, 1002), False, 'from marshmallow import fields\n'), ((1028, 1099), 'marshmallow.fields.Boolean', 'fields.Boolean', ([], {'required': '(False)', 'description': '"""Revocation supported flag"""'}), "(required=False, description='Revocation supported flag')\n", (1042, 1099), False, 'from marshmallow import fields\n'), ((1145, 1250), 'marshmallow.fields.Int', 'fields.Int', ([], {'description': '"""Revocation registry size"""', 'required': '(False)', 'strict': '(True)'}), "(description='Revocation registry size', required=False, strict=\n True, **INDY_REV_REG_SIZE)\n", (1155, 1250), False, 'from marshmallow import fields\n'), ((1295, 1421), 'marshmallow.fields.Str', 'fields.Str', ([], {'required': '(False)', 'description': '"""Credential definition identifier tag"""', 'default': '"""default"""', 'example': '"""default"""'}), "(required=False, description=\n 'Credential definition identifier tag', default='default', example=\n 'default')\n", (1305, 1421), False, 'from marshmallow import fields\n'), ((1595, 1673), 'marshmallow.fields.Str', 'fields.Str', ([], {'description': '"""Credential definition identifier"""'}), "(description='Credential definition identifier', **INDY_CRED_DEF_ID)\n", (1605, 1673), False, 'from marshmallow import fields\n'), ((1790, 1853), 'marshmallow.fields.Str', 'fields.Str', ([], {'description': '"""Node protocol version"""'}), "(description='Node protocol version', **INDY_VERSION)\n", (1800, 1853), False, 'from marshmallow import fields\n'), ((1866, 1963), 'marshmallow.fields.Str', 'fields.Str', ([], {'description': '"""Credential definition identifier"""', 'data_key': '"""id"""'}), "(description='Credential definition identifier', data_key='id',\n **INDY_CRED_DEF_ID)\n", (1876, 1963), False, 'from marshmallow import fields\n'), ((2204, 2334), 'marshmallow.fields.Constant', 'fields.Constant', ([], {'constant': '"""CL"""', 'description': '"""Signature type: CL for Camenisch-Lysyanskaya"""', 'data_key': '"""type"""', 'example': '"""CL"""'}), "(constant='CL', description=\n 'Signature type: CL for Camenisch-Lysyanskaya', data_key='type',\n example='CL')\n", (2219, 2334), False, 'from marshmallow import fields\n'), ((2532, 2610), 
'marshmallow.fields.Dict', 'fields.Dict', ([], {'description': '"""Credential definition primary and revocation values"""'}), "(description='Credential definition primary and revocation values')\n", (2543, 2610), False, 'from marshmallow import fields\n'), ((2764, 2805), 'marshmallow.fields.Nested', 'fields.Nested', (['CredentialDefinitionSchema'], {}), '(CredentialDefinitionSchema)\n', (2777, 2805), False, 'from marshmallow import fields\n'), ((3209, 3306), 'marshmallow.fields.Str', 'fields.Str', ([], {'description': '"""Credential definition identifier"""', 'required': '(True)'}), "(description='Credential definition identifier', required=True,\n **INDY_CRED_DEF_ID)\n", (3219, 3306), False, 'from marshmallow import fields\n'), ((7188, 7248), 'aiohttp.web.json_response', 'web.json_response', (["{'credential_definition_id': cred_def_id}"], {}), "({'credential_definition_id': cred_def_id})\n", (7205, 7248), False, 'from aiohttp import web\n'), ((8116, 8204), 'aiohttp.web.json_response', 'web.json_response', (["{'credential_definition_ids': [record.value for record in found]}"], {}), "({'credential_definition_ids': [record.value for record in\n found]})\n", (8133, 8204), False, 'from aiohttp import web\n'), ((9205, 9259), 'aiohttp.web.json_response', 'web.json_response', (["{'credential_definition': cred_def}"], {}), "({'credential_definition': cred_def})\n", (9222, 9259), False, 'from aiohttp import web\n'), ((2982, 3061), 'marshmallow.fields.Str', 'fields.Str', ([], {'description': '"""Credential definition identifiers"""'}), "(description='Credential definition identifiers', **INDY_CRED_DEF_ID)\n", (2992, 3061), False, 'from marshmallow import fields\n'), ((4358, 4390), 'aiohttp.web.HTTPForbidden', 'web.HTTPForbidden', ([], {'reason': 'reason'}), '(reason=reason)\n', (4375, 4390), False, 'from aiohttp import web\n'), ((9065, 9097), 'aiohttp.web.HTTPForbidden', 'web.HTTPForbidden', ([], {'reason': 'reason'}), '(reason=reason)\n', (9082, 9097), False, 'from aiohttp import web\n'), ((4928, 4964), 'aiohttp.web.HTTPBadRequest', 'web.HTTPBadRequest', ([], {'reason': 'e.message'}), '(reason=e.message)\n', (4946, 4964), False, 'from aiohttp import web\n'), ((5213, 5278), 'aiohttp.web.HTTPBadRequest', 'web.HTTPBadRequest', ([], {'reason': '"""tails_server_base_url not configured"""'}), "(reason='tails_server_base_url not configured')\n", (5231, 5278), False, 'from aiohttp import web\n'), ((9373, 9463), 'aiohttp.web.post', 'web.post', (['"""/credential-definitions"""', 'credential_definitions_send_credential_definition'], {}), "('/credential-definitions',\n credential_definitions_send_credential_definition)\n", (9381, 9463), False, 'from aiohttp import web\n'), ((9520, 9616), 'aiohttp.web.get', 'web.get', (['"""/credential-definitions/created"""', 'credential_definitions_created'], {'allow_head': '(False)'}), "('/credential-definitions/created', credential_definitions_created,\n allow_head=False)\n", (9527, 9616), False, 'from aiohttp import web\n'), ((9689, 9809), 'aiohttp.web.get', 'web.get', (['"""/credential-definitions/{cred_def_id}"""', 'credential_definitions_get_credential_definition'], {'allow_head': '(False)'}), "('/credential-definitions/{cred_def_id}',\n credential_definitions_get_credential_definition, allow_head=False)\n", (9696, 9809), False, 'from aiohttp import web\n'), ((5584, 5620), 'aiohttp.web.HTTPBadRequest', 'web.HTTPBadRequest', ([], {'reason': 'e.message'}), '(reason=e.message)\n', (5602, 5620), False, 'from aiohttp import web\n'), ((6842, 6971), 
'aiohttp.web.HTTPInternalServerError', 'web.HTTPInternalServerError', ([], {'reason': 'f"""Tails file for rev reg {registry_record.revoc_reg_id} failed to upload: {reason}"""'}), "(reason=\n f'Tails file for rev reg {registry_record.revoc_reg_id} failed to upload: {reason}'\n )\n", (6869, 6971), False, 'from aiohttp import web\n'), ((7132, 7168), 'aiohttp.web.HTTPBadRequest', 'web.HTTPBadRequest', ([], {'reason': 'e.message'}), '(reason=e.message)\n', (7150, 7168), False, 'from aiohttp import web\n')]
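The entries above all come from aiohttp route handlers decorated with aiohttp_apispec. A minimal sketch of that pattern, using only the calls the entries record; the path, tag and summary are taken from the entries, while the handler body and response payload are invented for illustration:

# Minimal sketch of the @docs-decorated aiohttp handler pattern recorded above.
# The handler body is illustrative only, not the original implementation.
from aiohttp import web
from aiohttp_apispec import docs


@docs(tags=["credential-definition"],
      summary="Gets a credential definition from the ledger")
async def credential_definitions_get_credential_definition(request):
    cred_def_id = request.match_info["cred_def_id"]
    cred_def = {"id": cred_def_id}  # a real handler would query the ledger here
    return web.json_response({"credential_definition": cred_def})


app = web.Application()
app.add_routes([
    web.get("/credential-definitions/{cred_def_id}",
            credential_definitions_get_credential_definition,
            allow_head=False),
])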
#!/usr/bin/python
import rospy
import sched, time
import sys, select, termios, tty # For terminal keyboard key press reading

from std_msgs.msg import Float32, Bool, String
from geometry_msgs.msg import Vector3
from math import pi

msg = """
voice_control function description

start listening

CTRL-C to quit
"""

speed_inc = 0.012
yaw_rate_inc = 1.5*pi/180
angle_inc = 2*pi/180
stand_s = """Stand."""
walk_s = """Work."""
idle_s = """Rest."""
stand_flag = 0
walk_flag = 0
idle_flag = 0

class SpotMicroKeyboardControl():
    def __init__(self):
        # Create messages for body motion commands, and initialize to zero
        self._speed_cmd_msg = Vector3()
        self._speed_cmd_msg.x = 0
        self._speed_cmd_msg.y = 0
        self._speed_cmd_msg.z = 0

        self._walk_event_cmd_msg = Bool()
        self._walk_event_cmd_msg.data = True # Mostly acts as an event driven action on receipt of a true message

        self._stand_event_cmd_msg = Bool()
        self._stand_event_cmd_msg.data = True

        self._idle_event_cmd_msg = Bool()
        self._idle_event_cmd_msg.data = True

        rospy.loginfo("Setting Up the Spot Micro Voice Control Node...")

        # Set up and title the ros node for this code
        rospy.init_node('spot_micro_keyboard_control')

        # Create publishers for x,y speed command, body rate command, and state command
        self.ros_pub_speed_cmd = rospy.Publisher('/speed_cmd', Vector3, queue_size=1)
        self.ros_pub_walk_cmd = rospy.Publisher('/walk_cmd', Bool, queue_size=1)
        self.ros_pub_stand_cmd = rospy.Publisher('/stand_cmd', Bool, queue_size=1)
        self.ros_pub_idle_cmd = rospy.Publisher('/idle_cmd', Bool, queue_size=1)
        # rospy.loginfo("> Publishers correctly initialized")
        rospy.loginfo("Initialization complete")

        # Setup terminal input reading, taken from teleop_twist_keyboard
        self.settings = termios.tcgetattr(sys.stdin)

    def reset_all_motion_commands_to_zero(self):
        '''Reset body motion cmd states to zero and publish zero value body motion commands'''
        self._speed_cmd_msg.x = 0
        self._speed_cmd_msg.y = 0
        self._speed_cmd_msg.z = 0
        self.ros_pub_speed_cmd.publish(self._speed_cmd_msg)

    def run(self):
        global stand_flag, walk_flag, idle_flag

        def callbackv(data):
            # Record which voice command was heard; the main loop below acts on the flags
            global stand_flag, walk_flag, idle_flag
            rospy.loginfo(rospy.get_caller_id() + 'I heard %s', data.data)
            if data.data == stand_s:
                stand_flag = 1
            if data.data == walk_s:
                walk_flag = 1
            if data.data == idle_s:
                idle_flag = 1

        self.reset_all_motion_commands_to_zero()

        # Publish the stand mode
        self.ros_pub_stand_cmd.publish(self._stand_event_cmd_msg)

        ros_pub_tts_cmd = rospy.Publisher('voice_tts', String, queue_size=10)
        rospy.Subscriber('voiceWords', String, callbackv)

        while not rospy.is_shutdown():
            rospy.loginfo('wait for the command')
            time.sleep(1)
            if stand_flag == 1:
                self.ros_pub_stand_cmd.publish(self._stand_event_cmd_msg)
                ros_pub_tts_cmd.publish(msg)
                print('stand mode')
                time.sleep(5)
                stand_flag = 0
            elif idle_flag:
                self.ros_pub_idle_cmd.publish(self._idle_event_cmd_msg)
                ros_pub_tts_cmd.publish(msg)
                print('idle mode')
                idle_flag = 0
            elif walk_flag:
                self.ros_pub_stand_cmd.publish(self._stand_event_cmd_msg)
                ros_pub_tts_cmd.publish(msg)
                time.sleep(1)
                self.reset_all_motion_commands_to_zero()
                self.ros_pub_walk_cmd.publish(self._walk_event_cmd_msg)
                print('walk mode')
                time.sleep(0.1)

                # Walk forward briefly, then stop and return to standing
                self._speed_cmd_msg.x = 0.05
                self.ros_pub_speed_cmd.publish(self._speed_cmd_msg)
                print('Cmd Values: x speed: %1.3f m/s, y speed: %1.3f m/s '
                      % (self._speed_cmd_msg.x, self._speed_cmd_msg.y))
                time.sleep(5)

                self._speed_cmd_msg.x = 0
                self._speed_cmd_msg.y = 0
                self._speed_cmd_msg.z = 0
                self.ros_pub_speed_cmd.publish(self._speed_cmd_msg)
                self.ros_pub_stand_cmd.publish(self._stand_event_cmd_msg)
                walk_flag = 0

if __name__ == "__main__":
    smkc = SpotMicroKeyboardControl()
    smkc.run()
[ "geometry_msgs.msg.Vector3", "rospy.Subscriber", "termios.tcgetattr", "rospy.Publisher", "time.sleep", "rospy.loginfo", "rospy.is_shutdown", "rospy.init_node", "std_msgs.msg.Bool", "rospy.get_caller_id" ]
[((660, 669), 'geometry_msgs.msg.Vector3', 'Vector3', ([], {}), '()\n', (667, 669), False, 'from geometry_msgs.msg import Vector3\n'), ((808, 814), 'std_msgs.msg.Bool', 'Bool', ([], {}), '()\n', (812, 814), False, 'from std_msgs.msg import Float32, Bool, String\n'), ((966, 972), 'std_msgs.msg.Bool', 'Bool', ([], {}), '()\n', (970, 972), False, 'from std_msgs.msg import Float32, Bool, String\n'), ((1062, 1068), 'std_msgs.msg.Bool', 'Bool', ([], {}), '()\n', (1066, 1068), False, 'from std_msgs.msg import Float32, Bool, String\n'), ((1123, 1187), 'rospy.loginfo', 'rospy.loginfo', (['"""Setting Up the Spot Micro Voice Control Node..."""'], {}), "('Setting Up the Spot Micro Voice Control Node...')\n", (1136, 1187), False, 'import rospy\n'), ((1251, 1297), 'rospy.init_node', 'rospy.init_node', (['"""spot_micro_keyboard_control"""'], {}), "('spot_micro_keyboard_control')\n", (1266, 1297), False, 'import rospy\n'), ((1506, 1558), 'rospy.Publisher', 'rospy.Publisher', (['"""/speed_cmd"""', 'Vector3'], {'queue_size': '(1)'}), "('/speed_cmd', Vector3, queue_size=1)\n", (1521, 1558), False, 'import rospy\n'), ((1595, 1643), 'rospy.Publisher', 'rospy.Publisher', (['"""/walk_cmd"""', 'Bool'], {'queue_size': '(1)'}), "('/walk_cmd', Bool, queue_size=1)\n", (1610, 1643), False, 'import rospy\n'), ((1681, 1730), 'rospy.Publisher', 'rospy.Publisher', (['"""/stand_cmd"""', 'Bool'], {'queue_size': '(1)'}), "('/stand_cmd', Bool, queue_size=1)\n", (1696, 1730), False, 'import rospy\n'), ((1767, 1815), 'rospy.Publisher', 'rospy.Publisher', (['"""/idle_cmd"""', 'Bool'], {'queue_size': '(1)'}), "('/idle_cmd', Bool, queue_size=1)\n", (1782, 1815), False, 'import rospy\n'), ((1843, 1895), 'rospy.loginfo', 'rospy.loginfo', (['"""> Publishers corrrectly initialized"""'], {}), "('> Publishers corrrectly initialized')\n", (1856, 1895), False, 'import rospy\n'), ((1905, 1945), 'rospy.loginfo', 'rospy.loginfo', (['"""Initialization complete"""'], {}), "('Initialization complete')\n", (1918, 1945), False, 'import rospy\n'), ((2044, 2072), 'termios.tcgetattr', 'termios.tcgetattr', (['sys.stdin'], {}), '(sys.stdin)\n', (2061, 2072), False, 'import sys, select, termios, tty\n'), ((3402, 3453), 'rospy.Publisher', 'rospy.Publisher', (['"""voice_tts"""', 'String'], {'queue_size': '(10)'}), "('voice_tts', String, queue_size=10)\n", (3417, 3453), False, 'import rospy\n'), ((3460, 3509), 'rospy.Subscriber', 'rospy.Subscriber', (['"""voiceWords"""', 'String', 'callbackv'], {}), "('voiceWords', String, callbackv)\n", (3476, 3509), False, 'import rospy\n'), ((3529, 3548), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (3546, 3548), False, 'import rospy\n'), ((3586, 3624), 'rospy.loginfo', 'rospy.loginfo', (['"""wait for the commond """'], {}), "('wait for the commond ')\n", (3599, 3624), False, 'import rospy\n'), ((3637, 3650), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3647, 3650), False, 'import sched, time\n'), ((3856, 3869), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3866, 3869), False, 'import sched, time\n'), ((2932, 2953), 'rospy.get_caller_id', 'rospy.get_caller_id', ([], {}), '()\n', (2951, 2953), False, 'import rospy\n'), ((4316, 4329), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4326, 4329), False, 'import sched, time\n'), ((4511, 4526), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4521, 4526), False, 'import sched, time\n'), ((4811, 4824), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4821, 4824), False, 'import sched, time\n')]
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# this script will cover VM deployment with Userdata tests for MultiNic
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (Account,
                             Network,
                             NetworkOffering,
                             ServiceOffering,
                             VirtualMachine)
from marvin.lib.common import (get_domain,
                               get_template,
                               get_zone,
                               list_virtual_machines)
from marvin.lib.utils import cleanup_resources
from nose.plugins.attrib import attr
import base64
import random
import string

_multiprocess_shared_ = True


class TestDeployVmWithUserDataMultiNic(cloudstackTestCase):
    """Tests for UserData
    """

    @classmethod
    def setUpClass(cls):
        cls.testClient = super(TestDeployVmWithUserDataMultiNic, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.test_data = cls.testClient.getParsedTestDataConfig()

        # Get Domain, Zone, Template
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(
            cls.api_client,
            cls.testClient.getZoneForTests())
        cls.template = get_template(
            cls.api_client,
            cls.zone.id,
            cls.test_data["ostype"]
        )
        if cls.zone.localstorageenabled:
            cls.storagetype = 'local'
            cls.test_data["service_offerings"][
                "tiny"]["storagetype"] = 'local'
        else:
            cls.storagetype = 'shared'
            cls.test_data["service_offerings"][
                "tiny"]["storagetype"] = 'shared'

        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.test_data["service_offerings"]["tiny"]
        )

        # Create Network offering without userdata
        cls.network_offering_nouserdata = NetworkOffering.create(
            cls.api_client,
            cls.test_data["network_offering"]
        )
        # Enable Network offering
        cls.network_offering_nouserdata.update(cls.api_client, state='Enabled')

        # Create Network Offering with all the services
        cls.network_offering_all = NetworkOffering.create(
            cls.api_client,
            cls.test_data["isolated_network_offering"]
        )
        # Enable Network offering
        cls.network_offering_all.update(cls.api_client, state='Enabled')

        cls._cleanup = [
            cls.service_offering,
            cls.network_offering_nouserdata,
            cls.network_offering_all
        ]

        # Generate userdata of 2500 bytes. This is larger than the 2048 bytes limit.
        # CS however allows for up to 4K bytes in the code, so this must succeed.
        # Overall, the query length must not exceed 4K, for then the json decoder
        # will fail this operation at the marvin client side itself.
cls.userdata = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(2500)) def setUp(self): self.apiclient = self.testClient.getApiClient() self.hypervisor = self.testClient.getHypervisorInfo() self.dbclient = self.testClient.getDbConnection() self.account = Account.create( self.apiclient, self.test_data["account"], admin=True, domainid=self.domain.id ) self.cleanup = [] return def tearDown(self): try: self.account.delete(self.apiclient) cleanup_resources(self.apiclient, self.cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) return @attr(tags=["simulator", "devcloud", "basic", "advanced"], required_hardware="false") def test_deployvm_multinic(self): """Test userdata update when non default nic is without userdata for deploy and update """ self.userdata = base64.b64encode(self.userdata) network1 = Network.create( self.apiclient, self.test_data["isolated_network"], accountid=self.account.name, domainid=self.account.domainid, networkofferingid=self.network_offering_all.id, zoneid=self.zone.id ) self.test_data["network_without_acl"]["netmask"] = "255.255.255.128" network2 = Network.create( self.apiclient, self.test_data["network_without_acl"], accountid=self.account.name, domainid=self.account.domainid, networkofferingid=self.network_offering_nouserdata.id, gateway="10.2.1.1", zoneid=self.zone.id ) deployVmResponse = VirtualMachine.create( self.apiclient, services=self.test_data["virtual_machine_userdata"], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, networkids=[str(network1.id), str(network2.id)], templateid=self.template.id, zoneid=self.zone.id ) vms = list_virtual_machines( self.apiclient, account=self.account.name, domainid=self.account.domainid, id=deployVmResponse.id ) self.assert_(len(vms) > 0, "There are no Vms deployed in the account %s" % self.account.name) vm = vms[0] self.assert_(vm.id == str(deployVmResponse.id), "Vm deployed is different from the test") self.assert_(vm.state == "Running", "VM is not in Running state") try: updateresponse = deployVmResponse.update(self.apiclient, userdata=self.userdata) except Exception as e: self.fail("Failed to update userdata: %s" % e) self.debug("virtual machine update response is: %s" % updateresponse) @classmethod def tearDownClass(cls): try: # Cleanup resources used cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e)
[ "marvin.lib.utils.cleanup_resources", "marvin.lib.base.NetworkOffering.create", "marvin.lib.common.get_template", "marvin.lib.base.Account.create", "random.choice", "marvin.lib.base.ServiceOffering.create", "marvin.lib.base.Network.create", "marvin.lib.common.list_virtual_machines", "marvin.lib.common.get_domain", "base64.b64encode", "nose.plugins.attrib.attr" ]
[((4513, 4602), 'nose.plugins.attrib.attr', 'attr', ([], {'tags': "['simulator', 'devcloud', 'basic', 'advanced']", 'required_hardware': '"""false"""'}), "(tags=['simulator', 'devcloud', 'basic', 'advanced'], required_hardware\n ='false')\n", (4517, 4602), False, 'from nose.plugins.attrib import attr\n'), ((1876, 1902), 'marvin.lib.common.get_domain', 'get_domain', (['cls.api_client'], {}), '(cls.api_client)\n', (1886, 1902), False, 'from marvin.lib.common import get_domain, get_template, get_zone, list_virtual_machines\n'), ((2029, 2095), 'marvin.lib.common.get_template', 'get_template', (['cls.api_client', 'cls.zone.id', "cls.test_data['ostype']"], {}), "(cls.api_client, cls.zone.id, cls.test_data['ostype'])\n", (2041, 2095), False, 'from marvin.lib.common import get_domain, get_template, get_zone, list_virtual_machines\n'), ((2501, 2588), 'marvin.lib.base.ServiceOffering.create', 'ServiceOffering.create', (['cls.api_client', "cls.test_data['service_offerings']['tiny']"], {}), "(cls.api_client, cls.test_data['service_offerings'][\n 'tiny'])\n", (2523, 2588), False, 'from marvin.lib.base import Account, Network, NetworkOffering, ServiceOffering, VirtualMachine\n'), ((2712, 2785), 'marvin.lib.base.NetworkOffering.create', 'NetworkOffering.create', (['cls.api_client', "cls.test_data['network_offering']"], {}), "(cls.api_client, cls.test_data['network_offering'])\n", (2734, 2785), False, 'from marvin.lib.base import Account, Network, NetworkOffering, ServiceOffering, VirtualMachine\n'), ((3025, 3112), 'marvin.lib.base.NetworkOffering.create', 'NetworkOffering.create', (['cls.api_client', "cls.test_data['isolated_network_offering']"], {}), "(cls.api_client, cls.test_data[\n 'isolated_network_offering'])\n", (3047, 3112), False, 'from marvin.lib.base import Account, Network, NetworkOffering, ServiceOffering, VirtualMachine\n'), ((4047, 4145), 'marvin.lib.base.Account.create', 'Account.create', (['self.apiclient', "self.test_data['account']"], {'admin': '(True)', 'domainid': 'self.domain.id'}), "(self.apiclient, self.test_data['account'], admin=True,\n domainid=self.domain.id)\n", (4061, 4145), False, 'from marvin.lib.base import Account, Network, NetworkOffering, ServiceOffering, VirtualMachine\n'), ((4768, 4799), 'base64.b64encode', 'base64.b64encode', (['self.userdata'], {}), '(self.userdata)\n', (4784, 4799), False, 'import base64\n'), ((4820, 5024), 'marvin.lib.base.Network.create', 'Network.create', (['self.apiclient', "self.test_data['isolated_network']"], {'accountid': 'self.account.name', 'domainid': 'self.account.domainid', 'networkofferingid': 'self.network_offering_all.id', 'zoneid': 'self.zone.id'}), "(self.apiclient, self.test_data['isolated_network'],\n accountid=self.account.name, domainid=self.account.domainid,\n networkofferingid=self.network_offering_all.id, zoneid=self.zone.id)\n", (4834, 5024), False, 'from marvin.lib.base import Account, Network, NetworkOffering, ServiceOffering, VirtualMachine\n'), ((5197, 5436), 'marvin.lib.base.Network.create', 'Network.create', (['self.apiclient', "self.test_data['network_without_acl']"], {'accountid': 'self.account.name', 'domainid': 'self.account.domainid', 'networkofferingid': 'self.network_offering_nouserdata.id', 'gateway': '"""10.2.1.1"""', 'zoneid': 'self.zone.id'}), "(self.apiclient, self.test_data['network_without_acl'],\n accountid=self.account.name, domainid=self.account.domainid,\n networkofferingid=self.network_offering_nouserdata.id, gateway=\n '10.2.1.1', zoneid=self.zone.id)\n", (5211, 5436), False, 'from marvin.lib.base import Account, Network, NetworkOffering, ServiceOffering, VirtualMachine\n'), ((5962, 6087), 'marvin.lib.common.list_virtual_machines', 'list_virtual_machines', (['self.apiclient'], {'account': 'self.account.name', 'domainid': 'self.account.domainid', 'id': 'deployVmResponse.id'}), '(self.apiclient, account=self.account.name, domainid=\n self.account.domainid, id=deployVmResponse.id)\n', (5983, 6087), False, 'from marvin.lib.common import get_domain, get_template, get_zone, list_virtual_machines\n'), ((4339, 4386), 'marvin.lib.utils.cleanup_resources', 'cleanup_resources', (['self.apiclient', 'self.cleanup'], {}), '(self.apiclient, self.cleanup)\n', (4356, 4386), False, 'from marvin.lib.utils import cleanup_resources\n'), ((6819, 6866), 'marvin.lib.utils.cleanup_resources', 'cleanup_resources', (['cls.api_client', 'cls._cleanup'], {}), '(cls.api_client, cls._cleanup)\n', (6836, 6866), False, 'from marvin.lib.utils import cleanup_resources\n'), ((3750, 3803), 'random.choice', 'random.choice', (['(string.ascii_uppercase + string.digits)'], {}), '(string.ascii_uppercase + string.digits)\n', (3763, 3803), False, 'import random\n')]
import logging
import subprocess

import yaml

from cekit.errors import CekitError
from cekit.generator.base import Generator

logger = logging.getLogger('cekit')


class DockerGenerator(Generator):

    def __init__(self, descriptor_path, target, builder, overrides, params):
        self._params = params
        super(DockerGenerator, self).__init__(descriptor_path, target, builder,
                                              overrides, params)
        self._fetch_repos = True

    def _prepare_repository_odcs_pulp(self, repo):
        """Create a pulp content set in ODCS and return its URL

        Args:
          repo - repository object to generate ODCS pulp for"""
        try:
            # ideally this would use an ODCS API, but there is no python3 package for ODCS
            cmd = ['odcs']

            if self._params.get('redhat', False):
                cmd.append('--redhat')
            cmd.extend(['create', 'pulp', repo['odcs']['pulp']])

            logger.debug("Creating ODCS content set via '%s'" % cmd)

            # universal_newlines=True makes check_output return text, so the
            # string replacements below also work under Python 3
            output = subprocess.check_output(cmd, universal_newlines=True)

            # Drop the first (status) line and strip Python-2-style u'' string
            # prefixes so that the remainder parses as plain YAML
            normalized_output = '\n'.join(output.replace(" u'", " '")
                                          .replace(' u"', ' "')
                                          .split('\n')[1:])

            odcs_result = yaml.safe_load(normalized_output)

            # ODCS reports state 2 when the compose finished successfully
            if odcs_result['state'] != 2:
                raise CekitError("Cannot create content set: '%s'"
                                 % odcs_result['state_reason'])

            repo_url = odcs_result['result_repofile']
            repo['url']['repository'] = repo_url
            return True
        except CekitError as ex:
            raise ex
        except OSError as ex:
            raise CekitError("ODCS is not installed, please install 'odcs-client' package")
        except subprocess.CalledProcessError as ex:
            raise CekitError("Cannot create content set: '%s'" % ex.output)
        except Exception as ex:
            raise CekitError('Cannot create content set!', ex)

    def _prepare_repository_rpm(self, repo):
        # no special handling is needed here, everything is in template
        pass
[ "cekit.errors.CekitError", "subprocess.check_output", "yaml.safe_load", "logging.getLogger" ]
[((135, 161), 'logging.getLogger', 'logging.getLogger', (['"""cekit"""'], {}), "('cekit')\n", (152, 161), False, 'import logging\n'), ((1008, 1036), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (1031, 1036), False, 'import subprocess\n'), ((1258, 1291), 'yaml.safe_load', 'yaml.safe_load', (['normalized_output'], {}), '(normalized_output)\n', (1272, 1291), False, 'import yaml\n'), ((1357, 1432), 'cekit.errors.CekitError', 'CekitError', (['("Cannot create content set: \'%s\'" % odcs_result[\'state_reason\'])'], {}), '("Cannot create content set: \'%s\'" % odcs_result[\'state_reason\'])\n', (1367, 1432), False, 'from cekit.errors import CekitError\n'), ((1699, 1772), 'cekit.errors.CekitError', 'CekitError', (['"""ODCS is not installed, please install \'odcs-client\' package"""'], {}), '("ODCS is not installed, please install \'odcs-client\' package")\n', (1709, 1772), False, 'from cekit.errors import CekitError\n'), ((1843, 1900), 'cekit.errors.CekitError', 'CekitError', (['("Cannot create content set: \'%s\'" % ex.output)'], {}), '("Cannot create content set: \'%s\'" % ex.output)\n', (1853, 1900), False, 'from cekit.errors import CekitError\n'), ((1951, 1995), 'cekit.errors.CekitError', 'CekitError', (['"""Cannot create content set!"""', 'ex'], {}), "('Cannot create content set!', ex)\n", (1961, 1995), False, 'from cekit.errors import CekitError\n')]
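The string juggling in _prepare_repository_odcs_pulp above is easier to follow with concrete data. A small runnable illustration of the normalization step; the sample command output is invented, shaped only by what the method assumes (a human-readable status line followed by Python-2-flavoured YAML):

# Runnable illustration of the output normalization performed above.
# sample_output is invented for the example.
import yaml

sample_output = ("Waiting for command create on compose 1 to finish.\n"
                 "{'state': 2, 'state_reason': u'Compose is generated successfully',\n"
                 " 'result_repofile': u'http://odcs.example.com/composes/1/odcs-1.repo'}")

# Same transformation as in _prepare_repository_odcs_pulp: drop the first
# line, then strip the u'...' / u"..." prefixes so plain YAML remains.
normalized = '\n'.join(sample_output.replace(" u'", " '")
                                    .replace(' u"', ' "')
                                    .split('\n')[1:])

odcs_result = yaml.safe_load(normalized)
assert odcs_result['state'] == 2
assert odcs_result['result_repofile'].endswith('.repo')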
from pymatgen import Structure from pymatgen.analysis.chemenv.coordination_environments import coordination_geometries_files as cg_files from pymatgen.analysis.chemenv.coordination_environments import coordination_geometry_finder as polyfinder from pymatgen.analysis.chemenv.coordination_environments import chemenv_strategies as strategies from pymatgen.analysis.chemenv.coordination_environments import structure_environments as se from chemenv_util import find_site_ce, find_species_string_ce, find_species_ce_from_light_se import unittest import json import os class TestChemEnvUtil(unittest.TestCase): def setUp(self): """Set up Structure from Cif file and path to jsons folder in Chemenv module in Pymatgen""" self.structure = Structure.from_file('LiCoO2.cif', True, False) self.structure.make_supercell([3, 3, 3]) for isite, site in enumerate(self.structure._sites): if site.species_string == 'Li': self.first_site = site self.ifirst_site = isite break self.path_to_jsons = os.path.dirname(cg_files.__file__) def test_site_chemenv(self): """Test finding the coordination environment (mp symbol) of a single site in a structure using Chemenv""" ce = find_site_ce(self.structure, self.ifirst_site) with open(self.path_to_jsons+"/%s.json" % ce) as json_file: data = json.load(json_file) self.assertEqual(data['mp_symbol'], "O:6", "Li polyhedra should be 6-coordinated octahedron") self.assertEqual(data['coordination'], 6, "Li polyhedra should be 6-coordinated octahedron") self.assertEqual(data['name'], "Octahedron", "Li polyhedra should be 6-coordinated octahedron") def test_species_chemenv(self): """Test finding the coordination environments (mp symbols) of a specific species in a structure using Chemenv""" ces = find_species_string_ce(self.structure, self.first_site.species_string) for ce in ces: with open(self.path_to_jsons+"/%s.json" % ce) as json_file: data = json.load(json_file) self.assertEqual(data['mp_symbol'], "O:6", "Li polyhedra should be 6-coordinated octahedron") self.assertEqual(data['coordination'], 6, "Li polyhedra should be 6-coordinated octahedron") self.assertEqual(data['name'], "Octahedron", "Li polyhedra should be 6-coordinated octahedron") def test_species_from_light_structure_chemenv(self): """Test finding the ce's (mp symbols) of a species but starting from a Light Structure Environment object""" s1_finder = polyfinder.LocalGeometryFinder() s1_finder.setup_structure(self.structure) s1_finder.setup_parameters(centering_type='standard', structure_refinement='none') environments = s1_finder.compute_structure_environments_detailed_voronoi(maximum_distance_factor=1.5) light_se = se.LightStructureEnvironments(strategies.SimplestChemenvStrategy(), environments) ces = find_species_ce_from_light_se(light_se, self.first_site.species_string) for ce in ces: with open(self.path_to_jsons+"/%s.json" % ce) as json_file: data = json.load(json_file) self.assertEqual(data['mp_symbol'], "O:6", "Li polyhedra should be 6-coordinated octahedron") self.assertEqual(data['coordination'], 6, "Li polyhedra should be 6-coordinated octahedron") self.assertEqual(data['name'], "Octahedron", "Li polyhedra should be 6-coordinated octahedron") if __name__ == '__main__': unittest.main()
[ "unittest.main", "chemenv_util.find_species_string_ce", "json.load", "pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder.LocalGeometryFinder", "pymatgen.Structure.from_file", "os.path.dirname", "pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies.SimplestChemenvStrategy", "chemenv_util.find_site_ce", "chemenv_util.find_species_ce_from_light_se" ]
[((3609, 3624), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3622, 3624), False, 'import unittest\n'), ((756, 802), 'pymatgen.Structure.from_file', 'Structure.from_file', (['"""LiCoO2.cif"""', '(True)', '(False)'], {}), "('LiCoO2.cif', True, False)\n", (775, 802), False, 'from pymatgen import Structure\n'), ((1089, 1123), 'os.path.dirname', 'os.path.dirname', (['cg_files.__file__'], {}), '(cg_files.__file__)\n', (1104, 1123), False, 'import os\n'), ((1285, 1331), 'chemenv_util.find_site_ce', 'find_site_ce', (['self.structure', 'self.ifirst_site'], {}), '(self.structure, self.ifirst_site)\n', (1297, 1331), False, 'from chemenv_util import find_site_ce, find_species_string_ce, find_species_ce_from_light_se\n'), ((1919, 1989), 'chemenv_util.find_species_string_ce', 'find_species_string_ce', (['self.structure', 'self.first_site.species_string'], {}), '(self.structure, self.first_site.species_string)\n', (1941, 1989), False, 'from chemenv_util import find_site_ce, find_species_string_ce, find_species_ce_from_light_se\n'), ((2644, 2676), 'pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder.LocalGeometryFinder', 'polyfinder.LocalGeometryFinder', ([], {}), '()\n', (2674, 2676), True, 'from pymatgen.analysis.chemenv.coordination_environments import coordination_geometry_finder as polyfinder\n'), ((3045, 3116), 'chemenv_util.find_species_ce_from_light_se', 'find_species_ce_from_light_se', (['light_se', 'self.first_site.species_string'], {}), '(light_se, self.first_site.species_string)\n', (3074, 3116), False, 'from chemenv_util import find_site_ce, find_species_string_ce, find_species_ce_from_light_se\n'), ((1419, 1439), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1428, 1439), False, 'import json\n'), ((2978, 3014), 'pymatgen.analysis.chemenv.coordination_environments.chemenv_strategies.SimplestChemenvStrategy', 'strategies.SimplestChemenvStrategy', ([], {}), '()\n', (3012, 3014), True, 'from pymatgen.analysis.chemenv.coordination_environments import chemenv_strategies as strategies\n'), ((2109, 2129), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (2118, 2129), False, 'import json\n'), ((3236, 3256), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3245, 3256), False, 'import json\n')]
# Generated by Django 3.0.3 on 2020-04-07 12:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('backend', '0004_auto_20200406_1401'), ] operations = [ migrations.RemoveField( model_name='filter', name='cameras', ), migrations.AddField( model_name='filter', name='excluded_cameras', field=models.ManyToManyField(related_name='excluded_in_filter', to='backend.Camera'), ), migrations.AddField( model_name='filter', name='included_cameras', field=models.ManyToManyField(related_name='included_in_filter', to='backend.Camera'), ), migrations.AddField( model_name='filter', name='matching_cameras', field=models.ManyToManyField(related_name='filter', to='backend.Camera'), ), ]
[ "django.db.migrations.RemoveField", "django.db.models.ManyToManyField" ]
[((235, 294), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""filter"""', 'name': '"""cameras"""'}), "(model_name='filter', name='cameras')\n", (257, 294), False, 'from django.db import migrations, models\n'), ((448, 526), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""excluded_in_filter"""', 'to': '"""backend.Camera"""'}), "(related_name='excluded_in_filter', to='backend.Camera')\n", (470, 526), False, 'from django.db import migrations, models\n'), ((656, 734), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""included_in_filter"""', 'to': '"""backend.Camera"""'}), "(related_name='included_in_filter', to='backend.Camera')\n", (678, 734), False, 'from django.db import migrations, models\n'), ((864, 930), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""filter"""', 'to': '"""backend.Camera"""'}), "(related_name='filter', to='backend.Camera')\n", (886, 930), False, 'from django.db import migrations, models\n')]
from django.conf.urls.defaults import * from article.views import ArticleListView, ArticleDetailView urlpatterns = patterns('', url(r'^(?P<slug>[^/]+)/$', ArticleDetailView.as_view(), name='article-details'), url(r'^$', ArticleListView.as_view(), name='article-list'), )
[ "article.views.ArticleDetailView.as_view", "article.views.ArticleListView.as_view" ]
[((160, 187), 'article.views.ArticleDetailView.as_view', 'ArticleDetailView.as_view', ([], {}), '()\n', (185, 187), False, 'from article.views import ArticleListView, ArticleDetailView\n'), ((229, 254), 'article.views.ArticleListView.as_view', 'ArticleListView.as_view', ([], {}), '()\n', (252, 254), False, 'from article.views import ArticleListView, ArticleDetailView\n')]
from django.contrib import admin # Register your models here. from tastybitauth.models import BitAuth class BitAuthAdmin(admin.ModelAdmin): list_display = ('user', 'sin',) raw_id_fields = ('user',) autocomplete_lookup_fields = { 'fk': ['user_id'], } admin.site.register(BitAuth, BitAuthAdmin)
[ "django.contrib.admin.site.register" ]
[((279, 321), 'django.contrib.admin.site.register', 'admin.site.register', (['BitAuth', 'BitAuthAdmin'], {}), '(BitAuth, BitAuthAdmin)\n', (298, 321), False, 'from django.contrib import admin\n')]
from django.core.exceptions import ImproperlyConfigured from django.db import models class ObjectTypeRegistry: """ Implements a lookup table for mapping objects to values according to the object type. The most specific type according to the object's inheritance chain is selected. """ def __init__(self): # values in this dict will be returned if the field type exactly matches an item here self.values_by_exact_class = {} # values in this dict will be returned if any class in the field's inheritance chain # matches, preferring more specific subclasses self.values_by_class = {} def register(self, cls, value=None, exact_class=False): if exact_class: self.values_by_exact_class[cls] = value else: self.values_by_class[cls] = value def get_by_type(self, cls): try: return self.values_by_exact_class[cls] except KeyError: for ancestor in cls.mro(): try: return self.values_by_class[ancestor] except KeyError: pass def get(self, obj): value = self.get_by_type(obj.__class__) if callable(value) and not isinstance(value, type): value = value(obj) return value class ModelFieldRegistry(ObjectTypeRegistry): """ Handles the recurring pattern where we need to register different values for different model field types, and retrieve the one that most closely matches a given model field, according to its type (taking inheritance into account), and in the case of foreign keys, the type of the related model (again, taking inheritance into account). For example, this is used by wagtail.admin.forms.models when constructing model forms: we use such a registry to retrieve the appropriate dict of arguments to pass to the form field constructor. A lookup for a models.TextField will return a dict specifying a text area widget, and a lookup for a foreign key to Image will return a dict specifying an image chooser widget. """ def __init__(self): super().__init__() self.values_by_class[models.ForeignKey] = self.foreign_key_lookup # values in this dict will be returned if the field is a foreign key to a related # model in here, matching most specific subclass first self.values_by_fk_related_model = {} def register(self, field_class, to=None, value=None, exact_class=False): if to: if field_class == models.ForeignKey: self.values_by_fk_related_model[to] = value else: raise ImproperlyConfigured( "The 'to' argument on ModelFieldRegistry.register is only valid for ForeignKey fields" ) else: super().register(field_class, value=value, exact_class=exact_class) def foreign_key_lookup(self, field): value = None target_model = field.remote_field.model for model in target_model.mro(): if model in self.values_by_fk_related_model: value = self.values_by_fk_related_model[model] break if callable(value) and not isinstance(value, type): value = value(field) return value
[ "django.core.exceptions.ImproperlyConfigured" ]
[((2703, 2821), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""The \'to\' argument on ModelFieldRegistry.register is only valid for ForeignKey fields"""'], {}), '(\n "The \'to\' argument on ModelFieldRegistry.register is only valid for ForeignKey fields"\n )\n', (2723, 2821), False, 'from django.core.exceptions import ImproperlyConfigured\n')]
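The lookup semantics described in the ObjectTypeRegistry docstring above are easiest to see with a concrete inheritance chain. A short sketch, meant to run in the same module as the class; the animal classes and registered values are invented for the example:

# Illustrative lookup behaviour for ObjectTypeRegistry; class names and
# registered values are placeholders.
class Animal:
    pass

class Dog(Animal):
    pass

class Poodle(Dog):
    pass

registry = ObjectTypeRegistry()
registry.register(Animal, value="generic handler")
registry.register(Dog, value="dog handler")
registry.register(Poodle, value="poodle handler", exact_class=True)

assert registry.get(Poodle()) == "poodle handler"  # exact-class match wins
assert registry.get(Dog()) == "dog handler"        # plain class match
assert registry.get(Animal()) == "generic handler"

class ToyPoodle(Poodle):
    pass

# exact_class entries do not apply to subclasses, so the MRO walk
# falls through to the Dog registration.
assert registry.get(ToyPoodle()) == "dog handler"

ModelFieldRegistry layers the same mechanism over model fields, with the extra twist that ForeignKey registrations are keyed on the related model and resolved by walking the related model's MRO in foreign_key_lookup.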
import sys import time import tempfile from ....tests.helper import pytest from ....utils.data import get_pkg_data_filename from ..hub import SAMPHubServer from ..integrated_client import SAMPIntegratedClient from ..constants import SAMP_STATUS_OK from ..errors import SAMPClientError, SAMPProxyError # By default, tests should not use the internet. from .. import conf from .test_helpers import random_params, Receiver, assert_output, TEST_REPLY def setup_module(module): conf.use_internet = False TEST_CERT1 = get_pkg_data_filename('data/test1.crt') TEST_KEY1 = get_pkg_data_filename('data/test1.key') TEST_CERT2 = get_pkg_data_filename('data/test2.crt') TEST_KEY2 = get_pkg_data_filename('data/test2.key') class TestStandardProfile(object): conf = 'no_https' @property def hub_init_kwargs(self): return {} @property def client_init_kwargs(self): return {} @property def client_connect_kwargs(self): return {} def setup_method(self, method): self.tmpdir = tempfile.mkdtemp() self.hub = SAMPHubServer(web_profile=False, mode='multiple', pool_size=1, **self.hub_init_kwargs) self.hub.start() self.client1 = SAMPIntegratedClient(**self.client_init_kwargs) self.client1.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs) self.client2 = SAMPIntegratedClient(**self.client_init_kwargs) self.client2.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs) def teardown_method(self, method): if self.client1.is_connected: self.client1.disconnect() if self.client2.is_connected: self.client2.disconnect() self.hub.stop() def test_main(self): self.client1_id = self.client1.get_public_id() self.client2_id = self.client2.get_public_id() self.metadata1 = {"samp.name": "Client 1", "samp.description.text": "Client 1 Description", "client.version": "1.1"} self.metadata2 = {"samp.name": "Client 2", "samp.description.text": "Client 2 Description", "client.version": "1.2"} # Check that the clients are connected assert self.client1.is_connected assert self.client2.is_connected # Check that ping works self.client1.ping() self.client2.ping() # Check that get_registered_clients works as expected. assert self.client1_id not in self.client1.get_registered_clients() assert self.client2_id in self.client1.get_registered_clients() assert self.client1_id in self.client2.get_registered_clients() assert self.client2_id not in self.client2.get_registered_clients() # Check that get_metadata works as expected assert self.client1.get_metadata(self.client1_id) == {} assert self.client1.get_metadata(self.client2_id) == {} assert self.client2.get_metadata(self.client1_id) == {} assert self.client2.get_metadata(self.client2_id) == {} self.client1.declare_metadata(self.metadata1) assert self.client1.get_metadata(self.client1_id) == self.metadata1 assert self.client2.get_metadata(self.client1_id) == self.metadata1 assert self.client1.get_metadata(self.client2_id) == {} assert self.client2.get_metadata(self.client2_id) == {} self.client2.declare_metadata(self.metadata2) assert self.client1.get_metadata(self.client1_id) == self.metadata1 assert self.client2.get_metadata(self.client1_id) == self.metadata1 assert self.client1.get_metadata(self.client2_id) == self.metadata2 assert self.client2.get_metadata(self.client2_id) == self.metadata2 # Check that, without subscriptions, sending a notification from one # client to another raises an error. 
message = {} message['samp.mtype'] = "table.load.votable" message['samp.params'] = {} with pytest.raises(SAMPProxyError): self.client1.notify(self.client2_id, message) # Check that there are no currently active subscriptions assert self.client1.get_subscribed_clients('table.load.votable') == {} assert self.client2.get_subscribed_clients('table.load.votable') == {} # We now test notifications and calls rec1 = Receiver(self.client1) rec2 = Receiver(self.client2) self.client2.bind_receive_notification('table.load.votable', rec2.receive_notification) self.client2.bind_receive_call('table.load.votable', rec2.receive_call) self.client1.bind_receive_response('test-tag', rec1.receive_response) # Check resulting subscriptions assert self.client1.get_subscribed_clients('table.load.votable') == {self.client2_id: {}} assert self.client2.get_subscribed_clients('table.load.votable') == {} assert 'table.load.votable' in self.client1.get_subscriptions(self.client2_id) assert 'table.load.votable' in self.client2.get_subscriptions(self.client2_id) # Once we have finished with the calls and notifications, we will # check the data got across correctly. # Test notify params = random_params(self.tmpdir) self.client1.notify(self.client2.get_public_id(), {'samp.mtype':'table.load.votable', 'samp.params':params}) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.tmpdir) self.client1.enotify(self.client2.get_public_id(), "table.load.votable", **params) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) # Test notify_all params = random_params(self.tmpdir) self.client1.notify_all({'samp.mtype':'table.load.votable', 'samp.params':params}) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.tmpdir) self.client1.enotify_all("table.load.votable", **params) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) # Test call params = random_params(self.tmpdir) self.client1.call(self.client2.get_public_id(), 'test-tag', {'samp.mtype':'table.load.votable', 'samp.params':params}) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.tmpdir) self.client1.ecall(self.client2.get_public_id(), 'test-tag', "table.load.votable", **params) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) # Test call_all params = random_params(self.tmpdir) self.client1.call_all('tag1', {'samp.mtype':'table.load.votable', 'samp.params':params}) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.tmpdir) self.client1.ecall_all('tag2', "table.load.votable", **params) assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) # Test call_and_wait params = random_params(self.tmpdir) result = self.client1.call_and_wait(self.client2.get_public_id(), {'samp.mtype':'table.load.votable', 'samp.params':params}, timeout=5) assert result == TEST_REPLY assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) params = random_params(self.tmpdir) result = self.client1.ecall_and_wait(self.client2.get_public_id(), "table.load.votable", timeout=5, **params) assert result == TEST_REPLY 
assert_output('table.load.votable', self.client2.get_private_key(), self.client1_id, params, timeout=60) # TODO: check that receive_response received the right data # Note: The TestStandardProfileHTTPSHub and / or TestStandardProfileHTTPSHubClient # tests are "randomly" failing from time to time # on travis-ci, so for now we mark them xfail and skip them # We have the following issues to make sure this is not forgotten: # https://github.com/astropy/astropy/issues/2064 # https://github.com/astropy/astropy/issues/2126 # https://github.com/astropy/astropy/issues/2321 class TestStandardProfileHTTPSHub(TestStandardProfile): conf = 'https_hub' @property def hub_init_kwargs(self): return { 'https': True, 'cert_file': TEST_CERT1, 'key_file': TEST_KEY1, } class TestStandardProfileHTTPSHubClient(TestStandardProfile): conf = 'https_hub_client' @property def hub_init_kwargs(self): return { 'https': True, 'cert_file': TEST_CERT1, 'key_file': TEST_KEY1, } @property def client_init_kwargs(self): return { 'https': True, 'cert_file': TEST_CERT2, 'key_file': TEST_KEY2, }
[ "tempfile.mkdtemp" ]
[((1042, 1060), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1058, 1060), False, 'import tempfile\n')]
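The tests above drive the full messaging API; the bare hub/client round-trip they are built on looks roughly like the following sketch. The astropy.samp import path is an assumption for a standalone modern install (the tests themselves use package-relative imports), and the mtype payload is invented:

# Bare-bones version of the hub/client round-trip the tests build on.
import time
from astropy.samp import SAMPHubServer, SAMPIntegratedClient

hub = SAMPHubServer(web_profile=False)
hub.start()

sender = SAMPIntegratedClient()
receiver = SAMPIntegratedClient()
sender.connect(hub=hub)
receiver.connect(hub=hub)

received = []

def on_notification(private_key, sender_id, mtype, params, extra):
    received.append((mtype, params))

receiver.bind_receive_notification('table.load.votable', on_notification)
sender.enotify_all('table.load.votable', url='file:///tmp/example.xml')

time.sleep(1)  # delivery is asynchronous; real code would poll or use an event

sender.disconnect()
receiver.disconnect()
hub.stop()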
# -*- coding: utf-8 -*-
"""

Script Name: PipelineTool.py
Author: <NAME>/Jimmy - 3D artist.

Description:
    This is the main UI of PipelineTool.

"""
# -------------------------------------------------------------------------------------------------------------
""" Import """

# PLM
from PLM import __homepage__, __appName__
from pyPLM.damg import DAMGDICT
from pyPLM.Widgets import MainWindow, Widget, GridLayout
from pyPLM.Gui import LogoIcon
from .components import MainStatusBar, MidTab, BotTab, MainHeader
from .models import ButtonManager, ActionManager
from PLM.cores import ThreadManager

# -------------------------------------------------------------------------------------------------------------
""" Pipeline Tool main layout """

class PipelineManager(MainWindow):

    key = 'PipelineManager'
    _name = __appName__
    toolBars = DAMGDICT()
    menus = DAMGDICT()
    _count = 0

    def __init__(self, parent=None):
        super(PipelineManager, self).__init__(parent)

        self.url = __homepage__

        self.setObjectName(self.name)
        self.setWindowTitle(self.name)
        self.setWindowIcon(LogoIcon('PLM'))

        self.actionManager = ActionManager(self.parent)
        self.buttonManager = ButtonManager(self.parent)
        self.threadManager = ThreadManager(self.parent)

        self.mainWidget = Widget()
        self.layout = GridLayout()
        self.mainWidget.setLayout(self.layout)
        self.setCentralWidget(self.mainWidget)

        self.buildUI()

    def buildUI(self):

        self.header = MainHeader(self.parent)
        self.body = MidTab(self.buttonManager, self)
        self.footer = BotTab(self)
        self.statusBar = MainStatusBar(self)

        self.menus = self.header.menuBar.menus
        self.toolBars = self.header.toolBar.toolBars
        self.mns = self.header.menuBar.mns
        self.tbs = self.header.toolBar.tbs
        self.updating = self.header.connectStatus.updating
        self.server = self.header.connectStatus.server
        self.connectServer = self.header.connectStatus.connectServer
        self.connectInternet = self.header.connectStatus.connectInternet

        self.layouts = [self.header, self.body, self.footer, self.statusBar]

        self.layout.addWidget(self.header, 0, 0, 2, 9)
        self.layout.addWidget(self.body, 2, 0, 8, 9)
        self.layout.addWidget(self.footer, 10, 0, 6, 9)
        self.setStatusBar(self.statusBar)

        self.body.setFixedHeight(400)
        self.updateSize()

    def resizeEvent(self, event):
        self.updateSize()
        # print('header: {0}, body: {1}, footer: {2}'.format(self.header.height(), self.body.height(), self.footer.height()))
        super(PipelineManager, self).resizeEvent(event)

    def updateSize(self):
        bodySize = self.body.size()
        baseW = bodySize.width()
        baseH = bodySize.height()
        self.header.resize(baseW, baseH / 4)
        self.footer.resize(baseW, baseH * 3 / 4)

    @property
    def count(self):
        return self._count

    @count.setter
    def count(self, val):
        self._count = val

# -------------------------------------------------------------------------------------------------------------
# Created by panda on 6/07/2018 - 11:31 AM
# © 2017 - 2018 DAMGTEAM. All rights reserved
[ "pyPLM.Gui.LogoIcon", "PLM.cores.ThreadManager", "pyPLM.Widgets.Widget", "pyPLM.damg.DAMGDICT", "pyPLM.Widgets.GridLayout" ]
[((1109, 1119), 'pyPLM.damg.DAMGDICT', 'DAMGDICT', ([], {}), '()\n', (1117, 1119), False, 'from pyPLM.damg import DAMGDICT\n'), ((1162, 1172), 'pyPLM.damg.DAMGDICT', 'DAMGDICT', ([], {}), '()\n', (1170, 1172), False, 'from pyPLM.damg import DAMGDICT\n'), ((1667, 1693), 'PLM.cores.ThreadManager', 'ThreadManager', (['self.parent'], {}), '(self.parent)\n', (1680, 1693), False, 'from PLM.cores import ThreadManager\n'), ((1737, 1745), 'pyPLM.Widgets.Widget', 'Widget', ([], {}), '()\n', (1743, 1745), False, 'from pyPLM.Widgets import MainWindow, Widget, GridLayout\n'), ((1788, 1800), 'pyPLM.Widgets.GridLayout', 'GridLayout', ([], {}), '()\n', (1798, 1800), False, 'from pyPLM.Widgets import MainWindow, Widget, GridLayout\n'), ((1469, 1484), 'pyPLM.Gui.LogoIcon', 'LogoIcon', (['"""PLM"""'], {}), "('PLM')\n", (1477, 1484), False, 'from pyPLM.Gui import LogoIcon\n')]
import unittest from werkzeug.datastructures import MultiDict from tests.app.app_context_test_case import AppContextTestCase from app.helpers.form_helper import get_mapped_answers, get_form_for_location, post_form_for_location from app.questionnaire.location import Location from app.questionnaire.questionnaire_schema import QuestionnaireSchema from app.utilities.schema import load_schema_from_params from app.data_model.answer_store import AnswerStore, Answer from app.validation.validators import DateRequired, OptionalForm class TestFormHelper(AppContextTestCase): def test_get_form_for_block_location(self): with self.app_request_context(): schema = load_schema_from_params('test', '0102') block_json = schema.get_block('reporting-period') location = Location(group_id='rsi', group_instance=0, block_id='introduction') form = get_form_for_location(schema, block_json, location, AnswerStore(), metadata=None) self.assertTrue(hasattr(form, 'period-to')) self.assertTrue(hasattr(form, 'period-from')) period_from_field = getattr(form, 'period-from') period_to_field = getattr(form, 'period-to') self.assertIsInstance(period_from_field.month.validators[0], DateRequired) self.assertIsInstance(period_to_field.month.validators[0], DateRequired) def test_get_form_and_disable_mandatory_answers(self): with self.app_request_context(): schema = load_schema_from_params('test', '0102') block_json = schema.get_block('reporting-period') location = Location(group_id='rsi', group_instance=0, block_id='introduction') form = get_form_for_location(schema, block_json, location, AnswerStore(), metadata=None, disable_mandatory=True) self.assertTrue(hasattr(form, 'period-from')) self.assertTrue(hasattr(form, 'period-to')) period_from_field = getattr(form, 'period-from') period_to_field = getattr(form, 'period-to') self.assertIsInstance(period_from_field.month.validators[0], OptionalForm) self.assertIsInstance(period_to_field.month.validators[0], OptionalForm) def test_post_form_for_block_location(self): with self.app_request_context(): schema = load_schema_from_params('test', '0102') block_json = schema.get_block('reporting-period') location = Location(group_id='rsi', group_instance=0, block_id='introduction') form = post_form_for_location(schema, block_json, location, AnswerStore(), metadata=None, request_form={ 'period-from-day': '1', 'period-from-month': '05', 'period-from-year': '2015', 'period-to-day': '1', 'period-to-month': '09', 'period-to-year': '2017', }) self.assertTrue(hasattr(form, 'period-to')) self.assertTrue(hasattr(form, 'period-from')) period_to_field = getattr(form, 'period-to') period_from_field = getattr(form, 'period-from') self.assertIsInstance(period_from_field.month.validators[0], DateRequired) self.assertIsInstance(period_to_field.month.validators[0], DateRequired) self.assertEqual(period_from_field.data, '2015-05-01') self.assertEqual(period_to_field.data, '2017-09-01') def test_post_form_and_disable_mandatory(self): with self.app_request_context(): schema = load_schema_from_params('test', '0102') block_json = schema.get_block('reporting-period') location = Location(group_id='rsi', group_instance=0, block_id='introduction') form = post_form_for_location(schema, block_json, location, AnswerStore(), metadata=None, request_form={ }, disable_mandatory=True) self.assertTrue(hasattr(form, 'period-from')) self.assertTrue(hasattr(form, 'period-to')) period_from_field = getattr(form, 'period-from') period_to_field = getattr(form, 'period-to') self.assertIsInstance(period_from_field.month.validators[0], 
OptionalForm) self.assertIsInstance(period_to_field.month.validators[0], OptionalForm) def test_get_form_for_household_composition(self): with self.app_request_context(): schema = load_schema_from_params('census', 'household') block_json = schema.get_block('household-composition') location = Location('who-lives-here', 0, 'household-composition') error_messages = schema.error_messages form = get_form_for_location(schema, block_json, location, AnswerStore(), error_messages) self.assertTrue(hasattr(form, 'household')) self.assertEqual(len(form.household.entries), 1) first_field_entry = form.household[0] self.assertTrue(hasattr(first_field_entry, 'first-name')) self.assertTrue(hasattr(first_field_entry, 'middle-names')) self.assertTrue(hasattr(first_field_entry, 'last-name')) def test_post_form_for_household_composition(self): with self.app_request_context(): schema = load_schema_from_params('census', 'household') block_json = schema.get_block('household-composition') location = Location('who-lives-here', 0, 'household-composition') form = post_form_for_location(schema, block_json, location, AnswerStore(), metadata=None, request_form={ 'household-0-first-name': 'Joe', 'household-0-last-name': '', 'household-1-first-name': 'Bob', 'household-1-last-name': 'Seymour', }) self.assertEqual(len(form.household.entries), 2) self.assertEqual(form.household.entries[0].data, { 'first-name': 'Joe', 'middle-names': '', 'last-name': '' }) self.assertEqual(form.household.entries[1].data, { 'first-name': 'Bob', 'middle-names': '', 'last-name': 'Seymour' }) def test_get_form_for_household_relationship(self): with self.app_request_context(): schema = load_schema_from_params('census', 'household') block_json = schema.get_block('household-relationships') location = Location('who-lives-here-relationship', 0, 'household-relationships') error_messages = schema.error_messages answer_store = AnswerStore([ { 'group_id': 'who-lives-here-relationship', 'group_instance': 0, 'answer_id': 'first-name', 'block_id': 'household-composition', 'value': 'Joe', 'answer_instance': 0, }, { 'group_id': 'who-lives-here-relationship', 'group_instance': 0, 'answer_id': 'last-name', 'block_id': 'household-composition', 'value': 'Bloggs', 'answer_instance': 0, }, { 'group_id': 'who-lives-here-relationship', 'group_instance': 1, 'answer_id': 'first-name', 'block_id': 'household-composition', 'value': 'Jane', 'answer_instance': 1, }, { 'group_id': 'who-lives-here-relationship', 'group_instance': 1, 'answer_id': 'last-name', 'block_id': 'household-composition', 'value': 'Bloggs', 'answer_instance': 1, } ]) form = get_form_for_location(schema, block_json, location, answer_store, error_messages) answer = schema.get_answers_for_block('household-relationships')[0] self.assertTrue(hasattr(form, answer['id'])) field_list = getattr(form, answer['id']) # With two people, we need to define 1 relationship self.assertEqual(len(field_list.entries), 1) def test_post_form_for_household_relationship(self): with self.app_request_context(): schema = load_schema_from_params('census', 'household') block_json = schema.get_block('household-relationships') location = Location('who-lives-here-relationship', 0, 'household-relationships') answer_store = AnswerStore([ { 'answer_id': 'first-name', 'block_id': 'household-composition', 'value': 'Joe', 'answer_instance': 0, }, { 'answer_id': 'last-name', 'block_id': 'household-composition', 'value': 'Bloggs', 'answer_instance': 0, }, { 'answer_id': 'first-name', 'block_id': 'household-composition', 'value': 'Jane', 
'answer_instance': 1, }, { 'answer_id': 'last-name', 'block_id': 'household-composition', 'value': 'Bloggs', 'answer_instance': 1, } ]) answer = schema.get_answers_for_block('household-relationships')[0] form = post_form_for_location(schema, block_json, location, answer_store, metadata=None, request_form=MultiDict({'{answer_id}-0'.format(answer_id=answer['id']): '3'})) self.assertTrue(hasattr(form, answer['id'])) field_list = getattr(form, answer['id']) # With two people, we need to define 1 relationship self.assertEqual(len(field_list.entries), 1) # Check the data matches what was passed from request self.assertEqual(field_list.entries[0].data, '3') def test_post_form_for_radio_other_not_selected(self): with self.app_request_context(): schema = load_schema_from_params('test', 'radio_mandatory_with_mandatory_other') block_json = schema.get_block('radio-mandatory') location = Location('radio', 0, 'radio-mandatory') answer_store = AnswerStore([ { 'answer_id': 'radio-mandatory-answer', 'block_id': 'radio-mandatory', 'value': 'Other', 'answer_instance': 0, }, { 'answer_id': 'other-answer-mandatory', 'block_id': 'radio-mandatory', 'value': 'Other text field value', 'answer_instance': 0, } ]) form = post_form_for_location(schema, block_json, location, answer_store, metadata=None, request_form=MultiDict({'radio-mandatory-answer': 'Bacon', 'other-answer-mandatory': 'Old other text'})) self.assertTrue(hasattr(form, 'radio-mandatory-answer')) other_text_field = getattr(form, 'other-answer-mandatory') self.assertEqual(other_text_field.data, '') def test_post_form_for_radio_other_selected(self): with self.app_request_context(): schema = load_schema_from_params('test', 'radio_mandatory_with_mandatory_other') block_json = schema.get_block('radio-mandatory') location = Location('radio', 0, 'radio-mandatory') answer_store = AnswerStore([ { 'answer_id': 'radio-mandatory-answer', 'block_id': 'radio-mandatory', 'value': 'Other', 'answer_instance': 0, }, { 'answer_id': 'other-answer-mandatory', 'block_id': 'block-1', 'value': 'Other text field value', 'answer_instance': 0, } ]) radio_answer = schema.get_answers_for_block('radio-mandatory')[0] text_answer = 'other-answer-mandatory' form = post_form_for_location(schema, block_json, location, answer_store, metadata=None, request_form=MultiDict({ '{answer_id}'.format(answer_id=radio_answer['id']): 'Other', '{answer_id}'.format(answer_id=text_answer): 'Other text field value', })) other_text_field = getattr(form, 'other-answer-mandatory') self.assertEqual(other_text_field.data, 'Other text field value') class TestGetMappedAnswers(unittest.TestCase): def setUp(self): self.store = AnswerStore(None) def tearDown(self): self.store.clear() def test_maps_and_filters_answers(self): questionnaire = { 'sections': [{ 'id': 'section1', 'groups': [ { 'id': 'group1', 'blocks': [ { 'id': 'block1', 'questions': [{ 'id': 'question1', 'answers': [ { 'id': 'answer1', 'type': 'TextArea' } ] }] }, { 'id': 'block2', 'questions': [{ 'id': 'question2', 'answers': [ { 'id': 'answer2', 'type': 'TextArea' } ] }] }] }] }] } schema = QuestionnaireSchema(questionnaire) answer_1 = Answer( answer_id='answer2', answer_instance=1, group_instance=1, value=25, ) answer_2 = Answer( answer_id='answer1', answer_instance=1, group_instance=1, value=65, ) self.store.add(answer_1) self.store.add(answer_2) expected_answers = { 'answer1_1': 65 } self.assertEqual(get_mapped_answers(schema, self.store, block_id='block1', group_instance=1), expected_answers) def test_returns_ordered_map(self): questionnaire = 
{ 'sections': [{ 'id': 'section1', 'groups': [{ 'id': 'group1', 'blocks': [{ 'id': 'block1', 'questions': [{ 'id': 'question1', 'answers': [ { 'id': 'answer1', 'type': 'TextArea' } ] }] }] }] }] } schema = QuestionnaireSchema(questionnaire) answer = Answer( answer_id='answer1', group_instance=1, value=25, ) for i in range(0, 100): answer.answer_instance = i self.store.add(answer) last_instance = -1 self.assertEqual(len(self.store.answers), 100) mapped = get_mapped_answers(schema, self.store, block_id='block1', group_instance=1) for key, _ in mapped.items(): pos = key.find('_') instance = 0 if pos == -1 else int(key[pos + 1:]) self.assertGreater(instance, last_instance) last_instance = instance
[
 "app.data_model.answer_store.AnswerStore",
 "app.helpers.form_helper.get_form_for_location",
 "werkzeug.datastructures.MultiDict",
 "app.questionnaire.questionnaire_schema.QuestionnaireSchema",
 "app.helpers.form_helper.get_mapped_answers",
 "app.utilities.schema.load_schema_from_params",
 "app.questionnaire.location.Location",
 "app.data_model.answer_store.Answer"
]
[((13413, 13430), 'app.data_model.answer_store.AnswerStore', 'AnswerStore', (['None'], {}), '(None)\n', (13424, 13430), False, 'from app.data_model.answer_store import AnswerStore, Answer\n'), ((14891, 14925), 'app.questionnaire.questionnaire_schema.QuestionnaireSchema', 'QuestionnaireSchema', (['questionnaire'], {}), '(questionnaire)\n', (14910, 14925), False, 'from app.questionnaire.questionnaire_schema import QuestionnaireSchema\n'), ((14946, 15020), 'app.data_model.answer_store.Answer', 'Answer', ([], {'answer_id': '"""answer2"""', 'answer_instance': '(1)', 'group_instance': '(1)', 'value': '(25)'}), "(answer_id='answer2', answer_instance=1, group_instance=1, value=25)\n", (14952, 15020), False, 'from app.data_model.answer_store import AnswerStore, Answer\n'), ((15099, 15173), 'app.data_model.answer_store.Answer', 'Answer', ([], {'answer_id': '"""answer1"""', 'answer_instance': '(1)', 'group_instance': '(1)', 'value': '(65)'}), "(answer_id='answer1', answer_instance=1, group_instance=1, value=65)\n", (15105, 15173), False, 'from app.data_model.answer_store import AnswerStore, Answer\n'), ((16201, 16235), 'app.questionnaire.questionnaire_schema.QuestionnaireSchema', 'QuestionnaireSchema', (['questionnaire'], {}), '(questionnaire)\n', (16220, 16235), False, 'from app.questionnaire.questionnaire_schema import QuestionnaireSchema\n'), ((16254, 16309), 'app.data_model.answer_store.Answer', 'Answer', ([], {'answer_id': '"""answer1"""', 'group_instance': '(1)', 'value': '(25)'}), "(answer_id='answer1', group_instance=1, value=25)\n", (16260, 16309), False, 'from app.data_model.answer_store import AnswerStore, Answer\n'), ((16567, 16642), 'app.helpers.form_helper.get_mapped_answers', 'get_mapped_answers', (['schema', 'self.store'], {'block_id': '"""block1"""', 'group_instance': '(1)'}), "(schema, self.store, block_id='block1', group_instance=1)\n", (16585, 16642), False, 'from app.helpers.form_helper import get_mapped_answers, get_form_for_location, post_form_for_location\n'), ((684, 723), 'app.utilities.schema.load_schema_from_params', 'load_schema_from_params', (['"""test"""', '"""0102"""'], {}), "('test', '0102')\n", (707, 723), False, 'from app.utilities.schema import load_schema_from_params\n'), ((810, 877), 'app.questionnaire.location.Location', 'Location', ([], {'group_id': '"""rsi"""', 'group_instance': '(0)', 'block_id': '"""introduction"""'}), "(group_id='rsi', group_instance=0, block_id='introduction')\n", (818, 877), False, 'from app.questionnaire.location import Location\n'), ((1573, 1612), 'app.utilities.schema.load_schema_from_params', 'load_schema_from_params', (['"""test"""', '"""0102"""'], {}), "('test', '0102')\n", (1596, 1612), False, 'from app.utilities.schema import load_schema_from_params\n'), ((1699, 1766), 'app.questionnaire.location.Location', 'Location', ([], {'group_id': '"""rsi"""', 'group_instance': '(0)', 'block_id': '"""introduction"""'}), "(group_id='rsi', group_instance=0, block_id='introduction')\n", (1707, 1766), False, 'from app.questionnaire.location import Location\n'), ((2517, 2556), 'app.utilities.schema.load_schema_from_params', 'load_schema_from_params', (['"""test"""', '"""0102"""'], {}), "('test', '0102')\n", (2540, 2556), False, 'from app.utilities.schema import load_schema_from_params\n'), ((2643, 2710), 'app.questionnaire.location.Location', 'Location', ([], {'group_id': '"""rsi"""', 'group_instance': '(0)', 'block_id': '"""introduction"""'}), "(group_id='rsi', group_instance=0, block_id='introduction')\n", (2651, 2710), False, 'from app.questionnaire.location import Location\n'), ((3811, 3850), 'app.utilities.schema.load_schema_from_params', 'load_schema_from_params', (['"""test"""', '"""0102"""'], {}), "('test', '0102')\n", (3834, 3850), False, 'from app.utilities.schema import load_schema_from_params\n'), ((3937, 4004), 'app.questionnaire.location.Location', 'Location', ([], {'group_id': '"""rsi"""', 'group_instance': '(0)', 'block_id': '"""introduction"""'}), "(group_id='rsi', group_instance=0, block_id='introduction')\n", (3945, 4004), False, 'from app.questionnaire.location import Location\n'), ((4751, 4797), 'app.utilities.schema.load_schema_from_params', 'load_schema_from_params', (['"""census"""', '"""household"""'], {}), "('census', 'household')\n", (4774, 4797), False, 'from app.utilities.schema import load_schema_from_params\n'), ((4889, 4943), 'app.questionnaire.location.Location', 'Location', (['"""who-lives-here"""', '(0)', '"""household-composition"""'], {}), "('who-lives-here', 0, 'household-composition')\n", (4897, 4943), False, 'from app.questionnaire.location import Location\n'), ((5598, 5644), 'app.utilities.schema.load_schema_from_params', 'load_schema_from_params', (['"""census"""', '"""household"""'], {}), "('census', 'household')\n", (5621, 5644), False, 'from app.utilities.schema import load_schema_from_params\n'), ((5736, 5790), 'app.questionnaire.location.Location', 'Location', (['"""who-lives-here"""', '(0)', '"""household-composition"""'], {}), "('who-lives-here', 0, 'household-composition')\n", (5744, 5790), False, 'from app.questionnaire.location import Location\n'), ((6673, 6719), 'app.utilities.schema.load_schema_from_params', 'load_schema_from_params', (['"""census"""', '"""household"""'], {}), "('census', 'household')\n", (6696, 6719), False, 'from app.utilities.schema import load_schema_from_params\n'), ((6813, 6882), 'app.questionnaire.location.Location', 'Location', (['"""who-lives-here-relationship"""', '(0)', '"""household-relationships"""'], {}), "('who-lives-here-relationship', 0, 'household-relationships')\n", (6821, 6882), False, 'from app.questionnaire.location import Location\n'), ((6962, 7694), 'app.data_model.answer_store.AnswerStore', 'AnswerStore', (["[{'group_id': 'who-lives-here-relationship', 'group_instance': 0,\n 'answer_id': 'first-name', 'block_id': 'household-composition', 'value':\n 'Joe', 'answer_instance': 0}, {'group_id':\n 'who-lives-here-relationship', 'group_instance': 0, 'answer_id':\n 'last-name', 'block_id': 'household-composition', 'value': 'Bloggs',\n 'answer_instance': 0}, {'group_id': 'who-lives-here-relationship',\n 'group_instance': 1, 'answer_id': 'first-name', 'block_id':\n 'household-composition', 'value': 'Jane', 'answer_instance': 1}, {\n 'group_id': 'who-lives-here-relationship', 'group_instance': 1,\n 'answer_id': 'last-name', 'block_id': 'household-composition', 'value':\n 'Bloggs', 'answer_instance': 1}]"], {}), "([{'group_id': 'who-lives-here-relationship', 'group_instance': \n 0, 'answer_id': 'first-name', 'block_id': 'household-composition',\n 'value': 'Joe', 'answer_instance': 0}, {'group_id':\n 'who-lives-here-relationship', 'group_instance': 0, 'answer_id':\n 'last-name', 'block_id': 'household-composition', 'value': 'Bloggs',\n 'answer_instance': 0}, {'group_id': 'who-lives-here-relationship',\n 'group_instance': 1, 'answer_id': 'first-name', 'block_id':\n 'household-composition', 'value': 'Jane', 'answer_instance': 1}, {\n 'group_id': 'who-lives-here-relationship', 'group_instance': 1,\n 'answer_id': 'last-name', 'block_id': 'household-composition', 'value':\n 'Bloggs', 'answer_instance': 1}])\n", (6973, 7694), False, 'from app.data_model.answer_store import AnswerStore, Answer\n'), ((8258, 8343), 'app.helpers.form_helper.get_form_for_location', 'get_form_for_location', (['schema', 'block_json', 'location', 'answer_store', 'error_messages'], {}), '(schema, block_json, location, answer_store,\n error_messages)\n', (8279, 8343), False, 'from app.helpers.form_helper import get_mapped_answers, get_form_for_location, post_form_for_location\n'), ((8775, 8821), 'app.utilities.schema.load_schema_from_params', 'load_schema_from_params', (['"""census"""', '"""household"""'], {}), "('census', 'household')\n", (8798, 8821), False, 'from app.utilities.schema import load_schema_from_params\n'), ((8915, 8984), 'app.questionnaire.location.Location', 'Location', (['"""who-lives-here-relationship"""', '(0)', '"""household-relationships"""'], {}), "('who-lives-here-relationship', 0, 'household-relationships')\n", (8923, 8984), False, 'from app.questionnaire.location import Location\n'), ((9013, 9473), 'app.data_model.answer_store.AnswerStore', 'AnswerStore', (["[{'answer_id': 'first-name', 'block_id': 'household-composition', 'value':\n 'Joe', 'answer_instance': 0}, {'answer_id': 'last-name', 'block_id':\n 'household-composition', 'value': 'Bloggs', 'answer_instance': 0}, {\n 'answer_id': 'first-name', 'block_id': 'household-composition', 'value':\n 'Jane', 'answer_instance': 1}, {'answer_id': 'last-name', 'block_id':\n 'household-composition', 'value': 'Bloggs', 'answer_instance': 1}]"], {}), "([{'answer_id': 'first-name', 'block_id':\n 'household-composition', 'value': 'Joe', 'answer_instance': 0}, {\n 'answer_id': 'last-name', 'block_id': 'household-composition', 'value':\n 'Bloggs', 'answer_instance': 0}, {'answer_id': 'first-name', 'block_id':\n 'household-composition', 'value': 'Jane', 'answer_instance': 1}, {\n 'answer_id': 'last-name', 'block_id': 'household-composition', 'value':\n 'Bloggs', 'answer_instance': 1}])\n", (9024, 9473), False, 'from app.data_model.answer_store import AnswerStore, Answer\n'), ((10663, 10734), 'app.utilities.schema.load_schema_from_params', 'load_schema_from_params', (['"""test"""', '"""radio_mandatory_with_mandatory_other"""'], {}), "('test', 'radio_mandatory_with_mandatory_other')\n", (10686, 10734), False, 'from app.utilities.schema import load_schema_from_params\n'), ((10820, 10859), 'app.questionnaire.location.Location', 'Location', (['"""radio"""', '(0)', '"""radio-mandatory"""'], {}), "('radio', 0, 'radio-mandatory')\n", (10828, 10859), False, 'from app.questionnaire.location import Location\n'), ((10888, 11155), 'app.data_model.answer_store.AnswerStore', 'AnswerStore', (["[{'answer_id': 'radio-mandatory-answer', 'block_id': 'radio-mandatory',\n 'value': 'Other', 'answer_instance': 0}, {'answer_id':\n 'other-answer-mandatory', 'block_id': 'radio-mandatory', 'value':\n 'Other text field value', 'answer_instance': 0}]"], {}), "([{'answer_id': 'radio-mandatory-answer', 'block_id':\n 'radio-mandatory', 'value': 'Other', 'answer_instance': 0}, {\n 'answer_id': 'other-answer-mandatory', 'block_id': 'radio-mandatory',\n 'value': 'Other text field value', 'answer_instance': 0}])\n", (10899, 11155), False, 'from app.data_model.answer_store import AnswerStore, Answer\n'), ((12018, 12089), 'app.utilities.schema.load_schema_from_params', 'load_schema_from_params', (['"""test"""', '"""radio_mandatory_with_mandatory_other"""'], {}), "('test', 'radio_mandatory_with_mandatory_other')\n", (12041, 12089), False, 'from app.utilities.schema import load_schema_from_params\n'), ((12175, 12214), 'app.questionnaire.location.Location', 'Location', (['"""radio"""', '(0)', '"""radio-mandatory"""'], {}), "('radio', 0, 'radio-mandatory')\n", (12183, 12214), False, 'from app.questionnaire.location import Location\n'), ((12243, 12502), 'app.data_model.answer_store.AnswerStore', 'AnswerStore', (["[{'answer_id': 'radio-mandatory-answer', 'block_id': 'radio-mandatory',\n 'value': 'Other', 'answer_instance': 0}, {'answer_id':\n 'other-answer-mandatory', 'block_id': 'block-1', 'value':\n 'Other text field value', 'answer_instance': 0}]"], {}), "([{'answer_id': 'radio-mandatory-answer', 'block_id':\n 'radio-mandatory', 'value': 'Other', 'answer_instance': 0}, {\n 'answer_id': 'other-answer-mandatory', 'block_id': 'block-1', 'value':\n 'Other text field value', 'answer_instance': 0}])\n", (12254, 12502), False, 'from app.data_model.answer_store import AnswerStore, Answer\n'), ((15394, 15469), 'app.helpers.form_helper.get_mapped_answers', 'get_mapped_answers', (['schema', 'self.store'], {'block_id': '"""block1"""', 'group_instance': '(1)'}), "(schema, self.store, block_id='block1', group_instance=1)\n", (15412, 15469), False, 'from app.helpers.form_helper import get_mapped_answers, get_form_for_location, post_form_for_location\n'), ((1014, 1027), 'app.data_model.answer_store.AnswerStore', 'AnswerStore', ([], {}), '()\n', (1025, 1027), False, 'from app.data_model.answer_store import AnswerStore, Answer\n'), ((1944, 1957), 'app.data_model.answer_store.AnswerStore', 'AnswerStore', ([], {}), '()\n', (1955, 1957), False, 'from app.data_model.answer_store import AnswerStore, Answer\n'), ((2848, 2861), 'app.data_model.answer_store.AnswerStore', 'AnswerStore', ([], {}), '()\n', (2859, 2861), False, 'from app.data_model.answer_store import AnswerStore, Answer\n'), ((4142, 4155), 'app.data_model.answer_store.AnswerStore', 'AnswerStore', ([], {}), '()\n', (4153, 4155), False, 'from app.data_model.answer_store import AnswerStore, Answer\n'), ((5067, 5080), 'app.data_model.answer_store.AnswerStore', 'AnswerStore', ([], {}), '()\n', (5078, 5080), False, 'from app.data_model.answer_store import AnswerStore, Answer\n'), ((5864, 5877), 'app.data_model.answer_store.AnswerStore', 'AnswerStore', ([], {}), '()\n', (5875, 5877), False, 'from app.data_model.answer_store import AnswerStore, Answer\n'), ((11544, 11638), 'werkzeug.datastructures.MultiDict', 'MultiDict', (["{'radio-mandatory-answer': 'Bacon', 'other-answer-mandatory': 'Old other text'}"], {}), "({'radio-mandatory-answer': 'Bacon', 'other-answer-mandatory':\n 'Old other text'})\n", (11553, 11638), False, 'from werkzeug.datastructures import MultiDict\n')]
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.template import TemplateSyntaxError
from django.template.loader import render_to_string
from django.utils.encoding import smart_text
from django.utils.safestring import mark_safe


def inlines(value, return_list=False):
    """
    Return ``value`` with all <inline/> tags rendered.
    """
    try:
        from BeautifulSoup import BeautifulStoneSoup as bs1
        BS = bs1
    except ImportError:
        try:
            from beautifulsoup import BeautifulStoneSoup as bs2
            BS = bs2
        except ImportError:
            try:
                from bs4 import BeautifulSoup as bs4
                BS = bs4
            except ImportError:
                return ''

    content = BS(value, "html.parser")
    inline_list = []

    if return_list:
        for inline in content.findAll('inline'):
            rendered_inline = render_inline(inline)
            inline_list.append(rendered_inline['context'])
        return inline_list
    else:
        for inline in content.findAll('inline'):
            rendered_inline = render_inline(inline)
            if rendered_inline:
                string = BS(
                    render_to_string(
                        rendered_inline['template'],
                        rendered_inline['context']),
                    "html.parser")
                inline.replaceWith(string)
            else:
                inline.replaceWith('')
        return mark_safe(str(content))


def render_inline(inline):
    """
    Replace inline markup with template markup that matches the
    appropriate app and model.
    """
    # Look for inline type, 'app.model'
    try:
        app_label, model_name = inline['type'].split('.')
    except:
        if settings.DEBUG:
            raise TemplateSyntaxError(
                "Couldn't find the attribute 'type' in the <inline> tag.")
        else:
            return ''

    # Look for content type
    try:
        content_type = ContentType.objects.get(
            app_label=app_label, model=model_name)
        model = content_type.model_class()
    except ContentType.DoesNotExist:
        if settings.DEBUG:
            raise TemplateSyntaxError("Inline ContentType not found.")
        else:
            return ''

    # Check for an inline class attribute
    try:
        inline_class = smart_text(' '.join(inline['class']))
    except:
        inline_class = ''

    try:
        count = int(inline['count'])
        try:
            qs_name = inline['queryset']
            obj_list = model.objects.__getattribute__(qs_name)()
        except (KeyError, AttributeError):
            # Use the default queryset
            obj_list = model.objects.all()
        try:
            order_by = inline['sort']
            obj_list = obj_list.order_by(order_by)
        except KeyError:
            # 'sort' is optional
            pass
        try:
            filtering = inline['filter']
            filters = filtering.split(',')
            query_dict = {}
            for f in filters:
                pair = f.split(':')
                if not len(pair) == 2:
                    continue
                key = pair[0]
                if pair[1].lower() == "true":
                    value = True
                elif pair[1].lower() == "false":
                    value = False
                else:
                    value = pair[1]
                query_dict[key] = value
            obj_list = obj_list.filter(**query_dict)
        except KeyError:
            # 'filter' is optional
            pass
        except:
            if settings.DEBUG:
                raise ValueError('Invalid query string: %s' % filtering)
            else:
                return ''
        context = {'object_list': obj_list[:count], 'class': inline_class}
    except ValueError as e:
        if settings.DEBUG:
            raise ValueError(
                "The <inline> `count` and/or `sort` attributes "
                "are missing or invalid: %s" % e)
        else:
            return ''
    except KeyError:
        try:
            id_list = [int(i) for i in inline['ids'].split(',')]
            obj_list = model.objects.in_bulk(id_list)
            obj_list = list(obj_list[int(i)] for i in id_list)
            context = {'object_list': obj_list, 'class': inline_class}
        except ValueError:
            if settings.DEBUG:
                raise ValueError(
                    "The <inline> ids attribute is missing or invalid.")
            else:
                return ''
        except KeyError:
            try:
                obj = model.objects.get(pk=inline['id'])
                context = {
                    'content_type': "%s.%s" % (app_label, model_name),
                    'object': obj,
                    'class': inline_class,
                    'settings': settings
                }
            except model.DoesNotExist:
                if settings.DEBUG:
                    raise model.DoesNotExist(
                        "%s with pk of '%s' does not exist" % (
                            model_name, inline['id']))
                else:
                    return ''
            except:
                if settings.DEBUG:
                    raise TemplateSyntaxError(
                        "The <inline> id attribute is missing or invalid.")
                else:
                    return ''

    template = [
        "inlines/%s_%s.html" % (app_label, model_name),
        "inlines/default.html"]
    rendered_inline = {
        'template': template,
        'context': context
    }
    return rendered_inline
[
 "django.template.TemplateSyntaxError",
 "django.template.loader.render_to_string",
 "django.contrib.contenttypes.models.ContentType.objects.get"
]
[((2034, 2096), 'django.contrib.contenttypes.models.ContentType.objects.get', 'ContentType.objects.get', ([], {'app_label': 'app_label', 'model': 'model_name'}), '(app_label=app_label, model=model_name)\n', (2057, 2096), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((1841, 1919), 'django.template.TemplateSyntaxError', 'TemplateSyntaxError', (['"""Couldn\'t find the attribute \'type\' in the <inline> tag."""'], {}), '("Couldn\'t find the attribute \'type\' in the <inline> tag.")\n', (1860, 1919), False, 'from django.template import TemplateSyntaxError\n'), ((2235, 2287), 'django.template.TemplateSyntaxError', 'TemplateSyntaxError', (['"""Inline ContentType not found."""'], {}), "('Inline ContentType not found.')\n", (2254, 2287), False, 'from django.template import TemplateSyntaxError\n'), ((1237, 1310), 'django.template.loader.render_to_string', 'render_to_string', (["rendered_inline['template']", "rendered_inline['context']"], {}), "(rendered_inline['template'], rendered_inline['context'])\n", (1253, 1310), False, 'from django.template.loader import render_to_string\n'), ((5300, 5371), 'django.template.TemplateSyntaxError', 'TemplateSyntaxError', (['"""The <inline> id attribute is missing or invalid."""'], {}), "('The <inline> id attribute is missing or invalid.')\n", (5319, 5371), False, 'from django.template import TemplateSyntaxError\n')]
import uuid

from django.db import models

from users.models import User


class normalUserProfile(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
    first_name = models.CharField(max_length=50, unique=False)
    last_name = models.CharField(max_length=50, unique=False)
    phone_number = models.CharField(max_length=10, unique=True, null=False, blank=False)
    age = models.PositiveIntegerField(null=False, blank=False)

    GENDER_CHOICES = (
        ('M', 'Male'),
        ('F', 'Female'),
    )
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES)

    class Meta:
        db_table = "profile"


class vipUserProfile(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    user = models.ForeignKey(normalUserProfile, on_delete=models.CASCADE, related_name='normal')
    rate = models.IntegerField()

    class Meta:
        db_table = "vip"
[
 "django.db.models.OneToOneField",
 "django.db.models.CharField",
 "django.db.models.ForeignKey",
 "django.db.models.PositiveIntegerField",
 "django.db.models.IntegerField",
 "django.db.models.UUIDField"
]
[((123, 193), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'primary_key': '(True)', 'default': 'uuid.uuid4', 'editable': '(False)'}), '(primary_key=True, default=uuid.uuid4, editable=False)\n', (139, 193), False, 'from django.db import models\n'), ((205, 281), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE', 'related_name': '"""profile"""'}), "(User, on_delete=models.CASCADE, related_name='profile')\n", (225, 281), False, 'from django.db import models\n'), ((299, 344), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'unique': '(False)'}), '(max_length=50, unique=False)\n', (315, 344), False, 'from django.db import models\n'), ((361, 406), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'unique': '(False)'}), '(max_length=50, unique=False)\n', (377, 406), False, 'from django.db import models\n'), ((426, 495), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'unique': '(True)', 'null': '(False)', 'blank': '(False)'}), '(max_length=10, unique=True, null=False, blank=False)\n', (442, 495), False, 'from django.db import models\n'), ((506, 558), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'null': '(False)', 'blank': '(False)'}), '(null=False, blank=False)\n', (533, 558), False, 'from django.db import models\n'), ((649, 703), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'GENDER_CHOICES'}), '(max_length=1, choices=GENDER_CHOICES)\n', (665, 703), False, 'from django.db import models\n'), ((798, 868), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'primary_key': '(True)', 'default': 'uuid.uuid4', 'editable': '(False)'}), '(primary_key=True, default=uuid.uuid4, editable=False)\n', (814, 868), False, 'from django.db import models\n'), ((880, 970), 'django.db.models.ForeignKey', 'models.ForeignKey', (['normalUserProfile'], {'on_delete': 'models.CASCADE', 'related_name': '"""normal"""'}), "(normalUserProfile, on_delete=models.CASCADE, related_name\n ='normal')\n", (897, 970), False, 'from django.db import models\n'), ((977, 998), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (996, 998), False, 'from django.db import models\n')]
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Placement API handlers for resource classes."""

from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import timeutils
import webob

from designer_family import exception
from designer_family import microversion
from designer_family.objects import resource_class as rc_obj
from designer_family.policies import resource_class as policies
from designer_family.schemas import resource_class as schema
from designer_family import util
from designer_family import wsgi_wrapper


def _serialize_links(environ, rc):
    url = util.resource_class_url(environ, rc)
    links = [{'rel': 'self', 'href': url}]
    return links


def _serialize_resource_class(environ, rc):
    data = {
        'name': rc.name,
        'links': _serialize_links(environ, rc)
    }
    return data


def _serialize_resource_classes(environ, rcs, want_version):
    output = []
    last_modified = None
    get_last_modified = want_version.matches((1, 15))
    for rc in rcs:
        if get_last_modified:
            last_modified = util.pick_last_modified(last_modified, rc)
        data = _serialize_resource_class(environ, rc)
        output.append(data)
    last_modified = last_modified or timeutils.utcnow(with_timezone=True)
    return ({"resource_classes": output}, last_modified)


@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.2')
@util.require_content('application/json')
def create_resource_class(req):
    """POST to create a resource class.

    On success return a 201 response with an empty body and a location
    header pointing to the newly created resource class.
    """
    context = req.environ['designer_family.context']
    context.can(policies.CREATE)
    data = util.extract_json(req.body, schema.POST_RC_SCHEMA_V1_2)

    try:
        rc = rc_obj.ResourceClass(context, name=data['name'])
        rc.create()
    except exception.ResourceClassExists:
        raise webob.exc.HTTPConflict(
            'Conflicting resource class already exists: %(name)s' %
            {'name': data['name']})
    except exception.MaxDBRetriesExceeded:
        raise webob.exc.HTTPConflict(
            'Max retries of DB transaction exceeded attempting '
            'to create resource class: %(name)s, please '
            'try again.' % {'name': data['name']})

    req.response.location = util.resource_class_url(req.environ, rc)
    req.response.status = 201
    req.response.content_type = None
    return req.response


@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.2')
def delete_resource_class(req):
    """DELETE to destroy a single resource class.

    On success return a 204 and an empty body.
    """
    name = util.wsgi_path_item(req.environ, 'name')
    context = req.environ['designer_family.context']
    context.can(policies.DELETE)
    # The containing application will catch a not found here.
    rc = rc_obj.ResourceClass.get_by_name(context, name)
    try:
        rc.destroy()
    except exception.ResourceClassCannotDeleteStandard as exc:
        raise webob.exc.HTTPBadRequest(
            'Error in delete resource class: %(error)s' % {'error': exc})
    except exception.ResourceClassInUse as exc:
        raise webob.exc.HTTPConflict(
            'Error in delete resource class: %(error)s' % {'error': exc})
    req.response.status = 204
    req.response.content_type = None
    return req.response


@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.2')
@util.check_accept('application/json')
def get_resource_class(req):
    """Get a single resource class.

    On success return a 200 with an application/json body representing
    the resource class.
    """
    name = util.wsgi_path_item(req.environ, 'name')
    context = req.environ['designer_family.context']
    context.can(policies.SHOW)
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    # The containing application will catch a not found here.
    rc = rc_obj.ResourceClass.get_by_name(context, name)
    req.response.body = encodeutils.to_utf8(jsonutils.dumps(
        _serialize_resource_class(req.environ, rc))
    )
    req.response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        req.response.cache_control = 'no-cache'
        # Non-custom resource classes will return None from pick_last_modified,
        # so the 'or' causes utcnow to be used.
        last_modified = util.pick_last_modified(None, rc) or timeutils.utcnow(
            with_timezone=True)
        req.response.last_modified = last_modified
    return req.response


@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.2')
@util.check_accept('application/json')
def list_resource_classes(req):
    """GET a list of resource classes.

    On success return a 200 and an application/json body representing a
    collection of resource classes.
    """
    context = req.environ['designer_family.context']
    context.can(policies.LIST)
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]

    rcs = rc_obj.get_all(context)
    response = req.response
    output, last_modified = _serialize_resource_classes(
        req.environ, rcs, want_version)
    response.body = encodeutils.to_utf8(jsonutils.dumps(output))
    response.content_type = 'application/json'
    if want_version.matches((1, 15)):
        response.last_modified = last_modified
        response.cache_control = 'no-cache'
    return response


@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.2', '1.6')
@util.require_content('application/json')
def update_resource_class(req):
    """PUT to update a single resource class.

    On success return a 200 response with a representation of the updated
    resource class.
    """
    name = util.wsgi_path_item(req.environ, 'name')
    context = req.environ['designer_family.context']
    context.can(policies.UPDATE)

    data = util.extract_json(req.body, schema.PUT_RC_SCHEMA_V1_2)

    # The containing application will catch a not found here.
    rc = rc_obj.ResourceClass.get_by_name(context, name)

    rc.name = data['name']
    try:
        rc.save()
    except exception.ResourceClassExists:
        raise webob.exc.HTTPConflict(
            'Resource class already exists: %(name)s' %
            {'name': rc.name})
    except exception.ResourceClassCannotUpdateStandard:
        raise webob.exc.HTTPBadRequest(
            'Cannot update standard resource class %(rp_name)s' %
            {'rp_name': name})

    req.response.body = encodeutils.to_utf8(jsonutils.dumps(
        _serialize_resource_class(req.environ, rc))
    )
    req.response.status = 200
    req.response.content_type = 'application/json'
    return req.response


@wsgi_wrapper.PlacementWsgify  # noqa
@microversion.version_handler('1.7')
def update_resource_class(req):
    """PUT to create or validate the existence of single resource class.

    On a successful create return 201. Return 204 if the class already
    exists. If the resource class is not a custom resource class, return
    a 400. 409 might be a better choice, but 400 aligns with previous code.
    """
    name = util.wsgi_path_item(req.environ, 'name')
    context = req.environ['designer_family.context']
    context.can(policies.UPDATE)

    # Use JSON validation to validation resource class name.
    util.extract_json('{"name": "%s"}' % name, schema.PUT_RC_SCHEMA_V1_2)

    status = 204
    try:
        rc = rc_obj.ResourceClass.get_by_name(context, name)
    except exception.NotFound:
        try:
            rc = rc_obj.ResourceClass(context, name=name)
            rc.create()
            status = 201
        # We will not see ResourceClassCannotUpdateStandard because
        # that was already caught when validating the {name}.
        except exception.ResourceClassExists:
            # Someone just now created the class, so stick with 204
            pass

    req.response.status = status
    req.response.content_type = None
    req.response.location = util.resource_class_url(req.environ, rc)
    return req.response
[
 "webob.exc.HTTPBadRequest",
 "designer_family.microversion.version_handler",
 "designer_family.util.check_accept",
 "designer_family.objects.resource_class.get_all",
 "oslo_serialization.jsonutils.dumps",
 "oslo_utils.timeutils.utcnow",
 "designer_family.objects.resource_class.ResourceClass",
 "webob.exc.HTTPConflict",
 "designer_family.util.resource_class_url",
 "designer_family.util.wsgi_path_item",
 "designer_family.util.pick_last_modified",
 "designer_family.util.extract_json",
 "designer_family.objects.resource_class.ResourceClass.get_by_name",
 "designer_family.util.require_content"
]
[((1907, 1942), 'designer_family.microversion.version_handler', 'microversion.version_handler', (['"""1.2"""'], {}), "('1.2')\n", (1935, 1942), False, 'from designer_family import microversion\n'), ((1944, 1984), 'designer_family.util.require_content', 'util.require_content', (['"""application/json"""'], {}), "('application/json')\n", (1964, 1984), False, 'from designer_family import util\n'), ((3084, 3119), 'designer_family.microversion.version_handler', 'microversion.version_handler', (['"""1.2"""'], {}), "('1.2')\n", (3112, 3119), False, 'from designer_family import microversion\n'), ((4006, 4041), 'designer_family.microversion.version_handler', 'microversion.version_handler', (['"""1.2"""'], {}), "('1.2')\n", (4034, 4041), False, 'from designer_family import microversion\n'), ((4043, 4080), 'designer_family.util.check_accept', 'util.check_accept', (['"""application/json"""'], {}), "('application/json')\n", (4060, 4080), False, 'from designer_family import util\n'), ((5175, 5210), 'designer_family.microversion.version_handler', 'microversion.version_handler', (['"""1.2"""'], {}), "('1.2')\n", (5203, 5210), False, 'from designer_family import microversion\n'), ((5212, 5249), 'designer_family.util.check_accept', 'util.check_accept', (['"""application/json"""'], {}), "('application/json')\n", (5229, 5249), False, 'from designer_family import util\n'), ((6042, 6084), 'designer_family.microversion.version_handler', 'microversion.version_handler', (['"""1.2"""', '"""1.6"""'], {}), "('1.2', '1.6')\n", (6070, 6084), False, 'from designer_family import microversion\n'), ((6086, 6126), 'designer_family.util.require_content', 'util.require_content', (['"""application/json"""'], {}), "('application/json')\n", (6106, 6126), False, 'from designer_family import util\n'), ((7315, 7350), 'designer_family.microversion.version_handler', 'microversion.version_handler', (['"""1.7"""'], {}), "('1.7')\n", (7343, 7350), False, 'from designer_family import microversion\n'), ((1133, 1169), 'designer_family.util.resource_class_url', 'util.resource_class_url', (['environ', 'rc'], {}), '(environ, rc)\n', (1156, 1169), False, 'from designer_family import util\n'), ((2291, 2346), 'designer_family.util.extract_json', 'util.extract_json', (['req.body', 'schema.POST_RC_SCHEMA_V1_2'], {}), '(req.body, schema.POST_RC_SCHEMA_V1_2)\n', (2308, 2346), False, 'from designer_family import util\n'), ((2919, 2959), 'designer_family.util.resource_class_url', 'util.resource_class_url', (['req.environ', 'rc'], {}), '(req.environ, rc)\n', (2942, 2959), False, 'from designer_family import util\n'), ((3269, 3309), 'designer_family.util.wsgi_path_item', 'util.wsgi_path_item', (['req.environ', '"""name"""'], {}), "(req.environ, 'name')\n", (3288, 3309), False, 'from designer_family import util\n'), ((3467, 3514), 'designer_family.objects.resource_class.ResourceClass.get_by_name', 'rc_obj.ResourceClass.get_by_name', (['context', 'name'], {}), '(context, name)\n', (3499, 3514), True, 'from designer_family.objects import resource_class as rc_obj\n'), ((4261, 4301), 'designer_family.util.wsgi_path_item', 'util.wsgi_path_item', (['req.environ', '"""name"""'], {}), "(req.environ, 'name')\n", (4280, 4301), False, 'from designer_family import util\n'), ((4523, 4570), 'designer_family.objects.resource_class.ResourceClass.get_by_name', 'rc_obj.ResourceClass.get_by_name', (['context', 'name'], {}), '(context, name)\n', (4555, 4570), True, 'from designer_family.objects import resource_class as rc_obj\n'), ((5598, 5621), 'designer_family.objects.resource_class.get_all', 'rc_obj.get_all', (['context'], {}), '(context)\n', (5612, 5621), True, 'from designer_family.objects import resource_class as rc_obj\n'), ((6319, 6359), 'designer_family.util.wsgi_path_item', 'util.wsgi_path_item', (['req.environ', '"""name"""'], {}), "(req.environ, 'name')\n", (6338, 6359), False, 'from designer_family import util\n'), ((6458, 6512), 'designer_family.util.extract_json', 'util.extract_json', (['req.body', 'schema.PUT_RC_SCHEMA_V1_2'], {}), '(req.body, schema.PUT_RC_SCHEMA_V1_2)\n', (6475, 6512), False, 'from designer_family import util\n'), ((6585, 6632), 'designer_family.objects.resource_class.ResourceClass.get_by_name', 'rc_obj.ResourceClass.get_by_name', (['context', 'name'], {}), '(context, name)\n', (6617, 6632), True, 'from designer_family.objects import resource_class as rc_obj\n'), ((7696, 7736), 'designer_family.util.wsgi_path_item', 'util.wsgi_path_item', (['req.environ', '"""name"""'], {}), "(req.environ, 'name')\n", (7715, 7736), False, 'from designer_family import util\n'), ((7889, 7958), 'designer_family.util.extract_json', 'util.extract_json', (['(\'{"name": "%s"}\' % name)', 'schema.PUT_RC_SCHEMA_V1_2'], {}), '(\'{"name": "%s"}\' % name, schema.PUT_RC_SCHEMA_V1_2)\n', (7906, 7958), False, 'from designer_family import util\n'), ((8558, 8598), 'designer_family.util.resource_class_url', 'util.resource_class_url', (['req.environ', 'rc'], {}), '(req.environ, rc)\n', (8581, 8598), False, 'from designer_family import util\n'), ((1780, 1816), 'oslo_utils.timeutils.utcnow', 'timeutils.utcnow', ([], {'with_timezone': '(True)'}), '(with_timezone=True)\n', (1796, 1816), False, 'from oslo_utils import timeutils\n'), ((2370, 2418), 'designer_family.objects.resource_class.ResourceClass', 'rc_obj.ResourceClass', (['context'], {'name': "data['name']"}), "(context, name=data['name'])\n", (2390, 2418), True, 'from designer_family.objects import resource_class as rc_obj\n'), ((5788, 5811), 'oslo_serialization.jsonutils.dumps', 'jsonutils.dumps', (['output'], {}), '(output)\n', (5803, 5811), False, 'from oslo_serialization import jsonutils\n'), ((7999, 8046), 'designer_family.objects.resource_class.ResourceClass.get_by_name', 'rc_obj.ResourceClass.get_by_name', (['context', 'name'], {}), '(context, name)\n', (8031, 8046), True, 'from designer_family.objects import resource_class as rc_obj\n'), ((1618, 1660), 'designer_family.util.pick_last_modified', 'util.pick_last_modified', (['last_modified', 'rc'], {}), '(last_modified, rc)\n', (1641, 1660), False, 'from designer_family import util\n'), ((2495, 2607), 'webob.exc.HTTPConflict', 'webob.exc.HTTPConflict', (["('Conflicting resource class already exists: %(name)s' % {'name': data['name']}\n )"], {}), "(\n 'Conflicting resource class already exists: %(name)s' % {'name': data[\n 'name']})\n", (2517, 2607), False, 'import webob\n'), ((2680, 2844), 'webob.exc.HTTPConflict', 'webob.exc.HTTPConflict', (["('Max retries of DB transaction exceeded attempting to create resource class: %(name)s, please try again.'\n % {'name': data['name']})"], {}), "(\n 'Max retries of DB transaction exceeded attempting to create resource class: %(name)s, please try again.'\n % {'name': data['name']})\n", (2702, 2844), False, 'import webob\n'), ((3622, 3713), 'webob.exc.HTTPBadRequest', 'webob.exc.HTTPBadRequest', (["('Error in delete resource class: %(error)s' % {'error': exc})"], {}), "('Error in delete resource class: %(error)s' % {\n 'error': exc})\n", (3646, 3713), False, 'import webob\n'), ((3784, 3873), 'webob.exc.HTTPConflict', 'webob.exc.HTTPConflict', (["('Error in delete resource class: %(error)s' % {'error': exc})"], {}), "('Error in delete resource class: %(error)s' % {\n 'error': exc})\n", (3806, 3873), False, 'import webob\n'), ((4980, 5013), 'designer_family.util.pick_last_modified', 'util.pick_last_modified', (['None', 'rc'], {}), '(None, rc)\n', (5003, 5013), False, 'from designer_family import util\n'), ((5017, 5053), 'oslo_utils.timeutils.utcnow', 'timeutils.utcnow', ([], {'with_timezone': '(True)'}), '(with_timezone=True)\n', (5033, 5053), False, 'from oslo_utils import timeutils\n'), ((6745, 6834), 'webob.exc.HTTPConflict', 'webob.exc.HTTPConflict', (["('Resource class already exists: %(name)s' % {'name': rc.name})"], {}), "('Resource class already exists: %(name)s' % {'name':\n rc.name})\n", (6767, 6834), False, 'import webob\n'), ((6926, 7028), 'webob.exc.HTTPBadRequest', 'webob.exc.HTTPBadRequest', (["('Cannot update standard resource class %(rp_name)s' % {'rp_name': name})"], {}), "(\n 'Cannot update standard resource class %(rp_name)s' % {'rp_name': name})\n", (6950, 7028), False, 'import webob\n'), ((8108, 8148), 'designer_family.objects.resource_class.ResourceClass', 'rc_obj.ResourceClass', (['context'], {'name': 'name'}), '(context, name=name)\n', (8128, 8148), True, 'from designer_family.objects import resource_class as rc_obj\n')]