| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q257500 | GTFS.get_trip_counts_per_day | validation | def get_trip_counts_per_day(self):
"""
Get trip counts per day between the start and end day of the feed.
Returns
-------
trip_counts : pandas.DataFrame
Has columns "date_str" (dtype str) and "trip_counts" (dtype int)
"""
query = "SELECT date, count(*) AS number_of_trips FROM day_trips GROUP BY date"
# this yields the actual data
trip_counts_per_day = pd.read_sql_query(query, self.conn, index_col="date")
# the rest is simply code for filling out "gaps" in the time span
# (necessary for some visualizations)
max_day = trip_counts_per_day.index.max()
min_day = trip_counts_per_day.index.min()
min_date = datetime.datetime.strptime(min_day, '%Y-%m-%d')
max_date = datetime.datetime.strptime(max_day, '%Y-%m-%d')
num_days = (max_date - min_date).days
dates = [min_date + datetime.timedelta(days=x) for x in range(num_days + 1)]
trip_counts = []
date_strings = []
for date in dates:
date_string = date.strftime("%Y-%m-%d")
date_strings.append(date_string)
try:
| python | {
"resource": ""
} |
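The truncated tail above fills the "gaps" day by day in a loop; a more compact sketch of the same idea uses `pd.date_range` to enumerate the full span and `reindex` to zero-fill missing days. The helper name `fill_missing_days` is hypothetical and assumes the DataFrame is indexed by `"%Y-%m-%d"` date strings, as produced by the query above.

```python
import pandas as pd

def fill_missing_days(trip_counts_per_day):
    # enumerate every calendar day between the first and last observed date
    full_index = pd.date_range(trip_counts_per_day.index.min(),
                               trip_counts_per_day.index.max(), freq="D")
    # reindex on the string form; days with no trips get a count of 0
    filled = trip_counts_per_day.reindex(full_index.strftime("%Y-%m-%d"),
                                         fill_value=0)
    filled.index.name = "date_str"
    return filled
```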
q257501 | GTFS.get_spreading_trips | validation | def get_spreading_trips(self, start_time_ut, lat, lon,
max_duration_ut=4 * 3600,
min_transfer_time=30,
use_shapes=False):
"""
Starting from a specific point and time, get complete single source
shortest path spreading dynamics as trips, or "events".
Parameters
----------
start_time_ut: number
Start time of the spreading.
lat: float
latitude of the spreading seed location
lon: float
longitude of the spreading seed location
max_duration_ut: int
maximum duration of the spreading process (in seconds)
min_transfer_time : int
minimum transfer time in seconds
use_shapes : bool
whether to include shapes
Returns
-------
trips: dict
trips['trips'] is a list in which each element (e.g. el = trips['trips'][0])
| python | {
"resource": ""
} |
q257502 | GTFS.get_closest_stop | validation | def get_closest_stop(self, lat, lon):
"""
Get closest stop to a given location.
Parameters
----------
lat: float
latitude coordinate of the location
lon: float
longitude coordinate of the location
Returns
-------
stop_I: int
the index of the stop in the database
"""
cur = self.conn.cursor()
min_dist = float("inf")
min_stop_I = None
rows = | python | {
"resource": ""
} |
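The body is cut off right after `rows =`, but the setup (`min_dist = float("inf")`, `min_stop_I = None`) suggests a linear scan over stop coordinates. A sketch under that assumption, using the haversine great-circle distance; `rows` is assumed to hold `(stop_I, lat, lon)` tuples, e.g. from `SELECT stop_I, lat, lon FROM stops`.

```python
import math

def haversine_m(lat1, lon1, lat2, lon2):
    # great-circle distance in meters between two (lat, lon) points
    r = 6371000.0
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlam = math.radians(lon2 - lon1)
    a = (math.sin(dphi / 2) ** 2
         + math.cos(phi1) * math.cos(phi2) * math.sin(dlam / 2) ** 2)
    return 2 * r * math.asin(math.sqrt(a))

def closest_stop(rows, lat, lon):
    # linear scan, keeping the index of the nearest stop
    min_dist, min_stop_I = float("inf"), None
    for stop_I, stop_lat, stop_lon in rows:
        d = haversine_m(lat, lon, stop_lat, stop_lon)
        if d < min_dist:
            min_dist, min_stop_I = d, stop_I
    return min_stop_I
```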
q257503 | GTFS.tripI_takes_place_on_dsut | validation | def tripI_takes_place_on_dsut(self, trip_I, day_start_ut):
"""
Check that a trip takes place during a day
Parameters
----------
trip_I : int
index of the trip in the GTFS database
day_start_ut : int
the starting time of the day in unix time (seconds)
Returns
-------
takes_place: bool
boolean value describing whether the trip takes place during
the given day or not
"""
query = "SELECT * FROM days | python | {
"resource": ""
} |
q257504 | GTFS.day_start_ut | validation | def day_start_ut(self, ut):
"""
Convert unixtime to unixtime on GTFS start-of-day.
GTFS defines the start of a day as "noon minus 12 hours" to solve
most DST-related problems. This means that on DST-changing days,
the day start isn't midnight. This function isn't idempotent.
Running it twice on the "move clocks backwards" day will result in
being one day too early.
Parameters
----------
ut: int
Unixtime
| python | {
"resource": ""
} |
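A worked sketch of the "noon minus 12 hours" rule: pin the local clock to 12:00 on the day containing `ut`, then subtract 12 absolute hours from that instant. On DST-changing days this lands on the true start of the service day rather than on a possibly nonexistent or ambiguous midnight. The `pytz` dependency and the `tz_name` default are illustration-only assumptions, not necessarily how the class implements it.

```python
import datetime
import pytz  # assumed dependency for timezone-aware arithmetic

def day_start_ut_sketch(ut, tz_name="Europe/Helsinki"):
    tz = pytz.timezone(tz_name)
    local = datetime.datetime.fromtimestamp(ut, tz)
    # local noon is well defined even on days when the clocks change
    noon = tz.localize(datetime.datetime(local.year, local.month,
                                         local.day, 12))
    # subtract 12 *absolute* hours from the noon instant
    return int(noon.timestamp()) - 12 * 3600
```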
q257505 | GTFS.increment_day_start_ut | validation | def increment_day_start_ut(self, day_start_ut, n_days=1):
"""Increment the GTFS-definition of "day start".
Parameters
----------
day_start_ut : int
unixtime of the previous start of day. If this time is 12:00
or later, there *will* be bugs. To avoid this, run the
input through day_start_ut first.
n_days: int
number of days to increment
"""
old_tz = self.set_current_process_time_zone()
day0 = | python | {
"resource": ""
} |
q257506 | GTFS._get_possible_day_starts | validation | def _get_possible_day_starts(self, start_ut, end_ut, max_time_overnight=None):
"""
Get all possible day start times between start_ut and end_ut
Currently this function is used only by get_tripIs_within_range_by_dsut
Parameters
----------
start_ut : int
start time in unix time
end_ut : int
end time in unix time
max_time_overnight : int
the maximum amount of time that a trip can extend into the
next day (i.e. for after-midnight run times like 25:35)
Returns
-------
day_start_times_ut : list
list of ints (unix times in seconds) of all possible day
start times
start_times_ds : list
list of ints stating the valid start times in day seconds
end_times_ds : list
list of ints stating the valid end times in day seconds
"""
if max_time_overnight is None:
# 7 hours:
max_time_overnight = 7 * 60 * 60
# sanity checks for the timezone parameter
# assert timezone < 14
# assert timezone > -14
# tz_seconds = int(timezone*3600)
assert start_ut < end_ut
start_day_ut = self.day_start_ut(start_ut)
# start_day_ds = int(start_ut+tz_seconds) % seconds_in_a_day #??? needed?
start_day_ds = start_ut - start_day_ut
# assert (start_day_ut+tz_seconds) % seconds_in_a_day == 0
end_day_ut = self.day_start_ut(end_ut)
# end_day_ds = int(end_ut+tz_seconds) % seconds_in_a_day #??? needed?
# end_day_ds = end_ut - end_day_ut
| python | {
"resource": ""
} |
q257507 | GTFS.stop | validation | def stop(self, stop_I):
"""
Get stop data as a pandas DataFrame, either for all stops or for an individual stop.
Parameters
----------
stop_I : int
stop index
Returns
| python | {
"resource": ""
} |
q257508 | GTFS.get_transit_events | validation | def get_transit_events(self, start_time_ut=None, end_time_ut=None, route_type=None):
"""
Obtain a list of events that take place during a time interval.
Each event need only partially overlap the given time interval.
Does not include walking events.
Parameters
----------
start_time_ut : int
start of the time interval in unix time (seconds)
end_time_ut: int
end of the time interval in unix time (seconds)
route_type: int
consider only events for this route_type
Returns
-------
events: pandas.DataFrame
with the following columns and types
dep_time_ut: int
arr_time_ut: int
from_stop_I: int
to_stop_I: int
trip_I : int
shape_id : int
route_type : int
See also
--------
get_transit_events_in_time_span : an older version of the same thing
"""
table_name = self._get_day_trips_table_name()
event_query = "SELECT stop_I, seq, trip_I, route_I, routes.route_id AS route_id, routes.type AS route_type, " \
"shape_id, day_start_ut+dep_time_ds AS dep_time_ut, day_start_ut+arr_time_ds AS arr_time_ut " \
"FROM " + table_name + " " \
"JOIN trips USING(trip_I) " \
"JOIN routes USING(route_I) " \
"JOIN stop_times USING(trip_I)"
where_clauses = []
if end_time_ut:
where_clauses.append(table_name + ".start_time_ut< {end_time_ut}".format(end_time_ut=end_time_ut))
where_clauses.append("dep_time_ut <={end_time_ut}".format(end_time_ut=end_time_ut))
if start_time_ut:
where_clauses.append(table_name + ".end_time_ut > {start_time_ut}".format(start_time_ut=start_time_ut))
where_clauses.append("arr_time_ut >={start_time_ut}".format(start_time_ut=start_time_ut))
if route_type is not None:
assert route_type in ALL_ROUTE_TYPES
where_clauses.append("routes.type={route_type}".format(route_type=route_type))
if len(where_clauses) > 0:
event_query += " WHERE "
for i, where_clause in enumerate(where_clauses):
if i != 0:
event_query += " AND "
event_query += where_clause
# ordering is required for later stages
event_query += " ORDER BY trip_I, day_start_ut+dep_time_ds;"
events_result = pd.read_sql_query(event_query, self.conn)
# 'filter' results so that only real "events" are taken into account
from_indices = numpy.nonzero(
(events_result['trip_I'][:-1].values == events_result['trip_I'][1:].values) | python | {
"resource": ""
} |
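A side note on the query assembly above: the original `i is not 0` only worked because CPython interns small integers; identity is the wrong test, so it is corrected to `i != 0` above. The loop can also be collapsed with `str.join`; a minimal sketch (the base query and clauses are placeholders):

```python
def build_query(base_query, where_clauses):
    # join all filter conditions with AND instead of appending in a loop
    if where_clauses:
        return base_query + " WHERE " + " AND ".join(where_clauses)
    return base_query

query = build_query("SELECT * FROM day_trips",
                    ["dep_time_ut <= ?", "arr_time_ut >= ?"])
# -> "SELECT * FROM day_trips WHERE dep_time_ut <= ? AND arr_time_ut >= ?"
```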
q257509 | GTFS.get_day_start_ut_span | validation | def get_day_start_ut_span(self):
"""
Return the first and last day_start_ut
Returns
-------
first_day_start_ut: int
last_day_start_ut: int
"""
cur = self.conn.cursor()
first_day_start_ut, last_day_start_ut = \
| python | {
"resource": ""
} |
q257510 | TravelImpedanceDataStore.read_data_as_dataframe | validation | def read_data_as_dataframe(self,
travel_impedance_measure,
from_stop_I=None,
to_stop_I=None,
statistic=None):
"""
Recover pre-computed travel_impedance between od-pairs from the database.
Returns
-------
values: number | Pandas DataFrame
"""
to_select = []
where_clauses = []
to_select.append("from_stop_I")
to_select.append("to_stop_I")
if from_stop_I is not None:
where_clauses.append("from_stop_I=" + str(int(from_stop_I)))
if to_stop_I is not None:
where_clauses.append("to_stop_I=" + str(int(to_stop_I)))
where_clause = ""
| python | {
"resource": ""
} |
q257511 | NodeProfileMultiObjective._check_dep_time_is_valid | validation | def _check_dep_time_is_valid(self, dep_time):
"""
A simple check that connections come in descending order of departure time
and that no departure time has been "skipped".
Parameters
----------
dep_time
Returns
-------
None
"""
assert dep_time <= self._min_dep_time, "Labels should be entered in decreasing order of departure time."
dep_time_index = self.dep_times_to_index[dep_time]
if self._min_dep_time < float('inf'):
| python | {
"resource": ""
} |
q257512 | NodeProfileMultiObjective.update | validation | def update(self, new_labels, departure_time_backup=None):
"""
Update the profile with the new labels.
Each new label should have the same departure_time.
Parameters
----------
new_labels: list[LabelTime]
Returns
-------
added: bool
whether new_pareto_tuple was added to the set of pareto-optimal tuples
"""
if self._closed:
raise RuntimeError("Profile is closed, no updates can be made")
try:
departure_time = next(iter(new_labels)).departure_time
except StopIteration:
departure_time = departure_time_backup
self._check_dep_time_is_valid(departure_time)
for new_label in new_labels:
assert (new_label.departure_time == departure_time)
dep_time_index = self.dep_times_to_index[departure_time]
if dep_time_index > 0:
| python | {
"resource": ""
} |
q257513 | NodeProfileMultiObjective.evaluate | validation | def evaluate(self, dep_time, first_leg_can_be_walk=True, connection_arrival_time=None):
"""
Get the pareto_optimal set of Labels, given a departure time.
Parameters
----------
dep_time : float, int
time in unix seconds
first_leg_can_be_walk : bool, optional
whether to allow walking to target to be included into the profile
(I.e. whether this function is called when scanning a pseudo-connection:
"double" walks are not allowed.)
connection_arrival_time: float, int, optional
used for computing the walking label if dep_time (i.e.,
connection.arrival_stop_next_departure_time) is infinity
Returns
-------
pareto_optimal_labels : set
Set of Labels
"""
walk_labels = list()
# walk label towards target
if first_leg_can_be_walk and self._walk_to_target_duration != float('inf'):
# add walk_label
if connection_arrival_time is not None:
walk_labels.append(self._get_label_to_target(connection_arrival_time))
| python | {
"resource": ""
} |
q257514 | TableLoader.create_table | validation | def create_table(self, conn):
"""Make table definitions"""
# Make cursor
cur = conn.cursor()
# Drop table if it already exists, to be recreated. This
# could in the future abort if table already exists, and not
# recreate it from scratch.
#cur.execute('''DROP TABLE IF EXISTS %s'''%self.table)
#conn.commit()
if self.tabledef is None:
return
if not self.tabledef.startswith('CREATE'):
# "normal" table creation.
| python | {
"resource": ""
} |
q257515 | TableLoader.import_ | validation | def import_(self, conn):
"""Do the actual import. Copy data and store in connection object.
This function:
- Creates the tables
- Imports data (using self.gen_rows)
- Runs any post_import hooks.
- Creates any indexes
- Does *not* run self.make_views - those must be done
after all tables are loaded.
"""
if self.print_progress:
print('Beginning', self.__class__.__name__)
# what is this mystical self._conn ?
self._conn = conn
self.create_table(conn)
# This does insertions
if self.mode in ('all', 'import') and self.fname and self.exists() and self.table not in ignore_tables:
| python | {
"resource": ""
} |
q257516 | TableLoader.copy | validation | def copy(cls, conn, **where):
"""Copy data from one table to another while filtering data at the same time
Parameters
----------
conn: sqlite3 DB connection. It must have a second database
attached as "other".
**where : keyword arguments
| python | {
"resource": ""
} |
q257517 | get_median_lat_lon_of_stops | validation | def get_median_lat_lon_of_stops(gtfs):
"""
Get median latitude AND longitude of stops
Parameters
----------
gtfs: GTFS
Returns
-------
| python | {
"resource": ""
} |
q257518 | get_centroid_of_stops | validation | def get_centroid_of_stops(gtfs):
"""
Get mean latitude AND longitude of stops
Parameters
----------
gtfs: GTFS
Returns
| python | {
"resource": ""
} |
q257519 | write_stats_as_csv | validation | def write_stats_as_csv(gtfs, path_to_csv, re_write=False):
"""
Writes data from get_stats to csv file
Parameters
----------
gtfs: GTFS
path_to_csv: str
filepath to the csv file to be generated
re_write: bool
instead of appending, create a new file.
"""
stats_dict = get_stats(gtfs)
# check if the file exists
if re_write and os.path.exists(path_to_csv):
os.remove(path_to_csv)
#if not os.path.isfile(path_to_csv):
# is_new = True
#else:
# is_new = False
is_new = True
mode = 'r' if os.path.exists(path_to_csv) else 'w+'
with open(path_to_csv, mode) as csvfile:
for line in csvfile:
if line:
is_new = False
else:
is_new = True
with open(path_to_csv, 'a') as csvfile:
if (sys.version_info > (3, 0)):
delimiter = u","
else:
| python | {
"resource": ""
} |
q257520 | _distribution | validation | def _distribution(gtfs, table, column):
"""Count occurrences of values AND return it as a string.
Example return value: '1:5 2:15'"""
cur = gtfs.conn.cursor()
cur.execute('SELECT {column}, count(*) '
'FROM {table} GROUP BY {column} '
| python | {
"resource": ""
} |
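The visible part builds the GROUP BY query; the cut-off tail presumably formats the fetched `(value, count)` rows into the `'value:count'` string promised by the docstring. A sketch of that formatting step:

```python
rows = [(1, 5), (2, 15)]  # stand-in for cur.fetchall()
result = " ".join("%s:%s" % (value, count) for value, count in rows)
assert result == "1:5 2:15"  # matches the docstring's example
```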
q257521 | _feed_calendar_span | validation | def _feed_calendar_span(gtfs, stats):
"""
Computes the temporal coverage of each source feed
Parameters
----------
gtfs: gtfspy.GTFS object
stats: dict
where to append the stats
Returns
-------
stats: dict
"""
n_feeds = _n_gtfs_sources(gtfs)[0]
max_start = None
min_end = None
if n_feeds > 1:
for i in range(n_feeds):
feed_key = "feed_" + str(i) + "_"
start_key = feed_key + "calendar_start"
end_key = feed_key + "calendar_end"
calendar_span = gtfs.conn.cursor().execute(
'SELECT min(date), max(date) FROM trips, days '
'WHERE trips.trip_I = days.trip_I AND trip_id LIKE ?;', (feed_key + '%',)).fetchone()
stats[start_key] = calendar_span[0]
stats[end_key] = calendar_span[1]
if calendar_span[0] is not None and calendar_span[1] is not None:
if not max_start and not min_end:
max_start = calendar_span[0]
min_end = calendar_span[1]
| python | {
"resource": ""
} |
q257522 | route_frequencies | validation | def route_frequencies(gtfs, results_by_mode=False):
"""
Return the frequency of all types of routes per day.
Parameters
-----------
gtfs: GTFS
Returns
-------
pandas.DataFrame with columns
route_I, type, frequency
"""
day = gtfs.get_suitable_date_for_daily_extract()
query = (
" SELECT f.route_I, type, frequency FROM routes as r"
" JOIN"
" (SELECT route_I, COUNT(route_I) as frequency"
" FROM"
" (SELECT date, route_I, trip_I"
| python | {
"resource": ""
} |
q257523 | get_vehicle_hours_by_type | validation | def get_vehicle_hours_by_type(gtfs, route_type):
"""
Return the sum of vehicle hours in a particular day by route type.
"""
day = gtfs.get_suitable_date_for_daily_extract()
query = (" SELECT * , SUM(end_time_ds - start_time_ds)/3600 as vehicle_hours_type"
" FROM"
" (SELECT * FROM day_trips as q1"
" INNER JOIN"
" (SELECT route_I, type FROM routes) as q2"
| python | {
"resource": ""
} |
q257524 | ConnectionScan._scan_footpaths | validation | def _scan_footpaths(self, stop_id, walk_departure_time):
"""
Scan the footpaths originating from stop_id
Parameters
----------
stop_id: int
"""
for _, neighbor, data in self._walk_network.edges_iter(nbunch=[stop_id], data=True):
d_walk = | python | {
"resource": ""
} |
q257525 | timeit | validation | def timeit(method):
"""
A Python decorator for printing out the execution time for a function.
Adapted from:
www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods
"""
def timed(*args, **kw):
time_start = time.time()
result = method(*args, **kw)
| python | {
"resource": ""
} |
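The decorator is cut off right after calling the wrapped method; a hedged completion under the obvious assumption that it measures and prints the elapsed time (the exact print format is a guess):

```python
import time
from functools import wraps

def timeit(method):
    """Print the execution time of the wrapped function."""
    @wraps(method)  # keep the wrapped function's name and docstring
    def timed(*args, **kw):
        time_start = time.time()
        result = method(*args, **kw)
        elapsed = time.time() - time_start
        print("%r took %.2f s" % (method.__name__, elapsed))
        return result
    return timed
```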
q257526 | AuthForm.clean | validation | def clean(self):
"""When receiving the filled out form, check for valid access."""
cleaned_data = super(AuthForm, self).clean()
user = self.get_user()
if self.staff_only and (not | python | {
"resource": ""
} |
q257527 | get_lockdown_form | validation | def get_lockdown_form(form_path):
"""Return a form class for a given string pointing to a lockdown form."""
if not form_path:
raise ImproperlyConfigured('No LOCKDOWN_FORM specified.')
form_path_list = form_path.split(".")
new_module = ".".join(form_path_list[:-1])
attr = form_path_list[-1]
try:
mod = import_module(new_module)
except (ImportError, ValueError):
raise ImproperlyConfigured('Module configured in LOCKDOWN_FORM (%s) to'
' contain the form class couldn\'t be '
| python | {
"resource": ""
} |
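The dotted-path resolution pattern used above (split on ".", `import_module` the module part, `getattr` the attribute) is reusable on its own; a minimal sketch with a hypothetical helper name and example path:

```python
from importlib import import_module

def load_attr_from_path(dotted_path):
    # "pkg.module.ClassName" -> import pkg.module, then fetch ClassName
    module_path, _, attr = dotted_path.rpartition(".")
    mod = import_module(module_path)
    return getattr(mod, attr)

# e.g. load_attr_from_path("lockdown.forms.AuthForm")
```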
q257528 | LockdownMiddleware.process_request | validation | def process_request(self, request):
"""Check if each request is allowed to access the current resource."""
try:
session = request.session
except AttributeError:
raise ImproperlyConfigured('django-lockdown requires the Django '
'sessions framework')
# Don't lock down if django-lockdown is disabled altogether.
if settings.ENABLED is False:
return None
# Don't lock down if the client REMOTE_ADDR matched and is part of the
# exception list.
if self.remote_addr_exceptions:
remote_addr_exceptions = self.remote_addr_exceptions
else:
remote_addr_exceptions = settings.REMOTE_ADDR_EXCEPTIONS
if remote_addr_exceptions:
# If forwarding proxies are used they must be listed as trusted
trusted_proxies = self.trusted_proxies or settings.TRUSTED_PROXIES
remote_addr = request.META.get('REMOTE_ADDR')
if remote_addr in remote_addr_exceptions:
return None
if remote_addr in trusted_proxies:
# If REMOTE_ADDR is a trusted proxy check x-forwarded-for
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
remote_addr = x_forwarded_for.split(',')[-1].strip()
if remote_addr in remote_addr_exceptions:
return None
# Don't lock down if the URL matches an exception pattern.
if self.url_exceptions:
url_exceptions = compile_url_exceptions(self.url_exceptions)
else:
url_exceptions = compile_url_exceptions(settings.URL_EXCEPTIONS)
for pattern in url_exceptions:
if pattern.search(request.path):
return None
# Don't lock down if the URL resolves to a whitelisted view.
try:
resolved_path = resolve(request.path)
except Resolver404:
pass
else:
if resolved_path.func in settings.VIEW_EXCEPTIONS:
return None
# Don't lock down if outside of the lockdown dates.
if self.until_date:
until_date = self.until_date
else:
until_date = settings.UNTIL_DATE
if self.after_date:
after_date = self.after_date
else:
after_date = settings.AFTER_DATE
if until_date or after_date:
| python | {
"resource": ""
} |
q257529 | LockdownMiddleware.redirect | validation | def redirect(self, request):
"""Handle redirects properly."""
url = request.path
querystring = request.GET.copy()
if self.logout_key and self.logout_key in request.GET:
del | python | {
"resource": ""
} |
q257530 | Registry.get | validation | def get(self, profile_id):
'''Returns the profile with the received ID as a dict
If a local copy of the profile exists, it'll be returned. If not, it'll
be downloaded from the web. The results are cached, so any subsequent
calls won't hit the filesystem or the web.
Args:
profile_id (str): The ID of the profile you want.
Raises:
RegistryError: If there was some problem opening the profile file
or its format was incorrect.
'''
| python | {
"resource": ""
} |
q257531 | get_descriptor_base_path | validation | def get_descriptor_base_path(descriptor):
"""Get descriptor base path if string or return None.
"""
# Infer from path/url
if isinstance(descriptor, six.string_types):
if os.path.exists(descriptor):
base_path = os.path.dirname(os.path.abspath(descriptor))
else:
| python | {
"resource": ""
} |
q257532 | retrieve_descriptor | validation | def retrieve_descriptor(descriptor):
"""Retrieve descriptor.
"""
the_descriptor = descriptor
if the_descriptor is None:
the_descriptor = {}
if isinstance(the_descriptor, six.string_types):
try:
if os.path.isfile(the_descriptor):
with open(the_descriptor, 'r') as f:
the_descriptor = json.load(f)
else:
req = requests.get(the_descriptor)
req.raise_for_status()
# Force UTF8 encoding for 'text/plain' sources
req.encoding = 'utf8'
the_descriptor = req.json()
except (IOError, requests.exceptions.RequestException) as error:
message = 'Unable to load JSON at "%s"' % descriptor
six.raise_from(exceptions.DataPackageException(message), error)
except ValueError as error:
| python | {
"resource": ""
} |
q257533 | is_safe_path | validation | def is_safe_path(path):
"""Check if path is safe and allowed.
"""
contains_windows_var = lambda val: re.match(r'%.+%', val)
contains_posix_var = lambda val: re.match(r'\$.+', val)
unsafeness_conditions = [
os.path.isabs(path),
('..%s' % os.path.sep) in path,
| python | {
"resource": ""
} |
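The list of unsafeness conditions is truncated; a sketch completing it in the spirit of the helpers already defined (the final two entries and the return line are assumptions):

```python
import os
import re

def is_safe_path(path):
    contains_windows_var = lambda val: re.match(r'%.+%', val)
    contains_posix_var = lambda val: re.match(r'\$.+', val)
    unsafeness_conditions = [
        os.path.isabs(path),             # absolute paths escape the package dir
        ('..%s' % os.path.sep) in path,  # parent-directory traversal
        contains_windows_var(path),      # %VAR% expansion
        contains_posix_var(path),        # $VAR expansion
    ]
    return not any(unsafeness_conditions)
```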
q257534 | _validate_zip | validation | def _validate_zip(the_zip):
"""Validate zipped data package
"""
datapackage_jsons = [f for f in the_zip.namelist() if f.endswith('datapackage.json')]
if len(datapackage_jsons) != 1:
msg = | python | {
"resource": ""
} |
q257535 | _slugify_foreign_key | validation | def _slugify_foreign_key(schema):
"""Slugify foreign key
"""
for foreign_key in schema.get('foreignKeys', []):
| python | {
"resource": ""
} |
q257536 | Package.validate | validation | def validate(self):
""""Validate this Data Package.
"""
# Deprecate
warnings.warn(
'Property "package.validate" is deprecated.',
| python | {
"resource": ""
} |
q257537 | push_datapackage | validation | def push_datapackage(descriptor, backend, **backend_options):
"""Push Data Package to storage.
All parameters should be used as keyword arguments.
Args:
descriptor (str): path to descriptor
backend (str): backend name like `sql` or `bigquery`
backend_options (dict): backend options mentioned in backend docs
"""
# Deprecated
warnings.warn(
'Functions "push/pull_datapackage" are deprecated. '
'Please use "Package" class',
UserWarning)
# Init maps
tables = []
schemas = []
datamap = {}
mapping = {}
# Init model
model = Package(descriptor)
# Get storage
plugin = import_module('jsontableschema.plugins.%s' % backend)
storage = plugin.Storage(**backend_options)
# Collect tables/schemas/data
for resource in model.resources:
if not resource.tabular:
continue
name = resource.descriptor.get('name', None)
table = _convert_path(resource.descriptor['path'], name)
schema = resource.descriptor['schema']
data = resource.table.iter(keyed=True)
# TODO: review
def values(schema, data):
for item in data:
row = []
| python | {
"resource": ""
} |
q257538 | pull_datapackage | validation | def pull_datapackage(descriptor, name, backend, **backend_options):
"""Pull Data Package from storage.
All parameters should be used as keyword arguments.
Args:
descriptor (str): path where to store descriptor
name (str): name of the pulled datapackage
backend (str): backend name like `sql` or `bigquery`
backend_options (dict): backend options mentioned in backend docs
"""
# Deprecated
warnings.warn(
'Functions "push/pull_datapackage" are deprecated. '
'Please use "Package" class',
UserWarning)
# Save datapackage name
datapackage_name = name
# Get storage
plugin = import_module('jsontableschema.plugins.%s' % backend)
storage = plugin.Storage(**backend_options)
# Iterate over tables
resources = []
for table in storage.buckets:
# Prepare
schema = storage.describe(table)
base = os.path.dirname(descriptor)
path, name = _restore_path(table)
fullpath = os.path.join(base, path)
# Write data
helpers.ensure_dir(fullpath)
with io.open(fullpath, 'wb') as file:
model = Schema(deepcopy(schema))
data = storage.iter(table)
writer = csv.writer(file, encoding='utf-8')
| python | {
"resource": ""
} |
q257539 | _convert_path | validation | def _convert_path(path, name):
"""Convert resource's path and name to storage's table name.
Args:
path (str): resource path
name (str): resource name
Returns:
str: table name
"""
table = os.path.splitext(path)[0]
table = table.replace(os.path.sep, '__')
| python | {
"resource": ""
} |
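Read together with `_restore_path` in the next entry, the two helpers form a round trip between resource paths and storage table names. A sketch of both directions; the `'___' + name` suffix and the restored `.csv` extension are inferred from `_restore_path`'s `split('___')`, so treat them as assumptions:

```python
import os

def convert_path(path, name):
    # "data/cities.csv" + "cities" -> "data__cities___cities" (POSIX sep)
    table = os.path.splitext(path)[0].replace(os.path.sep, '__')
    if name is not None:
        table += '___' + name
    return table

def restore_path(table):
    # inverse: split the optional name off at '___', restore separators
    parts = table.split('___')
    name = parts[1] if len(parts) > 1 else None
    path = parts[0].replace('__', os.path.sep) + '.csv'
    return path, name
```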
q257540 | _restore_path | validation | def _restore_path(table):
"""Restore resource's path and name from storage's table.
Args:
table (str): table name
Returns:
(str, str): resource path and name
"""
name = None
splited = table.split('___')
path = splited[0]
if | python | {
"resource": ""
} |
q257541 | _convert_schemas | validation | def _convert_schemas(mapping, schemas):
"""Convert schemas to be compatible with storage schemas.
Foreign keys related operations.
Args:
mapping (dict): mapping between resource name and table name
schemas (list): schemas
Raises:
ValueError: if there is no resource
for some foreign key in given mapping
Returns:
list: converted schemas
"""
schemas = deepcopy(schemas)
for schema in schemas:
for fk in schema.get('foreignKeys', []):
resource = fk['reference']['resource']
| python | {
"resource": ""
} |
q257542 | _restore_resources | validation | def _restore_resources(resources):
"""Restore schemas from being compatible with storage schemas.
Foreign keys related operations.
Args:
list: resources from storage
Returns:
list: restored resources
"""
resources = deepcopy(resources)
for resource in resources:
schema = resource['schema']
| python | {
"resource": ""
} |
q257543 | _buffer_incomplete_responses | validation | def _buffer_incomplete_responses(raw_output, buf):
"""It is possible for some of gdb's output to be read before it completely finished its response.
In that case, a partial mi response was read, which cannot be parsed into structured data.
We want to ALWAYS parse complete mi records. To do this, we store a buffer of gdb's
output if the output did not end in a newline.
Args:
raw_output: Contents of the gdb mi output
buf (str): Buffered gdb response from the past. This is incomplete and needs to be prepended to
gdb's next output.
Returns:
(raw_output, buf)
"""
if raw_output:
if buf:
# concatenate buffer and new output
raw_output = b"".join([buf, raw_output])
buf = None
if b"\n" not in raw_output:
# newline was not found, so assume output is incomplete and store in buffer
| python | {
"resource": ""
} |
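The function is cut off inside the `b"\n" not in raw_output` branch; a sketch of the complete buffering logic, where the branch handling a trailing partial record is an assumption consistent with the docstring:

```python
def buffer_incomplete_responses(raw_output, buf):
    if raw_output:
        if buf:
            # prepend previously buffered partial output
            raw_output = b"".join([buf, raw_output])
            buf = None
        if b"\n" not in raw_output:
            # no complete record yet: stash everything, emit nothing
            buf = raw_output
            raw_output = None
        elif not raw_output.endswith(b"\n"):
            # emit the complete records, buffer the trailing partial one
            raw_output, _, buf = raw_output.rpartition(b"\n")
            raw_output += b"\n"
    return raw_output, buf
```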
q257544 | GdbController.verify_valid_gdb_subprocess | validation | def verify_valid_gdb_subprocess(self):
"""Verify there is a process object, and that it is still running.
Raise NoGdbProcessError if either of the above is not true."""
if not self.gdb_process:
raise NoGdbProcessError("gdb process is not attached")
elif self.gdb_process.poll() is not None:
| python | {
"resource": ""
} |
q257545 | GdbController.write | validation | def write(
self,
mi_cmd_to_write,
timeout_sec=DEFAULT_GDB_TIMEOUT_SEC,
raise_error_on_timeout=True,
read_response=True,
):
"""Write to gdb process. Block while parsing responses from gdb for a maximum of timeout_sec.
Args:
mi_cmd_to_write (str or list): String to write to gdb. If list, it is joined by newlines.
timeout_sec (float): Maximum number of seconds to wait for response before exiting. Must be >= 0.
raise_error_on_timeout (bool): If read_response is True, raise error if no response is received
read_response (bool): Block and read response. If there is a separate thread running,
this can be false, and the reading thread read the output.
Returns:
List of parsed gdb responses if read_response is True, otherwise []
Raises:
NoGdbProcessError if there is no gdb subprocess running
TypeError if mi_cmd_to_write is not valid
"""
self.verify_valid_gdb_subprocess()
if timeout_sec < 0:
self.logger.warning("timeout_sec was negative, replacing with 0")
| python | {
"resource": ""
} |
q257546 | GdbController.get_gdb_response | validation | def get_gdb_response(
self, timeout_sec=DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True
):
"""Get response from GDB, and block while doing so. If GDB does not have any response ready to be read
by timeout_sec, an exception is raised.
Args:
timeout_sec (float): Maximum time to wait for a response. Must be >= 0. Will return after
raise_error_on_timeout (bool): Whether an exception should be raised if no response was found
after timeout_sec
Returns:
List of parsed GDB responses, returned from gdbmiparser.parse_response, with the
| python | {
"resource": ""
} |
q257547 | GdbController._get_responses_windows | validation | def _get_responses_windows(self, timeout_sec):
"""Get responses on windows. Assume no support for select and use a while loop."""
timeout_time_sec = time.time() + timeout_sec
responses = []
while True:
try:
self.gdb_process.stdout.flush()
if PYTHON3:
raw_output = self.gdb_process.stdout.readline().replace(
b"\r", b"\n"
)
else:
raw_output = self.gdb_process.stdout.read().replace(b"\r", b"\n")
| python | {
"resource": ""
} |
q257548 | GdbController._get_responses_unix | validation | def _get_responses_unix(self, timeout_sec):
"""Get responses on unix-like system. Use select to wait for output."""
timeout_time_sec = time.time() + timeout_sec
responses = []
while True:
select_timeout = timeout_time_sec - time.time()
# I prefer to not pass a negative value to select
if select_timeout <= 0:
select_timeout = 0
events, _, _ = select.select(self.read_list, [], [], select_timeout)
responses_list = None # to avoid infinite loop if using Python 2
try:
for fileno in events:
# new data is ready to read
if fileno == self.stdout_fileno:
self.gdb_process.stdout.flush()
raw_output = self.gdb_process.stdout.read()
stream = "stdout"
elif fileno == self.stderr_fileno:
self.gdb_process.stderr.flush()
raw_output = self.gdb_process.stderr.read()
stream = "stderr"
else:
raise ValueError(
"Developer error. Got unexpected file number %d" % fileno
)
responses_list = self._get_responses_list(raw_output, stream)
responses += responses_list
| python | {
"resource": ""
} |
q257549 | main | validation | def main(verbose=True):
"""Build and debug an application programatically
For a list of GDB MI commands, see https://www.sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI.html
"""
# Build C program
if not find_executable(MAKE_CMD):
print(
'Could not find executable "%s". Ensure it is installed and on your $PATH.'
% MAKE_CMD
)
exit(1)
subprocess.check_output([MAKE_CMD, "-C", SAMPLE_C_CODE_DIR, "--quiet"])
# Initialize object that manages gdb subprocess
gdbmi = GdbController(verbose=verbose)
# Send gdb commands. Gdb machine interface commands are easier to script around,
# hence the name "machine interface".
# Responses are automatically printed as they are received if verbose is True.
# Responses are returned after writing, by default.
# Load the file
responses = gdbmi.write("-file-exec-and-symbols | python | {
"resource": ""
} |
q257550 | StringStream.read | validation | def read(self, count):
"""Read count characters starting at self.index,
and return those characters as a string
"""
new_index = self.index + count
if new_index > self.len:
buf = self.raw_text[self.index :] # return to the end, | python | {
"resource": ""
} |
q257551 | StringStream.advance_past_string_with_gdb_escapes | validation | def advance_past_string_with_gdb_escapes(self, chars_to_remove_gdb_escape=None):
"""characters that gdb escapes that should not be
escaped by this parser
"""
if chars_to_remove_gdb_escape is None:
chars_to_remove_gdb_escape = ['"']
buf = ""
while True:
c = self.raw_text[self.index]
self.index += 1
logging.debug("%s", fmt_cyan(c))
if c == "\\":
# We are on a backslash and | python | {
"resource": ""
} |
q257552 | parse_response | validation | def parse_response(gdb_mi_text):
"""Parse gdb mi text and turn it into a dictionary.
See https://sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI-Stream-Records.html#GDB_002fMI-Stream-Records
for details on types of gdb mi output.
Args:
gdb_mi_text (str): String output from gdb
Returns:
dict with the following keys:
type (either 'notify', 'result', 'console', 'log', 'target', 'done'),
message (str or None),
payload (str, list, dict, or None)
"""
stream = StringStream(gdb_mi_text, debug=_DEBUG)
if _GDB_MI_NOTIFY_RE.match(gdb_mi_text):
token, message, payload = _get_notify_msg_and_payload(gdb_mi_text, stream)
return {
"type": "notify",
"message": message,
"payload": payload,
"token": token,
}
elif _GDB_MI_RESULT_RE.match(gdb_mi_text):
token, message, payload = _get_result_msg_and_payload(gdb_mi_text, stream)
return {
"type": "result",
"message": message,
"payload": payload,
"token": token,
}
elif _GDB_MI_CONSOLE_RE.match(gdb_mi_text):
return {
"type": "console",
| python | {
"resource": ""
} |
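A usage sketch of the dict shape the docstring describes, based on pygdbmi's documented examples (exact keys and values may vary across versions):

```python
from pygdbmi import gdbmiparser

print(gdbmiparser.parse_response('^done'))
# e.g. {'type': 'result', 'message': 'done', 'payload': None, 'token': None}

print(gdbmiparser.parse_response('~"hello"'))
# e.g. {'type': 'console', 'message': None, 'payload': 'hello'}
```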
q257553 | _get_notify_msg_and_payload | validation | def _get_notify_msg_and_payload(result, stream):
"""Get notify message and payload dict"""
token = stream.advance_past_chars(["=", "*"])
token = int(token) if token != "" else None
logger.debug("%s", fmt_green("parsing message"))
message = stream.advance_past_chars([","])
| python | {
"resource": ""
} |
q257554 | _get_result_msg_and_payload | validation | def _get_result_msg_and_payload(result, stream):
"""Get result message and payload dict"""
groups = _GDB_MI_RESULT_RE.match(result).groups()
token = int(groups[0]) if groups[0] != "" else None
message = groups[1]
if groups[2] is None:
payload = None
| python | {
"resource": ""
} |
q257555 | BroadcastQueue._get_or_create_subscription | validation | def _get_or_create_subscription(self):
"""In a broadcast queue, workers have a unique subscription ensuring
that every worker receives a copy of every task."""
topic_path = self._get_topic_path()
subscription_name = '{}-{}-{}-worker'.format(
queue.PUBSUB_OBJECT_PREFIX, self.name, uuid4().hex)
subscription_path = self.subscriber_client.subscription_path(
self.project, subscription_name)
try:
| python | {
"resource": ""
} |
q257556 | BroadcastQueue.cleanup | validation | def cleanup(self):
"""Deletes this worker's subscription."""
if self.subscription:
logger.info("Deleting worker subscription...")
| python | {
"resource": ""
} |
q257557 | Queue._get_or_create_subscription | validation | def _get_or_create_subscription(self):
"""Workers all share the same subscription so that tasks are
distributed across all workers."""
topic_path = self._get_topic_path()
subscription_name = '{}-{}-shared'.format(
PUBSUB_OBJECT_PREFIX, self.name)
subscription_path = self.subscriber_client.subscription_path(
self.project, subscription_name)
try:
self.subscriber_client.get_subscription(subscription_path)
except google.cloud.exceptions.NotFound:
logger.info("Creating shared subscription {}".format(
subscription_name))
| python | {
"resource": ""
} |
q257558 | Queue.enqueue | validation | def enqueue(self, f, *args, **kwargs):
"""Enqueues a function for the task queue to execute."""
task = Task(uuid4().hex, f, args, kwargs)
| python | {
"resource": ""
} |
q257559 | Queue.enqueue_task | validation | def enqueue_task(self, task):
"""Enqueues a task directly. This is used when a task is retried or if
a task was manually created.
Note that this does not store the task.
"""
data = dumps(task)
if self._async:
self.publisher_client.publish(self.topic_path, data=data)
| python | {
"resource": ""
} |
q257560 | main | validation | def main(path, pid, queue):
"""
Standalone PSQ worker.
The queue argument must be the full importable path to a psq.Queue
instance.
Example usage:
psqworker config.q
psqworker --path /opt/app queues.fast
"""
setup_logging()
if pid:
with open(os.path.expanduser(pid), "w") as f:
| python | {
"resource": ""
} |
q257561 | TaskResult.result | validation | def result(self, timeout=None):
"""Gets the result of the task.
Arguments:
timeout: Maximum seconds to wait for a result before raising a
TimeoutError. If set to None, this will wait forever. If the
queue doesn't store results and timeout is None, this call will
never return.
"""
start = time.time()
while True:
task = self.get_task()
if not task or | python | {
"resource": ""
} |
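The visible loop re-fetches the task until it carries a result; a self-contained sketch of that poll-with-deadline pattern (the `get_task` callable returning a dict is an illustration-only assumption):

```python
import time

def poll_for_result(get_task, timeout=None, interval=1.0):
    start = time.time()
    while True:
        task = get_task()
        if task is not None and task.get("result") is not None:
            return task["result"]
        if timeout is not None and time.time() - start > timeout:
            raise TimeoutError("Timed out waiting for task result.")
        time.sleep(interval)
```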
q257562 | service_start | validation | def service_start(service=None, param=None):
"""
Launch a process and return its pid
"""
if service is not None:
to_run = ["python", service]
| python | {
"resource": ""
} |
q257563 | update_running_pids | validation | def update_running_pids(old_procs):
"""
Update the list of running processes and return it
"""
new_procs = []
for proc in old_procs:
if proc.poll() is None and check_pid(proc.pid):
| python | {
"resource": ""
} |
q257564 | run_splitted_processing | validation | def run_splitted_processing(max_simultaneous_processes, process_name,
filenames):
"""
Run processes which push the routing dump of the RIPE into a Redis
database.
The dump has been split into multiple files and each process runs
on one of these files.
"""
pids = []
while len(filenames) > 0:
while len(filenames) > 0 and len(pids) < max_simultaneous_processes:
filename = filenames.pop()
pids.append(service_start(service=process_name,
param=['-f', | python | {
"resource": ""
} |
q257565 | fsplit | validation | def fsplit(file_to_split):
"""
Split the file and return the list of filenames.
"""
dirname = file_to_split + '_splitted'
if not os.path.exists(dirname):
os.mkdir(dirname)
part_file_size = os.path.getsize(file_to_split) / number_of_files + 1
splitted_files = []
with open(file_to_split, "r") as f:
number = 0
actual = 0
while 1:
prec = actual
# Jump of "size" from the current place in the file
f.seek(part_file_size, os.SEEK_CUR)
# find the next separator or EOF
s = f.readline()
if len(s) == 0:
s = f.readline()
while len(s) != 0 and s != separator:
s = f.readline()
# Get the current place
actual = f.tell()
new_file = | python | {
"resource": ""
} |
q257566 | IPASN.asn | validation | def asn(self, ip, announce_date=None):
"""
Given an IP and optionally a date, get the ASN.
This is the fastest command.
:param ip: IP address to search for
:param announce_date: Date of the announcement
:rtype: String, ASN.
| python | {
"resource": ""
} |
q257567 | IPASN.date_asn_block | validation | def date_asn_block(self, ip, announce_date=None):
"""
Get the ASN and the IP Block announcing the IP at a specific date.
:param ip: IP address to search for
:param announce_date: Date of the announcement
:rtype: tuple
.. code-block:: python
(announce_date, asn, block)
.. note::
the returned announce_date might differ from the one
given as a parameter because some raw files are missing and we
don't have the information. In this case, the nearest known
| python | {
"resource": ""
} |
q257568 | IPASN.history | validation | def history(self, ip, days_limit=None):
"""
Get the full history of an IP. It takes time.
:param ip: IP address to search for
:param days_limit: Max amount of days to query. (None means no limit)
:rtype: list. For each day in the database: day, asn, block
""" | python | {
"resource": ""
} |
q257569 | IPASN.aggregate_history | validation | def aggregate_history(self, ip, days_limit=None):
"""
Get the full history of an IP, aggregate the result instead of
returning one line per day.
:param ip: IP address to search for
:param days_limit: Max amount of days to query. (None means no limit)
:rtype: list. For each change: FirstDay, LastDay, ASN, Block
"""
first_date = None
last_date = None
prec_asn = None
prec_block = None
for entry in self.history(ip, days_limit):
if entry is None:
continue
date, asn, block = entry
if first_date is None:
last_date = date
first_date = date
prec_asn = asn
| python | {
"resource": ""
} |
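The run-length grouping written out manually above can also be expressed with `itertools.groupby`; a sketch assuming `entries` is the `(date, asn, block)` stream produced by `history()` (range order follows that iteration order):

```python
from itertools import groupby

def aggregate_history_sketch(entries):
    aggregated = []
    valid = (e for e in entries if e is not None)
    for (asn, block), group in groupby(valid, key=lambda e: (e[1], e[2])):
        days = [date for date, _, _ in group]
        # first and last day of a run with an unchanged (asn, block)
        aggregated.append((days[0], days[-1], asn, block))
    return aggregated

# aggregate_history_sketch([('d1', 'AS1', 'b'), ('d2', 'AS1', 'b'),
#                           ('d3', 'AS2', 'b')])
# -> [('d1', 'd2', 'AS1', 'b'), ('d3', 'd3', 'AS2', 'b')]
```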
q257570 | downloadURL | validation | def downloadURL(url, filename):
"""
Unconditionally download the URL into a temporary directory.
When finished, the file is moved into the real directory.
This way another process will not attempt to extract an incomplete file.
"""
path_temp_bviewfile = os.path.join(c.raw_data, c.bview_dir, 'tmp', filename)
| python | {
"resource": ""
} |
q257571 | already_downloaded | validation | def already_downloaded(filename):
"""
Verify that the file has not already been downloaded.
"""
cur_file = os.path.join(c.bview_dir, filename) | python | {
"resource": ""
} |
q257572 | strToBool | validation | def strToBool(val):
"""
Helper function to turn a string representation of "true" into
boolean True.
"""
if isinstance(val, | python | {
"resource": ""
} |
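The body is truncated right after the `isinstance` check; a plausible completion under the assumption that non-boolean inputs are compared case-insensitively against "true":

```python
def str_to_bool(val):
    if isinstance(val, bool):
        return val  # already a boolean, pass through
    return str(val).strip().lower() == 'true'

assert str_to_bool('True') is True
assert str_to_bool('false') is False
```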
q257573 | get_page_url | validation | def get_page_url(page_num, current_app, url_view_name, url_extra_args, url_extra_kwargs, url_param_name, url_get_params, url_anchor):
"""
Helper function to return a valid URL string given the template tag parameters
"""
if url_view_name is not None:
# Add page param to the kwargs list. Overrides any previously set parameter of the same name.
url_extra_kwargs[url_param_name] = page_num
try:
url = reverse(url_view_name, args=url_extra_args, kwargs=url_extra_kwargs, current_app=current_app)
except NoReverseMatch as e: # Attempt to load view from application root, allowing the use of non-namespaced view names if your view is defined in the root application
if settings.SETTINGS_MODULE:
if django.VERSION < (1, 9, 0):
separator = '.'
else:
separator = ':' # Namespace separator changed to colon after 1.8
project_name = settings.SETTINGS_MODULE.split('.')[0]
try:
url = reverse(project_name + separator + url_view_name, args=url_extra_args, kwargs=url_extra_kwargs, current_app=current_app)
| python | {
"resource": ""
} |
q257574 | bootstrap_paginate | validation | def bootstrap_paginate(parser, token):
"""
Renders a Page object as a Twitter Bootstrap styled pagination bar.
Compatible with Bootstrap 3.x and 4.x only.
Example::
{% bootstrap_paginate page_obj range=10 %}
Named Parameters::
range - The size of the pagination bar (ie, if set to 10 then, at most,
10 page numbers will display at any given time) Defaults to
None, which shows all pages.
size - Accepts "small", and "large". Defaults to
None which is the standard size.
show_prev_next - Accepts "true" or "false". Determines whether or not
to show the previous and next page links. Defaults to
"true"
show_first_last - Accepts "true" or "false". Determines whether or not
to show the first and last page links. Defaults to
"false"
previous_label - The text to display for the previous page link.
Defaults to "←"
next_label - The text to display for the next page link. Defaults to
"→"
first_label - The text to display for the first page link. Defaults to
"«"
last_label - The text to display for the last page link. Defaults to
"»"
url_view_name - The named URL to use. Defaults to None. If None, then the
default template simply appends the url parameter as a
relative URL link, eg: <a href="?page=1">1</a>
url_param_name - The name of the parameter to use in the URL. If
url_view_name is set to None, this string is used as the
parameter name in the relative URL path. If a URL
name is specified, this string is used as the
parameter name passed into the reverse() method for
the URL.
url_extra_args - This is used only in conjunction with url_view_name.
When referencing a URL, additional arguments may be
| python | {
"resource": ""
} |
q257575 | get_regressions | validation | def get_regressions(
package_descriptors, targets,
building_repo_data, testing_repo_data, main_repo_data):
"""
For each package and target check if it is a regression.
This is the case if the main repo contains a package version which is
higher than in any of the other repos, or if any of the other repos does not
contain that package at all.
:return: a dict indexed by package names containing
dicts indexed by targets containing a boolean flag
"""
regressions = {}
for package_descriptor in package_descriptors.values():
pkg_name = package_descriptor.pkg_name
debian_pkg_name = package_descriptor.debian_pkg_name
regressions[pkg_name] = {}
for target in targets:
regressions[pkg_name][target] = False
| python | {
"resource": ""
} |
q257576 | _strip_version_suffix | validation | def _strip_version_suffix(version):
"""
Remove trailing junk from the version number.
>>> _strip_version_suffix('')
''
>>> _strip_version_suffix('None')
'None'
>>> _strip_version_suffix('1.2.3-4trusty-20140131-1359-+0000')
'1.2.3-4'
>>> _strip_version_suffix('1.2.3-foo')
'1.2.3'
| python | {
"resource": ""
} |
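The doctests pin the behavior down precisely enough to reconstruct a regex-based sketch: keep the leading dotted digits plus an optional `-<digits>` revision, and return non-matching inputs unchanged.

```python
import re

def _strip_version_suffix(version):
    match = re.match(r'^[\d.]+(?:-\d+)?', version)
    return match.group(0) if match else version

assert _strip_version_suffix('') == ''
assert _strip_version_suffix('None') == 'None'
assert _strip_version_suffix('1.2.3-4trusty-20140131-1359-+0000') == '1.2.3-4'
assert _strip_version_suffix('1.2.3-foo') == '1.2.3'
```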
q257577 | get_homogeneous | validation | def get_homogeneous(package_descriptors, targets, repos_data):
"""
For each package, check if its version within a repo is the same for all targets.
The version could be different in different repos though.
:return: a dict indexed by package names containing a boolean flag
"""
homogeneous = {}
for package_descriptor in package_descriptors.values():
pkg_name = package_descriptor.pkg_name
debian_pkg_name = package_descriptor.debian_pkg_name
versions = []
for repo_data in repos_data:
versions.append(set([]))
for target | python | {
"resource": ""
} |
q257578 | get_package_counts | validation | def get_package_counts(package_descriptors, targets, repos_data):
"""
Get the number of packages per target and repository.
:return: a dict indexed by targets containing
a list of integer values (one for each repo)
"""
counts = {}
for target in targets:
counts[target] = [0] * len(repos_data)
for package_descriptor in package_descriptors.values():
debian_pkg_name = package_descriptor.debian_pkg_name
| python | {
"resource": ""
} |
q257579 | get_jenkins_job_urls | validation | def get_jenkins_job_urls(
rosdistro_name, jenkins_url, release_build_name, targets):
"""
Get the Jenkins job urls for each target.
The placeholder {pkg} needs to be replaced with the ROS package name.
:return: a dict indexed by targets containing a string
"""
urls = {}
for target in targets:
view_name = get_release_view_name(
rosdistro_name, release_build_name,
target.os_name, target.os_code_name, target.arch)
base_url = jenkins_url + '/view/%s/job/%s__{pkg}__' % \
(view_name, view_name)
if target.arch == 'source':
| python | {
"resource": ""
} |
q257580 | configure_ci_jobs | validation | def configure_ci_jobs(
config_url, rosdistro_name, ci_build_name,
groovy_script=None, dry_run=False):
"""Configure all Jenkins CI jobs."""
config = get_config_index(config_url)
build_files = get_ci_build_files(config, rosdistro_name)
build_file = build_files[ci_build_name]
index = get_index(config.rosdistro_index_url)
# get targets
targets = []
for os_name in build_file.targets.keys():
for os_code_name in build_file.targets[os_name].keys():
for arch in build_file.targets[os_name][os_code_name]:
targets.append((os_name, os_code_name, arch))
print('The build file contains the following targets:')
for os_name, os_code_name, arch in targets:
print(' -', os_name, os_code_name, arch)
dist_file = get_distribution_file(index, rosdistro_name, build_file)
if not dist_file:
print('No distribution file matches the build file')
return
ci_view_name = get_ci_view_name(rosdistro_name)
# all further configuration will be handled by either the Jenkins API
# or by a generated groovy script
from ros_buildfarm.jenkins import connect
jenkins = connect(config.jenkins_url) if groovy_script is None else False
| python | {
"resource": ""
} |
q257581 | configure_ci_job | validation | def configure_ci_job(
config_url, rosdistro_name, ci_build_name,
os_name, os_code_name, arch,
config=None, build_file=None,
index=None, dist_file=None,
jenkins=None, views=None,
is_disabled=False,
groovy_script=None,
build_targets=None,
dry_run=False,
underlay_source_paths=None,
trigger_timer=None):
"""
Configure a single Jenkins CI job.
This includes the following steps:
- clone the ros_buildfarm repository
- write the distribution repository keys into files
- invoke the ci/run_ci_job.py script
"""
if config is None:
config = get_config_index(config_url)
if build_file is None:
build_files = get_ci_build_files(config, rosdistro_name)
build_file = build_files[ci_build_name]
# Overwrite build_file.targets if build_targets is specified
if build_targets is not None:
build_file.targets = build_targets
if index is None:
index = get_index(config.rosdistro_index_url)
if dist_file is None:
dist_file = get_distribution_file(index, rosdistro_name, build_file)
if not dist_file:
raise JobValidationError(
'No distribution file matches the build file')
if os_name not in build_file.targets.keys():
raise JobValidationError(
"Invalid OS name '%s' " % os_name +
'choose one of the following: ' +
| python | {
"resource": ""
} |
q257582 | write_groovy_script_and_configs | validation | def write_groovy_script_and_configs(
filename, content, job_configs, view_configs=None):
"""Write out the groovy script and configs to file.
This writes the reconfigure script to the file location
and places the expanded configs in subdirectories 'view_configs' /
'job_configs' that the script can then access when run.
"""
with open(filename, 'w') as h:
h.write(content)
if view_configs:
view_config_dir = os.path.join(os.path.dirname(filename), 'view_configs')
if not os.path.isdir(view_config_dir):
os.makedirs(view_config_dir)
for config_name, config_body in view_configs.items():
config_filename = os.path.join(view_config_dir, config_name)
with open(config_filename, 'w') as config_fh:
config_fh.write(config_body)
| python | {
"resource": ""
} |
q257583 | topological_order_packages | validation | def topological_order_packages(packages):
"""
Order packages topologically.
First returning packages which have message generators and then
the rest based on all direct depends and indirect recursive run_depends.
:param packages: A dict mapping relative paths to ``Package`` objects ``dict``
:returns: A list of tuples containing the relative path and a ``Package`` object, ``list``
"""
from catkin_pkg.topological_order import _PackageDecorator
from catkin_pkg.topological_order import _sort_decorated_packages
decorators_by_name = {}
for path, package in packages.items():
decorators_by_name[package.name] = _PackageDecorator(package, path)
# calculate transitive dependencies
for decorator in decorators_by_name.values():
| python | {
"resource": ""
} |
q257584 | _unarmor_pem | validation | def _unarmor_pem(data, password=None):
"""
Removes PEM-encoding from a public key, private key or certificate. If the
private key is encrypted, the password will be used to decrypt it.
:param data:
A byte string of the PEM-encoded data
:param password:
A byte string of the encryption password, or None
:return:
A 3-element tuple in the format: (key_type, algorithm, der_bytes). The
key_type will be a unicode string of "public key", "private key" or
"certificate". The algorithm will be a unicode string of "rsa", "dsa"
or "ec".
"""
object_type, headers, der_bytes = pem.unarmor(data)
type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)' | python | {
"resource": ""
} |
q257585 | _decrypt_encrypted_data | validation | def _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password):
"""
Decrypts encrypted ASN.1 data
:param encryption_algorithm_info:
An instance of asn1crypto.pkcs5.Pkcs5EncryptionAlgorithm
:param encrypted_content:
A byte string of the encrypted content
:param password:
A byte string of the encrypted content's password
:return:
A byte string of the decrypted plaintext
"""
decrypt_func = crypto_funcs[encryption_algorithm_info.encryption_cipher]
# Modern, PKCS#5 PBES2-based encryption
if encryption_algorithm_info.kdf == 'pbkdf2':
if encryption_algorithm_info.encryption_cipher == 'rc5':
raise ValueError(pretty_message(
'''
PBES2 encryption scheme utilizing RC5 encryption is not supported
'''
))
enc_key = pbkdf2(
encryption_algorithm_info.kdf_hmac,
password,
encryption_algorithm_info.kdf_salt,
encryption_algorithm_info.kdf_iterations,
encryption_algorithm_info.key_length
)
enc_iv = encryption_algorithm_info.encryption_iv
plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)
elif encryption_algorithm_info.kdf == 'pbkdf1':
derived_output = pbkdf1(
encryption_algorithm_info.kdf_hmac,
password,
encryption_algorithm_info.kdf_salt,
encryption_algorithm_info.kdf_iterations,
encryption_algorithm_info.key_length + 8
)
enc_key = derived_output[0:8]
| python | {
"resource": ""
} |
q257586 | _setup_evp_encrypt_decrypt | validation | def _setup_evp_encrypt_decrypt(cipher, data):
"""
Creates an EVP_CIPHER pointer object and determines the buffer size
necessary for the parameter specified.
:param evp_cipher_ctx:
An EVP_CIPHER_CTX pointer
:param cipher:
A unicode string of "aes128", "aes192", "aes256", "des",
"tripledes_2key", "tripledes_3key", "rc2", "rc4"
:param key:
The key byte string
:param data:
The plaintext or ciphertext as a byte string
:param padding:
If padding is to be used
:return:
A 2-element tuple with the first element being an EVP_CIPHER pointer
and the second being an integer that is the required buffer size
"""
evp_cipher = {
'aes128': libcrypto.EVP_aes_128_cbc,
'aes192': libcrypto.EVP_aes_192_cbc,
'aes256': libcrypto.EVP_aes_256_cbc,
| python | {
"resource": ""
} |
q257587 | _advapi32_interpret_rsa_key_blob | validation | def _advapi32_interpret_rsa_key_blob(bit_size, blob_struct, blob):
"""
Takes a CryptoAPI RSA private key blob and converts it into the ASN.1
structures for the public and private keys
:param bit_size:
The integer bit size of the key
:param blob_struct:
An instance of the advapi32.RSAPUBKEY struct
:param blob:
A byte string of the binary data after the header
:return:
A 2-element tuple of (asn1crypto.keys.PublicKeyInfo,
asn1crypto.keys.PrivateKeyInfo)
"""
len1 = bit_size // 8
len2 = bit_size // 16
prime1_offset = len1
prime2_offset = prime1_offset + len2
exponent1_offset = prime2_offset + len2
exponent2_offset = exponent1_offset + len2
coefficient_offset = exponent2_offset + len2
private_exponent_offset = coefficient_offset + len2
public_exponent = blob_struct.rsapubkey.pubexp
modulus = int_from_bytes(blob[0:prime1_offset][::-1])
prime1 = int_from_bytes(blob[prime1_offset:prime2_offset][::-1])
prime2 = int_from_bytes(blob[prime2_offset:exponent1_offset][::-1])
exponent1 = int_from_bytes(blob[exponent1_offset:exponent2_offset][::-1])
exponent2 = int_from_bytes(blob[exponent2_offset:coefficient_offset][::-1])
coefficient = int_from_bytes(blob[coefficient_offset:private_exponent_offset][::-1])
private_exponent = int_from_bytes(blob[private_exponent_offset:private_exponent_offset + len1][::-1])
public_key_info = keys.PublicKeyInfo({
'algorithm': keys.PublicKeyAlgorithm({
| python | {
"resource": ""
} |
q257588 | _advapi32_interpret_dsa_key_blob | validation | def _advapi32_interpret_dsa_key_blob(bit_size, public_blob, private_blob):
"""
Takes a CryptoAPI DSS private key blob and converts it into the ASN.1
structures for the public and private keys
:param bit_size:
The integer bit size of the key
:param public_blob:
A byte string of the binary data after the public key header
:param private_blob:
A byte string of the binary data after the private key header
:return:
A 2-element tuple of (asn1crypto.keys.PublicKeyInfo,
asn1crypto.keys.PrivateKeyInfo)
"""
len1 = 20
len2 = bit_size // 8
q_offset = len2
g_offset = q_offset + len1
x_offset = g_offset + len2
y_offset = x_offset
p = int_from_bytes(private_blob[0:q_offset][::-1])
q = int_from_bytes(private_blob[q_offset:g_offset][::-1])
g = int_from_bytes(private_blob[g_offset:x_offset][::-1])
x = int_from_bytes(private_blob[x_offset:x_offset + len1][::-1])
y = int_from_bytes(public_blob[y_offset:y_offset + len2][::-1])
public_key_info = keys.PublicKeyInfo({
'algorithm': keys.PublicKeyAlgorithm({
| python | {
"resource": ""
} |
q257589 | _advapi32_load_key | validation | def _advapi32_load_key(key_object, key_info, container):
"""
Loads a certificate, public key or private key into a Certificate,
PublicKey or PrivateKey object via CryptoAPI
:param key_object:
An asn1crypto.x509.Certificate, asn1crypto.keys.PublicKeyInfo or
asn1crypto.keys.PrivateKeyInfo object
:param key_info:
An asn1crypto.keys.PublicKeyInfo or asn1crypto.keys.PrivateKeyInfo
object
:param container:
The class of the object to hold the key_handle
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
oscrypto.errors.AsymmetricKeyError - when the key is incompatible with the OS crypto library
OSError - when an error is returned by the OS crypto library
:return:
A PrivateKey, PublicKey or Certificate object, based on container
"""
key_type = 'public' if isinstance(key_info, keys.PublicKeyInfo) else 'private'
algo = key_info.algorithm
if algo == 'rsa':
provider = Advapi32Const.MS_ENH_RSA_AES_PROV
else:
provider = Advapi32Const.MS_ENH_DSS_DH_PROV
context_handle = None
key_handle = None
try:
context_handle = open_context_handle(provider, verify_only=key_type == 'public')
blob = _advapi32_create_blob(key_info, key_type, algo)
buffer_ = buffer_from_bytes(blob)
key_handle_pointer = new(advapi32, 'HCRYPTKEY *')
res = advapi32.CryptImportKey(
context_handle,
| python | {
"resource": ""
} |
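
For reference, a hypothetical caller would not invoke this loader directly but go through oscrypto's public wrappers, which dispatch to it on Windows; the file names and password below are placeholders.

from oscrypto import asymmetric

private_key = asymmetric.load_private_key('server.key', 'password')
certificate = asymmetric.load_certificate('server.crt')
print(private_key.algorithm, private_key.bit_size)
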
q257590 | rsa_pkcs1v15_verify | validation | def rsa_pkcs1v15_verify(certificate_or_public_key, signature, data, hash_algorithm):
"""
Verifies an RSASSA-PKCS-v1.5 signature.
When the hash_algorithm is "raw", the operation is identical to RSA
public key decryption. That is: the data is not hashed and no ASN.1
structure with an algorithm identifier of the hash algorithm is placed in
the encrypted byte string.
:param certificate_or_public_key:
A Certificate or PublicKey instance to verify the signature with
:param signature:
A byte string of the signature to verify
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"
| python | {
"resource": ""
} |
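
A hedged usage sketch of the public-API round trip; a bad signature surfaces as oscrypto.errors.SignatureError rather than a boolean return value.

from oscrypto import asymmetric, errors

public_key, private_key = asymmetric.generate_pair('rsa', bit_size=2048)
signature = asymmetric.rsa_pkcs1v15_sign(private_key, b'data', 'sha256')
try:
    asymmetric.rsa_pkcs1v15_verify(public_key, signature, b'data', 'sha256')
except errors.SignatureError:
    print('invalid signature')
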
q257591 | _advapi32_verify | validation | def _advapi32_verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):
"""
Verifies an RSA, DSA or ECDSA signature via CryptoAPI
:param certificate_or_public_key:
A Certificate or PublicKey instance to verify the signature with
:param signature:
A byte string of the signature to verify
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"
:param rsa_pss_padding:
If PSS padding should be used for RSA keys
:raises:
oscrypto.errors.SignatureError - when the signature is determined to be invalid
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
"""
algo = certificate_or_public_key.algorithm
if algo == 'rsa' and rsa_pss_padding:
hash_length = {
'sha1': 20,
'sha224': 28,
'sha256': 32,
'sha384': 48,
'sha512': 64
}.get(hash_algorithm, 0)
decrypted_signature = raw_rsa_public_crypt(certificate_or_public_key, signature)
key_size = certificate_or_public_key.bit_size
if not verify_pss_padding(hash_algorithm, hash_length, key_size, data, decrypted_signature):
raise SignatureError('Signature is invalid')
return
if algo == 'rsa' and hash_algorithm == 'raw':
padded_plaintext = raw_rsa_public_crypt(certificate_or_public_key, signature)
try:
plaintext = remove_pkcs1v15_signature_padding(certificate_or_public_key.byte_size, padded_plaintext)
if not constant_compare(plaintext, data):
raise ValueError()
except ValueError:
raise SignatureError('Signature is invalid')
return
hash_handle = None
try:
alg_id = {
'md5': Advapi32Const.CALG_MD5,
'sha1': Advapi32Const.CALG_SHA1,
'sha256': Advapi32Const.CALG_SHA_256,
'sha384': Advapi32Const.CALG_SHA_384,
'sha512': Advapi32Const.CALG_SHA_512,
}[hash_algorithm]
hash_handle_pointer = new(advapi32, 'HCRYPTHASH *')
res = advapi32.CryptCreateHash(
certificate_or_public_key.context_handle,
alg_id,
null(),
0,
| python | {
"resource": ""
} |
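
The raw branch above decrypts the signature, strips EMSA-PKCS1-v1_5 padding, and does a constant-time compare. A self-contained sketch of what those two helpers do (not oscrypto's actual implementations):

import hmac

def strip_pkcs1v15_sig_padding(key_byte_size, padded):
    # EMSA-PKCS1-v1_5 (RFC 8017 9.2): 0x00 0x01 FF..FF 0x00 || payload
    if len(padded) != key_byte_size or padded[:2] != b'\x00\x01':
        raise ValueError('invalid padding')
    sep = padded.index(b'\x00', 2)           # raises ValueError if absent
    if sep < 10 or any(b != 0xff for b in padded[2:sep]):
        raise ValueError('invalid padding')  # needs >= 8 bytes of 0xFF
    return padded[sep + 1:]

# hmac.compare_digest plays the role of constant_compare() above
assert hmac.compare_digest(b'abc', b'abc')
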
q257592 | _bcrypt_verify | validation | def _bcrypt_verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):
"""
Verifies an RSA, DSA or ECDSA signature via CNG
:param certificate_or_public_key:
A Certificate or PublicKey instance to verify the signature with
:param signature:
A byte string of the signature to verify
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"
:param rsa_pss_padding:
If PSS padding should be used for RSA keys
:raises:
oscrypto.errors.SignatureError - when the signature is determined to be invalid
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
"""
if hash_algorithm == 'raw':
digest = data
else:
hash_constant = {
'md5': BcryptConst.BCRYPT_MD5_ALGORITHM,
'sha1': BcryptConst.BCRYPT_SHA1_ALGORITHM,
'sha256': BcryptConst.BCRYPT_SHA256_ALGORITHM,
'sha384': BcryptConst.BCRYPT_SHA384_ALGORITHM,
'sha512': BcryptConst.BCRYPT_SHA512_ALGORITHM
}[hash_algorithm]
digest = getattr(hashlib, hash_algorithm)(data).digest()
padding_info = null()
flags = 0
| python | {
"resource": ""
} |
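
The BCRYPT_PAD_PSS path delegates the padding checks to CNG, but the PSS construction rests on the MGF1 mask generation function; a minimal reference sketch (RFC 8017 B.2.1), not the CNG internals:

import hashlib

def mgf1(seed, mask_len, hash_name='sha256'):
    # Concatenate Hash(seed || 4-byte counter) blocks, truncate to mask_len
    out = b''
    counter = 0
    while len(out) < mask_len:
        out += hashlib.new(hash_name, seed + counter.to_bytes(4, 'big')).digest()
        counter += 1
    return out[:mask_len]

assert len(mgf1(b'seed', 100)) == 100
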
q257593 | dsa_sign | validation | def dsa_sign(private_key, data, hash_algorithm):
"""
Generates a DSA signature
:param private_key:
The PrivateKey to generate the signature with
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384" or "sha512"
| python | {
"resource": ""
} |
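
The underlying DSA math, worked with toy parameters (insecure sizes, illustration only; h_m stands in for the message digest reduced mod q, and pow(x, -1, q) needs Python 3.8+):

# Toy DSA sign/verify.
p, q, g = 23, 11, 4               # q | p - 1; g has order q mod p
x = 7                             # private key
y = pow(g, x, p)                  # public key (= 8)

h_m = 3                           # stand-in for H(message) mod q
k = 5                             # per-signature secret, must never repeat
r = pow(g, k, p) % q              # = 1
s = (pow(k, -1, q) * (h_m + x * r)) % q   # = 2

w = pow(s, -1, q)                 # = 6
u1, u2 = (h_m * w) % q, (r * w) % q       # = 7, 6
assert (pow(g, u1, p) * pow(y, u2, p) % p) % q == r
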
q257594 | ecdsa_sign | validation | def ecdsa_sign(private_key, data, hash_algorithm):
"""
Generates an ECDSA signature
:param private_key:
The PrivateKey to generate the signature with
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384" or "sha512"
| python | {
"resource": ""
} |
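
A hypothetical end-to-end use of the public wrapper with a generated NIST P-256 key; ecdsa_verify raises SignatureError on mismatch:

from oscrypto import asymmetric

public_key, private_key = asymmetric.generate_pair('ec', curve='secp256r1')
signature = asymmetric.ecdsa_sign(private_key, b'data', 'sha256')
asymmetric.ecdsa_verify(public_key, signature, b'data', 'sha256')
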
q257595 | _advapi32_sign | validation | def _advapi32_sign(private_key, data, hash_algorithm, rsa_pss_padding=False):
"""
Generates an RSA, DSA or ECDSA signature via CryptoAPI
:param private_key:
The PrivateKey to generate the signature with
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"
:param rsa_pss_padding:
If PSS padding should be used for RSA keys
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the signature
"""
algo = private_key.algorithm
if algo == 'rsa' and hash_algorithm == 'raw':
padded_data = add_pkcs1v15_signature_padding(private_key.byte_size, data)
return raw_rsa_private_crypt(private_key, padded_data)
if algo == 'rsa' and rsa_pss_padding:
hash_length = {
'sha1': 20,
'sha224': 28,
'sha256': 32,
'sha384': 48,
'sha512': 64
}.get(hash_algorithm, 0)
padded_data = add_pss_padding(hash_algorithm, hash_length, private_key.bit_size, data)
return raw_rsa_private_crypt(private_key, padded_data)
if private_key.algorithm == 'dsa' and hash_algorithm == 'md5':
raise ValueError(pretty_message(
'''
Windows does not support md5 signatures with DSA keys
'''
))
hash_handle = None
try:
alg_id = {
'md5': Advapi32Const.CALG_MD5,
'sha1': Advapi32Const.CALG_SHA1,
'sha256': Advapi32Const.CALG_SHA_256,
'sha384': Advapi32Const.CALG_SHA_384,
'sha512': Advapi32Const.CALG_SHA_512,
}[hash_algorithm]
hash_handle_pointer = new(advapi32, 'HCRYPTHASH *')
res = advapi32.CryptCreateHash(
private_key.context_handle,
alg_id,
null(),
0,
hash_handle_pointer
)
handle_error(res)
hash_handle = unwrap(hash_handle_pointer)
res = advapi32.CryptHashData(hash_handle, | python | {
"resource": ""
} |
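
CryptSignHash returns DSS signatures as two fixed-width halves (r then s), and most verifiers expect the DER SEQUENCE form instead. A sketch of that conversion with asn1crypto (already a dependency of this module), assuming the little-endian half layout CryptoAPI uses; the 40-byte input is a stand-in, not real output:

from asn1crypto.algos import DSASignature

raw = bytes(range(40))                    # stand-in for a CryptoAPI DSS signature
half = len(raw) // 2
r = int.from_bytes(raw[:half], 'little')  # assumed little-endian halves
s = int.from_bytes(raw[half:], 'little')
der_signature = DSASignature({'r': r, 's': s}).dump()
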
q257596 | _bcrypt_sign | validation | def _bcrypt_sign(private_key, data, hash_algorithm, rsa_pss_padding=False):
"""
Generates an RSA, DSA or ECDSA signature via CNG
:param private_key:
The PrivateKey to generate the signature with
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"
:param rsa_pss_padding:
If PSS padding should be used for RSA keys
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the signature
"""
if hash_algorithm == 'raw':
digest = data
else:
hash_constant = {
'md5': BcryptConst.BCRYPT_MD5_ALGORITHM,
'sha1': BcryptConst.BCRYPT_SHA1_ALGORITHM,
'sha256': BcryptConst.BCRYPT_SHA256_ALGORITHM,
'sha384': BcryptConst.BCRYPT_SHA384_ALGORITHM,
'sha512': BcryptConst.BCRYPT_SHA512_ALGORITHM
}[hash_algorithm]
digest = getattr(hashlib, hash_algorithm)(data).digest()
padding_info = null()
flags = 0
if private_key.algorithm == 'rsa':
if rsa_pss_padding:
hash_length = {
'md5': 16,
'sha1': 20,
'sha256': 32,
'sha384': 48,
'sha512': 64
}[hash_algorithm]
flags = BcryptConst.BCRYPT_PAD_PSS
padding_info_struct_pointer = struct(bcrypt, 'BCRYPT_PSS_PADDING_INFO')
padding_info_struct = unwrap(padding_info_struct_pointer)
# This has to be assigned to a variable to prevent cffi from gc'ing it
hash_buffer = buffer_from_unicode(hash_constant)
padding_info_struct.pszAlgId = cast(bcrypt, 'wchar_t *', hash_buffer)
padding_info_struct.cbSalt = hash_length
else:
flags = BcryptConst.BCRYPT_PAD_PKCS1
padding_info_struct_pointer = struct(bcrypt, 'BCRYPT_PKCS1_PADDING_INFO')
padding_info_struct = unwrap(padding_info_struct_pointer)
# This has to be assigned to a variable to prevent cffi from gc'ing it
if hash_algorithm == 'raw':
padding_info_struct.pszAlgId = null()
else:
hash_buffer = buffer_from_unicode(hash_constant)
padding_info_struct.pszAlgId = cast(bcrypt, 'wchar_t *', hash_buffer)
padding_info = cast(bcrypt, 'void *', padding_info_struct_pointer)
| python | {
"resource": ""
} |
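
The cbSalt values hard-coded above are simply the digest lengths, i.e. the common "salt length equals hash length" PSS convention:

import hashlib

for name in ('md5', 'sha1', 'sha256', 'sha384', 'sha512'):
    print(name, hashlib.new(name).digest_size)   # 16, 20, 32, 48, 64
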
q257597 | _advapi32_encrypt | validation | def _advapi32_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):
"""
Encrypts a value using an RSA public key via CryptoAPI
:param certificate_or_public_key:
A Certificate or PublicKey instance to encrypt with
:param data:
A byte string of the data to encrypt
:param rsa_oaep_padding:
If OAEP padding should be used instead of PKCS#1 v1.5
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
"""
flags = 0
if rsa_oaep_padding:
flags = Advapi32Const.CRYPT_OAEP
out_len = new(advapi32, 'DWORD *', len(data))
res = advapi32.CryptEncrypt(
certificate_or_public_key.ex_key_handle,
null(),
True,
flags,
null(),
out_len,
0
| python | {
"resource": ""
} |
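
When CRYPT_OAEP is not set, the provider applies EME-PKCS1-v1_5 encryption padding. A self-contained sketch of that encoding (RFC 8017 7.2.1), illustrating what CryptEncrypt produces rather than how it produces it:

import os

def eme_pkcs1v15_pad(message, key_byte_size):
    # 0x00 0x02 || PS || 0x00 || M, where PS is >= 8 nonzero random bytes
    ps_len = key_byte_size - len(message) - 3
    if ps_len < 8:
        raise ValueError('message too long')
    ps = b''
    while len(ps) < ps_len:
        ps += bytes(b for b in os.urandom(ps_len - len(ps)) if b != 0)
    return b'\x00\x02' + ps + b'\x00' + message

padded = eme_pkcs1v15_pad(b'secret', 128)   # 128 bytes = a 1024-bit key
assert len(padded) == 128 and padded[:2] == b'\x00\x02'
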
q257598 | _bcrypt_encrypt | validation | def _bcrypt_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):
"""
Encrypts a value using an RSA public key via CNG
:param certificate_or_public_key:
A Certificate or PublicKey instance to encrypt with
:param data:
A byte string of the data to encrypt
:param rsa_oaep_padding:
If OAEP padding should be used instead of PKCS#1 v1.5
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the ciphertext
"""
flags = BcryptConst.BCRYPT_PAD_PKCS1
if rsa_oaep_padding is True:
flags = BcryptConst.BCRYPT_PAD_OAEP
padding_info_struct_pointer = struct(bcrypt, 'BCRYPT_OAEP_PADDING_INFO')
| python | {
"resource": ""
} |
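
The OAEP branch fills a BCRYPT_OAEP_PADDING_INFO struct and lets CNG do the encoding. For reference, a sketch of EME-OAEP itself (RFC 8017 7.1.1), assuming the SHA-1 label hash that oscrypto's OAEP wrappers use; mgf1 repeats the helper sketched earlier so the block stands alone:

import hashlib, os

def mgf1(seed, length, hash_name='sha1'):
    d = hashlib.new(hash_name).digest_size
    out = b''.join(
        hashlib.new(hash_name, seed + c.to_bytes(4, 'big')).digest()
        for c in range(-(-length // d)))
    return out[:length]

def oaep_pad(message, key_byte_size, hash_name='sha1'):
    h_len = hashlib.new(hash_name).digest_size
    l_hash = hashlib.new(hash_name, b'').digest()    # hash of the empty label
    ps = b'\x00' * (key_byte_size - len(message) - 2 * h_len - 2)
    db = l_hash + ps + b'\x01' + message
    seed = os.urandom(h_len)
    masked_db = bytes(a ^ b for a, b in zip(db, mgf1(seed, len(db), hash_name)))
    masked_seed = bytes(a ^ b for a, b in zip(seed, mgf1(masked_db, h_len, hash_name)))
    return b'\x00' + masked_seed + masked_db

assert len(oaep_pad(b'secret', 128)) == 128
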
q257599 | _advapi32_decrypt | validation | def _advapi32_decrypt(private_key, ciphertext, rsa_oaep_padding=False):
"""
Decrypts a value using an RSA private key via CryptoAPI
:param private_key:
A PrivateKey instance to decrypt with
:param ciphertext:
A byte string of the data to decrypt
:param rsa_oaep_padding:
If OAEP padding should be used instead of PKCS#1 v1.5
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the plaintext
"""
| python | {
"resource": ""
} |
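
A hypothetical round trip through the public wrappers, which route to these encrypt/decrypt paths on Windows:

from oscrypto import asymmetric

public_key, private_key = asymmetric.generate_pair('rsa', bit_size=2048)
ciphertext = asymmetric.rsa_pkcs1v15_encrypt(public_key, b'secret')
assert asymmetric.rsa_pkcs1v15_decrypt(private_key, ciphertext) == b'secret'
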