_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def get_trip_counts_per_day(self):
    """
    Get trip counts per day between the start and end day of the feed.

    Days with no trips are filled in with a count of zero so that the
    returned frame covers the whole span without gaps (needed by some
    visualizations).

    Returns
    -------
    trip_counts : pandas.DataFrame
        Has columns "date" (datetime), "date_str" (str) and "trip_counts" (int)
    """
    counts = pd.read_sql_query(
        "SELECT date, count(*) AS number_of_trips FROM day_trips GROUP BY date",
        self.conn, index_col="date")
    first_date = datetime.datetime.strptime(counts.index.min(), '%Y-%m-%d')
    last_date = datetime.datetime.strptime(counts.index.max(), '%Y-%m-%d')
    span_days = (last_date - first_date).days
    dates = [first_date + datetime.timedelta(days=offset)
             for offset in range(span_days + 1)]
    date_strings = [day.strftime("%Y-%m-%d") for day in dates]
    # Missing dates mean that no trips take place on that day -> count is 0.
    trip_counts = [
        int(counts.loc[date_string, 'number_of_trips']) if date_string in counts.index else 0
        for date_string in date_strings
    ]
    # sanity check: every date present in the database is covered
    for known_date in counts.index:
        assert known_date in date_strings
    return pd.DataFrame({"date": dates, "date_str": date_strings, "trip_counts": trip_counts})
def get_spreading_trips(self, start_time_ut, lat, lon,
                        max_duration_ut=4 * 3600,
                        min_transfer_time=30,
                        use_shapes=False):
    """
    Compute complete single-source shortest-path spreading dynamics as
    trips ("events"), starting from a specific point and time.

    Parameters
    ----------
    start_time_ut: number
        Start time of the spreading.
    lat: float
        latitude of the spreading seed location
    lon: float
        longitude of the spreading seed location
    max_duration_ut: int
        maximum duration of the spreading process (in seconds)
    min_transfer_time : int
        minimum transfer time in seconds
    use_shapes : bool
        whether to include shapes

    Returns
    -------
    trips: dict
        trips['trips'] is a list of dicts, each with keys
        'lats' (list of latitudes), 'lons' (list of longitudes),
        'times' (list of passage times), 'route_type' (GTFS vehicle
        type, or -1 for walking) and 'name' (route name).
    """
    # NOTE(review): imported inside the function — presumably to avoid a
    # circular import at module load time; confirm before moving it out.
    from gtfspy.spreading.spreader import Spreader
    seed_spreader = Spreader(self, start_time_ut, lat, lon,
                             max_duration_ut, min_transfer_time, use_shapes)
    return seed_spreader.spread()
def get_closest_stop(self, lat, lon):
    """
    Return the index of the stop closest to a given location.

    Parameters
    ----------
    lat: float
        latitude coordinate of the location
    lon: float
        longitude coordinate of the location

    Returns
    -------
    stop_I: int | None
        index of the closest stop in the database
        (None if the stops table is empty)
    """
    closest_stop_I = None
    closest_distance = float("inf")
    cursor = self.conn.cursor()
    for stop_I, stop_lat, stop_lon in cursor.execute("SELECT stop_I, lat, lon FROM stops"):
        distance = wgs84_distance(lat, lon, stop_lat, stop_lon)
        if distance < closest_distance:
            closest_distance = distance
            closest_stop_I = stop_I
    return closest_stop_I
def tripI_takes_place_on_dsut(self, trip_I, day_start_ut):
    """
    Check whether a trip takes place during a given day.

    Parameters
    ----------
    trip_I : int
        index of the trip in the gtfs data base
    day_start_ut : int
        the starting time of the day in unix time (seconds)

    Returns
    -------
    takes_place: bool
        True if the trip takes place during the given day, False otherwise
    """
    cursor = self.conn.cursor()
    matches = cursor.execute(
        "SELECT * FROM days WHERE trip_I=? AND day_start_ut=?",
        (trip_I, day_start_ut)).fetchall()
    if not matches:
        return False
    # a (trip_I, day) pair should be unique in the days table
    assert len(matches) == 1, 'On a day, a trip_I should be present at most once'
    return True
def day_start_ut(self, ut):
    """
    Convert unixtime to unixtime on GTFS start-of-day.

    GTFS defines the start of a day as "noon minus 12 hours" to solve
    most DST-related problems. This means that on DST-changing days,
    the day start isn't midnight. This function isn't idempotent:
    running it twice on the "move clocks backwards" day will result in
    being one day too early.

    Parameters
    ----------
    ut: int
        Unixtime

    Returns
    -------
    ut: int
        Unixtime corresponding to start of day
    """
    # mktime depends on the process timezone, so switch to the GTFS one first
    previous_tz = self.set_current_process_time_zone()
    local = time.localtime(ut)
    # local noon of the same calendar day, then subtract 12 hours
    noon_ut = time.mktime((local.tm_year, local.tm_mon, local.tm_mday,
                           12, 00, 0, 0, 0, -1))
    set_process_timezone(previous_tz)
    return noon_ut - 43200
def increment_day_start_ut(self, day_start_ut, n_days=1):
    """Increment the GTFS-definition of "day start".

    Parameters
    ----------
    day_start_ut : int
        unixtime of the previous start of day. If this time is between
        12:00 or greater, there *will* be bugs. To solve this, run the
        input through day_start_ut first.
    n_days: int
        number of days to increment
    """
    previous_tz = self.set_current_process_time_zone()
    noon = time.localtime(day_start_ut + 43200)  # noon of the current day
    # let mktime normalize the (possibly out-of-range) day-of-month
    shifted = time.mktime((noon.tm_year, noon.tm_mon, noon.tm_mday + n_days,
                           12, 00, 0, 0, 0, -1)) - 43200
    set_process_timezone(previous_tz)
    return shifted
q257506 | GTFS._get_possible_day_starts | validation | def _get_possible_day_starts(self, start_ut, end_ut, max_time_overnight=None):
"""
Get all possible day start times between start_ut and end_ut
Currently this function is used only by get_tripIs_within_range_by_dsut
Parameters
----------
start_ut : list<int>
start time in unix time
end_ut : list<int>
end time in unix time
max_time_overnight : list<int>
the maximum length of time that a trip can take place on
during the next day (i.e. after midnight run times like 25:35)
Returns
-------
day_start_times_ut : list
list of ints (unix times in seconds) for returning all possible day
start times
start_times_ds : list
list of ints (unix times in seconds) stating the valid start time in
day seconds
end_times_ds : list
list of ints (unix times in seconds) stating the valid end times in
day_seconds
"""
if max_time_overnight is None:
# 7 hours:
max_time_overnight = 7 * 60 * 60
# sanity checks for the timezone parameter
# assert timezone < 14
# assert timezone > -14
# tz_seconds = int(timezone*3600)
assert start_ut < end_ut
start_day_ut = self.day_start_ut(start_ut)
# start_day_ds = int(start_ut+tz_seconds) % seconds_in_a_day #??? needed?
start_day_ds = start_ut - start_day_ut
# assert (start_day_ut+tz_seconds) % seconds_in_a_day == 0
end_day_ut = self.day_start_ut(end_ut)
# end_day_ds = int(end_ut+tz_seconds) % seconds_in_a_day #??? needed?
# end_day_ds = end_ut - end_day_ut
# assert (end_day_ut+tz_seconds) % seconds_in_a_day == 0
# If we are early enough in a day that we might have trips from
# the previous day still running, decrement the start day.
if start_day_ds < max_time_overnight:
start_day_ut = self.increment_day_start_ut(start_day_ut, n_days=-1)
# day_start_times_ut = range(start_day_ut, end_day_ut+seconds_in_a_day, seconds_in_a_day)
# Create a list of all possible day start times. This is roughly
# range(day_start_ut, day_end_ut+1day, 1day).
day_start_times_ut = [start_day_ut]
while day_start_times_ut[-1] < end_day_ut:
day_start_times_ut.append(self.increment_day_start_ut(day_start_times_ut[-1]))
start_times_ds = []
end_times_ds = []
# For every possible day start:
for dsut in day_start_times_ut:
# start day_seconds starts at either zero, or time - daystart
day_start_ut = max(0, start_ut - dsut)
start_times_ds.append(day_start_ut)
# end day_seconds is time-day_start
day_end_ut = end_ut - dsut
end_times_ds.append(day_end_ut)
# Return three tuples which can be zip:ped together.
return day_start_times_ut, start_times_ds, end_times_ds | python | {
"resource": ""
} |
def stop(self, stop_I):
    """
    Get all stop data as a pandas DataFrame for an individual stop.

    Parameters
    ----------
    stop_I : int
        stop index

    Returns
    -------
    stop: pandas.DataFrame
        one row per matching stop (empty if no such stop_I exists)
    """
    # Parameterized query instead of building SQL with str.format:
    # avoids injection and quoting issues.
    return pd.read_sql_query("SELECT * FROM stops WHERE stop_I=?",
                             self.conn, params=(stop_I,))
def get_transit_events(self, start_time_ut=None, end_time_ut=None, route_type=None):
    """
    Obtain a list of events that take place during a time interval.

    Each event needs to only partially overlap the given time interval.
    Does not include walking events.

    Parameters
    ----------
    start_time_ut : int, optional
        start of the time interval in unix time (seconds)
    end_time_ut: int, optional
        end of the time interval in unix time (seconds)
    route_type: int, optional
        consider only events for this route_type

    Returns
    -------
    events: pandas.DataFrame
        with columns from_stop_I, to_stop_I, dep_time_ut, arr_time_ut,
        shape_id, route_type, route_id, trip_I, duration, from_seq,
        to_seq, route_I

    See also
    --------
    get_transit_events_in_time_span : an older version of the same thing
    """
    table_name = self._get_day_trips_table_name()
    event_query = ("SELECT stop_I, seq, trip_I, route_I, routes.route_id AS route_id, "
                   "routes.type AS route_type, shape_id, "
                   "day_start_ut+dep_time_ds AS dep_time_ut, "
                   "day_start_ut+arr_time_ds AS arr_time_ut "
                   "FROM " + table_name + " "
                   "JOIN trips USING(trip_I) "
                   "JOIN routes USING(route_I) "
                   "JOIN stop_times USING(trip_I)")
    where_clauses = []
    # `is not None` (not truthiness): 0 is a valid unix timestamp and the
    # previous falsy check silently dropped the filter for it.
    if end_time_ut is not None:
        where_clauses.append(table_name + ".start_time_ut< {end_time_ut}".format(end_time_ut=end_time_ut))
        where_clauses.append("dep_time_ut <={end_time_ut}".format(end_time_ut=end_time_ut))
    if start_time_ut is not None:
        where_clauses.append(table_name + ".end_time_ut > {start_time_ut}".format(start_time_ut=start_time_ut))
        where_clauses.append("arr_time_ut >={start_time_ut}".format(start_time_ut=start_time_ut))
    if route_type is not None:
        assert route_type in ALL_ROUTE_TYPES
        where_clauses.append("routes.type={route_type}".format(route_type=route_type))
    # str.join replaces the manual `if i is not 0` index loop (identity
    # comparison against an int literal is implementation-dependent and a
    # SyntaxWarning on modern Python).
    if where_clauses:
        event_query += " WHERE " + " AND ".join(where_clauses)
    # ordering is required for the consecutive-row pairing below
    event_query += " ORDER BY trip_I, day_start_ut+dep_time_ds;"
    events_result = pd.read_sql_query(event_query, self.conn)
    # Keep only pairs of consecutive stop_times belonging to the same trip:
    # these are the actual "events" (vehicle moving from one stop to the next).
    from_indices = numpy.nonzero(
        (events_result['trip_I'][:-1].values == events_result['trip_I'][1:].values) *
        (events_result['seq'][:-1].values < events_result['seq'][1:].values)
    )[0]
    to_indices = from_indices + 1
    # both ends of an event must belong to the same trip
    assert (events_result['trip_I'][from_indices].values ==
            events_result['trip_I'][to_indices].values).all()
    durations = (events_result['arr_time_ut'][to_indices].values -
                 events_result['dep_time_ut'][from_indices].values)
    assert (durations >= 0).all()
    data_tuples = zip(events_result['stop_I'][from_indices],
                      events_result['stop_I'][to_indices],
                      events_result['dep_time_ut'][from_indices],
                      events_result['arr_time_ut'][to_indices],
                      events_result['shape_id'][from_indices],
                      events_result['route_type'][from_indices],
                      events_result['route_id'][from_indices],
                      events_result['trip_I'][from_indices],
                      durations,
                      events_result['seq'][from_indices],
                      events_result['seq'][to_indices],
                      events_result['route_I'][from_indices])
    columns = ["from_stop_I", "to_stop_I", "dep_time_ut", "arr_time_ut",
               "shape_id", "route_type", "route_id", "trip_I",
               "duration", "from_seq", "to_seq", "route_I"]
    return pd.DataFrame.from_records(data_tuples, columns=columns)
def get_day_start_ut_span(self):
    """
    Return the first and last day_start_ut present in the days table.

    Returns
    -------
    first_day_start_ut: int
    last_day_start_ut: int
    """
    cursor = self.conn.cursor()
    row = cursor.execute(
        "SELECT min(day_start_ut), max(day_start_ut) FROM days;").fetchone()
    return row[0], row[1]
def read_data_as_dataframe(self,
                           travel_impedance_measure,
                           from_stop_I=None,
                           to_stop_I=None,
                           statistic=None):
    """
    Recover pre-computed travel impedance between od-pairs from the database.

    Parameters
    ----------
    travel_impedance_measure : str
        name of the table to read from
    from_stop_I : int, optional
        restrict to this origin stop
    to_stop_I : int, optional
        restrict to this destination stop
    statistic : str, optional
        a single statistic column to select; when omitted,
        "min", "mean", "median" and "max" are all returned

    Returns
    -------
    values: pandas.DataFrame
    """
    columns = ["from_stop_I", "to_stop_I"]
    if statistic:
        columns.append(statistic)
    else:
        columns.extend(["min", "mean", "median", "max"])
    filters = []
    # int() cast guards against non-numeric values ending up in the SQL
    if from_stop_I is not None:
        filters.append("from_stop_I=" + str(int(from_stop_I)))
    if to_stop_I is not None:
        filters.append("to_stop_I=" + str(int(to_stop_I)))
    where_clause = (" WHERE " + " AND ".join(filters)) if filters else ""
    sql = "SELECT " + ",".join(columns) + " FROM " + travel_impedance_measure + where_clause + ";"
    return pd.read_sql(sql, self.conn)
q257511 | NodeProfileMultiObjective._check_dep_time_is_valid | validation | def _check_dep_time_is_valid(self, dep_time):
"""
A simple checker, that connections are coming in descending order of departure time
and that no departure time has been "skipped".
Parameters
----------
dep_time
Returns
-------
None
"""
assert dep_time <= self._min_dep_time, "Labels should be entered in decreasing order of departure time."
dep_time_index = self.dep_times_to_index[dep_time]
if self._min_dep_time < float('inf'):
min_dep_index = self.dep_times_to_index[self._min_dep_time]
assert min_dep_index == dep_time_index or (min_dep_index == dep_time_index - 1), \
"dep times should be ordered sequentially"
else:
assert dep_time_index is 0, "first dep_time index should be zero (ensuring that all connections are properly handled)"
self._min_dep_time = dep_time | python | {
"resource": ""
} |
def update(self, new_labels, departure_time_backup=None):
    """
    Update the profile with the new labels.
    Each new label should have the same departure_time.

    Parameters
    ----------
    new_labels: list[LabelTime]
        labels to merge into the profile; all must share one departure time
    departure_time_backup: int | float, optional
        departure time to use when new_labels is empty

    Returns
    -------
    added: bool
        whether new_pareto_tuple was added to the set of pareto-optimal tuples

    Raises
    ------
    RuntimeError
        if the profile has already been closed for updates
    """
    if self._closed:
        raise RuntimeError("Profile is closed, no updates can be made")
    try:
        departure_time = next(iter(new_labels)).departure_time
    except StopIteration:
        # new_labels is empty -> fall back to the explicitly supplied time
        departure_time = departure_time_backup
    self._check_dep_time_is_valid(departure_time)
    for new_label in new_labels:
        assert (new_label.departure_time == departure_time)
    dep_time_index = self.dep_times_to_index[departure_time]
    if dep_time_index > 0:
        # Departure time is modified in order to not pass on labels which are not Pareto-optimal when departure time is ignored.
        mod_prev_labels = [label.get_copy_with_specified_departure_time(departure_time) for label
                           in self._label_bags[dep_time_index - 1]]
    else:
        # first departure time: there is no previous bag to carry over
        mod_prev_labels = list()
    mod_prev_labels += self._label_bags[dep_time_index]
    # include the direct walk-to-target label (if walking is possible)
    walk_label = self._get_label_to_target(departure_time)
    if walk_label:
        new_labels = new_labels + [walk_label]
    new_frontier = merge_pareto_frontiers(new_labels, mod_prev_labels)
    self._label_bags[dep_time_index] = new_frontier
    # NOTE(review): always returns True regardless of whether the frontier
    # actually changed — callers should not rely on this as a change flag.
    return True
def evaluate(self, dep_time, first_leg_can_be_walk=True, connection_arrival_time=None):
    """
    Get the pareto-optimal set of Labels for a given departure time.

    Parameters
    ----------
    dep_time : float, int
        time in unix seconds
    first_leg_can_be_walk : bool, optional
        whether walking to the target may be included in the result
        (False when scanning a pseudo-connection: "double" walks are
        not allowed)
    connection_arrival_time: float, int, optional
        used for computing the walking label when dep_time
        (i.e. connection.arrival_stop_next_departure_time) is infinity

    Returns
    -------
    pareto_optimal_labels : list
        list of pareto-optimal Labels
    """
    walk_labels = []
    if first_leg_can_be_walk and self._walk_to_target_duration != float('inf'):
        # the walk label is anchored at the connection arrival time when given,
        # otherwise at the requested departure time
        reference_time = dep_time if connection_arrival_time is None else connection_arrival_time
        walk_labels.append(self._get_label_to_target(reference_time))
    if dep_time in self.dep_times_to_index:
        assert (dep_time != float('inf'))
        stored_labels = self._label_bags[self.dep_times_to_index[dep_time]]
        pareto_optimal_labels = merge_pareto_frontiers(stored_labels, walk_labels)
    else:
        # dep_time beyond all stored departures -> only walking is possible
        pareto_optimal_labels = walk_labels
    if not first_leg_can_be_walk:
        pareto_optimal_labels = [label for label in pareto_optimal_labels
                                 if not label.first_leg_is_walk]
    return pareto_optimal_labels
def create_table(self, conn):
    """Create this loader's table in the target database (if missing).

    Parameters
    ----------
    conn : sqlite3.Connection
    """
    cursor = conn.cursor()
    # No table definition -> nothing to create for this loader.
    if self.tabledef is None:
        return
    if self.tabledef.startswith('CREATE'):
        # tabledef holds a complete CREATE statement (used for virtual tables).
        cursor.execute(self.tabledef)
    else:
        # tabledef holds only the column definitions.
        cursor.execute('CREATE TABLE IF NOT EXISTS %s %s'
                       % (self.table, self.tabledef)
                       )
    conn.commit()
def import_(self, conn):
    """Do the actual import: copy data and store it in the connection.

    This function:
    - creates the tables
    - imports data (using self.gen_rows)
    - runs any post_import hooks
    - creates any indexes
    - does *not* run self.make_views — those must be done after all
      tables are loaded.

    Parameters
    ----------
    conn : sqlite3.Connection
    """
    if self.print_progress:
        print('Beginning', self.__class__.__name__)
    # keep a handle on the connection for helpers invoked later
    self._conn = conn
    self.create_table(conn)
    importing = self.mode in ('all', 'import')
    # insertions, skipped for tables explicitly ignored
    if importing and self.fname and self.exists() and self.table not in ignore_tables:
        self.insert_data(conn)
    # index creation in the DB
    if self.mode in ('all', 'index') and hasattr(self, 'index'):
        self.create_index(conn)
    # post-processing after the full import
    if importing and hasattr(self, 'post_import'):
        self.run_post_import(conn)
    conn.commit()
def copy(cls, conn, **where):
    """Copy this table's rows from an attached database, optionally filtering.

    Parameters
    ----------
    conn: sqlite3 DB connection. It must have a second database
        attached as "source".
    **where : keyword arguments
        substituted into cls.copy_where (e.g. start_ut / end_ut, see the
        copy_where clause in the subclasses) to filter the copied rows
    """
    filter_sql = ''
    if where and cls.copy_where:
        filter_sql = cls.copy_where.format(**where)
    conn.cursor().execute('INSERT INTO %s '
                          'SELECT * FROM source.%s %s' % (cls.table, cls.table, filter_sql))
def get_median_lat_lon_of_stops(gtfs):
    """
    Get median latitude AND longitude of stops.

    Parameters
    ----------
    gtfs: GTFS

    Returns
    -------
    median_lat : float
    median_lon : float
    """
    stops = gtfs.get_table("stops")
    # numpy.median is equivalent to the 50th percentile
    return numpy.median(stops['lat'].values), numpy.median(stops['lon'].values)
def get_centroid_of_stops(gtfs):
    """
    Get mean latitude AND longitude of stops.

    Parameters
    ----------
    gtfs: GTFS

    Returns
    -------
    mean_lat : float
    mean_lon : float
    """
    stops = gtfs.get_table("stops")
    return numpy.mean(stops['lat'].values), numpy.mean(stops['lon'].values)
def write_stats_as_csv(gtfs, path_to_csv, re_write=False):
    """
    Write data from get_stats to a csv file (one row per call).

    Parameters
    ----------
    gtfs: GTFS
    path_to_csv: str
        filepath to the csv file to be generated
    re_write: bool
        instead of appending, create a new file.
    """
    stats_dict = get_stats(gtfs)
    # Guard the removal: a bare os.remove raises FileNotFoundError when
    # re_write is requested on a path that does not exist yet.
    if re_write and os.path.exists(path_to_csv):
        os.remove(path_to_csv)
    # The header row is needed exactly when the file is absent or empty
    # (replaces the old read-loop whose result depended on the last line).
    is_new = (not os.path.isfile(path_to_csv)) or os.path.getsize(path_to_csv) == 0
    with open(path_to_csv, 'a') as csvfile:
        # csv.writer wants a str delimiter on py3 and bytes on py2
        if (sys.version_info > (3, 0)):
            delimiter = u","
        else:
            delimiter = b","
        statswriter = csv.writer(csvfile, delimiter=delimiter)
        sorted_keys = sorted(stats_dict.keys())
        # write column names only for a fresh file
        if is_new:
            statswriter.writerow(sorted_keys)
        # write the stats row sorted by column name
        statswriter.writerow([stats_dict[key] for key in sorted_keys])
q257520 | _distribution | validation | def _distribution(gtfs, table, column):
"""Count occurrences of values AND return it as a string.
Example return value: '1:5 2:15'"""
cur = gtfs.conn.cursor()
cur.execute('SELECT {column}, count(*) '
'FROM {table} GROUP BY {column} '
'ORDER BY {column}'.format(column=column, table=table))
return ' '.join('%s:%s' % (t, c) for t, c in cur) | python | {
"resource": ""
} |
def _feed_calendar_span(gtfs, stats):
    """
    Compute the temporal coverage of each source feed.

    Parameters
    ----------
    gtfs: gtfspy.GTFS object
    stats: dict
        where to append the stats

    Returns
    -------
    stats: dict
    """
    n_feeds = _n_gtfs_sources(gtfs)[0]
    max_start = None
    min_end = None
    if n_feeds <= 1:
        # single feed: the overall span is the feed span
        stats["latest_feed_start_date"] = stats["start_date"]
        stats["earliest_feed_end_date"] = stats["end_date"]
        return stats
    for i in range(n_feeds):
        feed_key = "feed_" + str(i) + "_"
        # trips of feed i are recognized by their trip_id prefix
        feed_start, feed_end = gtfs.conn.cursor().execute(
            'SELECT min(date), max(date) FROM trips, days '
            'WHERE trips.trip_I = days.trip_I AND trip_id LIKE ?;',
            (feed_key + '%',)).fetchone()
        stats[feed_key + "calendar_start"] = feed_start
        stats[feed_key + "calendar_end"] = feed_end
        if feed_start is not None and feed_end is not None:
            if not max_start and not min_end:
                max_start = feed_start
                min_end = feed_end
            else:
                # keep the latest start and the earliest end across feeds
                if gtfs.get_day_start_ut(feed_start) > gtfs.get_day_start_ut(max_start):
                    max_start = feed_start
                if gtfs.get_day_start_ut(feed_end) < gtfs.get_day_start_ut(min_end):
                    min_end = feed_end
    stats["latest_feed_start_date"] = max_start
    stats["earliest_feed_end_date"] = min_end
    return stats
def route_frequencies(gtfs, results_by_mode=False):
    """
    Return the frequency of all routes for a suitable extraction day.

    Parameters
    -----------
    gtfs: GTFS
    results_by_mode: bool
        currently unused by this implementation

    Returns
    -------
    pandas.DataFrame with columns
        route_I, type, frequency (sorted by descending frequency)
    """
    extraction_day = gtfs.get_suitable_date_for_daily_extract()
    sql_template = (
        " SELECT f.route_I, type, frequency FROM routes as r"
        " JOIN"
        " (SELECT route_I, COUNT(route_I) as frequency"
        " FROM"
        " (SELECT date, route_I, trip_I"
        " FROM day_stop_times"
        " WHERE date = '{day}'"
        " GROUP by route_I, trip_I)"
        " GROUP BY route_I) as f"
        " ON f.route_I = r.route_I"
        " ORDER BY frequency DESC")
    result = gtfs.execute_custom_query_pandas(sql_template.format(day=extraction_day))
    return pd.DataFrame(result)
def get_vehicle_hours_by_type(gtfs, route_type):
    """
    Return the sum of vehicle hours on a suitable extraction day for one
    route type.

    Parameters
    ----------
    gtfs: GTFS
    route_type: int
        GTFS route type to sum over

    Returns
    -------
    number: total vehicle hours for the given type and day
    """
    extraction_day = gtfs.get_suitable_date_for_daily_extract()
    sql_template = (" SELECT * , SUM(end_time_ds - start_time_ds)/3600 as vehicle_hours_type"
                    " FROM"
                    " (SELECT * FROM day_trips as q1"
                    " INNER JOIN"
                    " (SELECT route_I, type FROM routes) as q2"
                    " ON q1.route_I = q2.route_I"
                    " WHERE type = {route_type}"
                    " AND date = '{day}')")
    result = gtfs.execute_custom_query_pandas(
        sql_template.format(day=extraction_day, route_type=route_type))
    return result['vehicle_hours_type'].item()
q257524 | ConnectionScan._scan_footpaths | validation | def _scan_footpaths(self, stop_id, walk_departure_time):
"""
Scan the footpaths originating from stop_id
Parameters
----------
stop_id: int
"""
for _, neighbor, data in self._walk_network.edges_iter(nbunch=[stop_id], data=True):
d_walk = data["d_walk"]
arrival_time = walk_departure_time + d_walk / self._walk_speed
self._update_stop_label(neighbor, arrival_time) | python | {
"resource": ""
} |
def timeit(method):
    """
    A Python decorator for printing out the execution time of a function.

    Adapted from:
    www.andreas-jung.com/contents/a-python-decorator-for-measuring-the-execution-time-of-methods
    """
    import functools

    # functools.wraps preserves __name__/__doc__ of the wrapped function,
    # which the bare inner function would otherwise clobber with "timed".
    @functools.wraps(method)
    def timed(*args, **kw):
        time_start = time.time()
        result = method(*args, **kw)
        time_end = time.time()
        print('timeit: %r %2.2f sec (%r, %r) ' % (method.__name__, time_end-time_start, str(args)[:20], kw))
        return result
    return timed
def clean(self):
    """Validate the submitted form, rejecting users lacking the required role."""
    cleaned_data = super(AuthForm, self).clean()
    user = self.get_user()
    has_staff_access = bool(user and user.is_staff)
    has_superuser_access = bool(user and user.is_superuser)
    if self.staff_only and not has_staff_access:
        raise forms.ValidationError('Sorry, only staff are allowed.')
    if self.superusers_only and not has_superuser_access:
        raise forms.ValidationError('Sorry, only superusers are allowed.')
    return cleaned_data
def get_lockdown_form(form_path):
    """Return a form class for a given dotted path to a lockdown form.

    Raises ImproperlyConfigured when the path is empty, the module cannot
    be imported, or the attribute is missing.
    """
    if not form_path:
        raise ImproperlyConfigured('No LOCKDOWN_FORM specified.')
    path_parts = form_path.split(".")
    module_path = ".".join(path_parts[:-1])
    class_name = path_parts[-1]
    try:
        module = import_module(module_path)
    except (ImportError, ValueError):
        raise ImproperlyConfigured('Module configured in LOCKDOWN_FORM (%s) to'
                                   ' contain the form class couldn\'t be '
                                   'found.' % module_path)
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise ImproperlyConfigured('The module configured in LOCKDOWN_FORM '
                                   ' (%s) doesn\'t define a "%s" form.'
                                   % (module_path, class_name))
def process_request(self, request):
    """Check if each request is allowed to access the current resource.

    Returns None to let the request through, an HttpResponseRedirect
    after a successful login/logout, or the rendered lockdown form page
    otherwise.

    Parameters
    ----------
    request : django.http.HttpRequest
    """
    try:
        session = request.session
    except AttributeError:
        raise ImproperlyConfigured('django-lockdown requires the Django '
                                   'sessions framework')
    # Don't lock down if django-lockdown is disabled altogether.
    if settings.ENABLED is False:
        return None
    # Don't lock down if the client REMOTE_ADDR matched and is part of the
    # exception list.
    if self.remote_addr_exceptions:
        remote_addr_exceptions = self.remote_addr_exceptions
    else:
        remote_addr_exceptions = settings.REMOTE_ADDR_EXCEPTIONS
    if remote_addr_exceptions:
        # If forwarding proxies are used they must be listed as trusted
        trusted_proxies = self.trusted_proxies or settings.TRUSTED_PROXIES
        remote_addr = request.META.get('REMOTE_ADDR')
        if remote_addr in remote_addr_exceptions:
            return None
        if remote_addr in trusted_proxies:
            # If REMOTE_ADDR is a trusted proxy check x-forwarded-for
            x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
            if x_forwarded_for:
                # last entry is the client closest to the trusted proxy
                remote_addr = x_forwarded_for.split(',')[-1].strip()
                if remote_addr in remote_addr_exceptions:
                    return None
    # Don't lock down if the URL matches an exception pattern.
    if self.url_exceptions:
        url_exceptions = compile_url_exceptions(self.url_exceptions)
    else:
        url_exceptions = compile_url_exceptions(settings.URL_EXCEPTIONS)
    for pattern in url_exceptions:
        if pattern.search(request.path):
            return None
    # Don't lock down if the URL resolves to a whitelisted view.
    try:
        resolved_path = resolve(request.path)
    except Resolver404:
        pass
    else:
        if resolved_path.func in settings.VIEW_EXCEPTIONS:
            return None
    # Don't lock down if outside of the lockdown dates.
    if self.until_date:
        until_date = self.until_date
    else:
        until_date = settings.UNTIL_DATE
    if self.after_date:
        after_date = self.after_date
    else:
        after_date = settings.AFTER_DATE
    if until_date or after_date:
        locked_date = False
        if until_date and datetime.datetime.now() < until_date:
            locked_date = True
        if after_date and datetime.datetime.now() > after_date:
            locked_date = True
        if not locked_date:
            return None
    form_data = request.POST if request.method == 'POST' else None
    if self.form:
        form_class = self.form
    else:
        form_class = get_lockdown_form(settings.FORM)
    form = form_class(data=form_data, **self.form_kwargs)
    authorized = False
    token = session.get(self.session_key)
    # Forms providing authenticate() validate the stored token themselves;
    # otherwise a stored literal True marks an authorized preview session.
    if hasattr(form, 'authenticate'):
        if form.authenticate(token):
            authorized = True
    elif token is True:
        authorized = True
    if authorized and self.logout_key and self.logout_key in request.GET:
        if self.session_key in session:
            del session[self.session_key]
        # redirect() below recomputes the querystring without the logout key
        querystring = request.GET.copy()
        del querystring[self.logout_key]
        return self.redirect(request)
    # Don't lock down if the user is already authorized for previewing.
    if authorized:
        return None
    if form.is_valid():
        # store a token (form-generated or literal True) marking authorization
        if hasattr(form, 'generate_token'):
            token = form.generate_token()
        else:
            token = True
        session[self.session_key] = token
        return self.redirect(request)
    page_data = {'until_date': until_date, 'after_date': after_date}
    if not hasattr(form, 'show_form') or form.show_form():
        page_data['form'] = form
    if self.extra_context:
        page_data.update(self.extra_context)
    return render(request, 'lockdown/form.html', page_data)
def redirect(self, request):
    """Redirect back to the current path, dropping the logout key from the
    query string."""
    querystring = request.GET.copy()
    if self.logout_key and self.logout_key in request.GET:
        del querystring[self.logout_key]
    target = request.path
    if querystring:
        target = '%s?%s' % (target, querystring.urlencode())
    return HttpResponseRedirect(target)
def get(self, profile_id):
    '''Return the profile with the received ID as a dict.

    A locally cached copy is returned when available; otherwise the
    profile is loaded (from disk or the web) and cached, so subsequent
    calls won't hit the filesystem or the web again.

    Args:
        profile_id (str): The ID of the profile you want.

    Raises:
        RegistryError: If there was some problem opening the profile file
            or its format was incorrect.
    '''
    if profile_id in self._profiles:
        return self._profiles[profile_id]
    try:
        profile = self._get_profile(profile_id)
    except (ValueError,
            IOError) as e:
        six.raise_from(RegistryError(e), e)
    self._profiles[profile_id] = profile
    return profile
def get_descriptor_base_path(descriptor):
    """Return the base path of a descriptor given as a path/URL string,
    or the current directory ('.') for in-memory descriptors.
    """
    if not isinstance(descriptor, six.string_types):
        # non-string descriptor: resolve relative paths against the cwd
        return '.'
    if os.path.exists(descriptor):
        return os.path.dirname(os.path.abspath(descriptor))
    # not on disk: suppose descriptor is a URL
    return os.path.dirname(descriptor)
def retrieve_descriptor(descriptor):
    """Retrieve descriptor.

    Accepts a dict (returned as-is), a local file path, a URL, or a
    file-like object containing JSON; None yields an empty dict.

    Raises
    ------
    exceptions.DataPackageException
        if the source cannot be read, does not contain valid JSON, or
        does not hold a dict.
    """
    the_descriptor = descriptor
    if the_descriptor is None:
        the_descriptor = {}
    if isinstance(the_descriptor, six.string_types):
        try:
            if os.path.isfile(the_descriptor):
                with open(the_descriptor, 'r') as f:
                    the_descriptor = json.load(f)
            else:
                # not a local file -> treat the string as a URL
                req = requests.get(the_descriptor)
                req.raise_for_status()
                # Force UTF8 encoding for 'text/plain' sources
                req.encoding = 'utf8'
                the_descriptor = req.json()
        except (IOError, requests.exceptions.RequestException) as error:
            message = 'Unable to load JSON at "%s"' % descriptor
            six.raise_from(exceptions.DataPackageException(message), error)
        except ValueError as error:
            # Python2 doesn't have json.JSONDecodeError (use ValueError)
            message = 'Unable to parse JSON at "%s". %s' % (descriptor, error)
            six.raise_from(exceptions.DataPackageException(message), error)
    if hasattr(the_descriptor, 'read'):
        # file-like object: parse its JSON content
        try:
            the_descriptor = json.load(the_descriptor)
        except ValueError as e:
            six.raise_from(exceptions.DataPackageException(str(e)), e)
    if not isinstance(the_descriptor, dict):
        msg = 'Data must be a \'dict\', but was a \'{0}\''
        raise exceptions.DataPackageException(msg.format(type(the_descriptor).__name__))
    return the_descriptor
def is_safe_path(path):
    """Return True if *path* is relative, does not climb out of the tree,
    and contains no environment-variable references."""
    has_windows_var = re.match(r'%.+%', path)
    has_posix_var = re.match(r'\$.+', path)
    is_unsafe = (
        os.path.isabs(path)
        or ('..%s' % os.path.sep) in path
        or path.startswith('~')
        or os.path.expandvars(path) != path
        or bool(has_windows_var)
        or bool(has_posix_var)
    )
    return not is_unsafe
q257534 | _validate_zip | validation | def _validate_zip(the_zip):
"""Validate zipped data package
"""
datapackage_jsons = [f for f in the_zip.namelist() if f.endswith('datapackage.json')]
if len(datapackage_jsons) != 1:
msg = 'DataPackage must have only one "datapackage.json" (had {n})'
raise exceptions.DataPackageException(msg.format(n=len(datapackage_jsons))) | python | {
"resource": ""
} |
def _slugify_foreign_key(schema):
    """Slugify the resource names referenced by a schema's foreign keys."""
    for fk in schema.get('foreignKeys', []):
        reference = fk['reference']
        reference['resource'] = _slugify_resource_name(reference.get('resource', ''))
    return schema
def validate(self):
    """Validate this Data Package against its profile.

    Deprecated: emits a UserWarning on every call.
    """
    warnings.warn(
        'Property "package.validate" is deprecated.',
        UserWarning)
    self.profile.validate(self.to_dict())
def push_datapackage(descriptor, backend, **backend_options):
    """Push Data Package to storage.

    All parameters should be used as keyword arguments.

    Args:
        descriptor (str): path to descriptor
        backend (str): backend name like `sql` or `bigquery`
        backend_options (dict): backend options mentioned in backend docs
    """
    # Deprecated
    warnings.warn(
        'Functions "push/pull_datapackage" are deprecated. '
        'Please use "Package" class',
        UserWarning)
    # Init maps
    tables = []
    schemas = []
    datamap = {}
    mapping = {}
    # Init model
    model = Package(descriptor)
    # Get storage
    plugin = import_module('jsontableschema.plugins.%s' % backend)
    storage = plugin.Storage(**backend_options)
    # Collect tables/schemas/data
    for resource in model.resources:
        # only tabular resources can be pushed to a storage backend
        if not resource.tabular:
            continue
        name = resource.descriptor.get('name', None)
        table = _convert_path(resource.descriptor['path'], name)
        schema = resource.descriptor['schema']
        data = resource.table.iter(keyed=True)
        # TODO: review
        # Generator yielding one value-tuple per item, ordered by the
        # schema's field order (missing fields become None).
        def values(schema, data):
            for item in data:
                row = []
                for field in schema['fields']:
                    row.append(item.get(field['name'], None))
                yield tuple(row)
        tables.append(table)
        schemas.append(schema)
        datamap[table] = values(schema, data)
        if name is not None:
            mapping[name] = table
    schemas = _convert_schemas(mapping, schemas)
    # Create tables (dropping any pre-existing table of the same name)
    for table in tables:
        if table in storage.buckets:
            storage.delete(table)
    storage.create(tables, schemas)
    # Write data to tables
    for table in storage.buckets:
        if table in datamap:
            storage.write(table, datamap[table])
    return storage
q257538 | pull_datapackage | validation | def pull_datapackage(descriptor, name, backend, **backend_options):
"""Pull Data Package from storage.
All parameters should be used as keyword arguments.
Args:
descriptor (str): path where to store descriptor
name (str): name of the pulled datapackage
backend (str): backend name like `sql` or `bigquery`
backend_options (dict): backend options mentioned in backend docs
"""
# Deprecated
warnings.warn(
'Functions "push/pull_datapackage" are deprecated. '
'Please use "Package" class',
UserWarning)
# Save datapackage name
datapackage_name = name
# Get storage
plugin = import_module('jsontableschema.plugins.%s' % backend)
storage = plugin.Storage(**backend_options)
# Iterate over tables
resources = []
for table in storage.buckets:
# Prepare
schema = storage.describe(table)
base = os.path.dirname(descriptor)
path, name = _restore_path(table)
fullpath = os.path.join(base, path)
# Write data
helpers.ensure_dir(fullpath)
with io.open(fullpath, 'wb') as file:
model = Schema(deepcopy(schema))
data = storage.iter(table)
writer = csv.writer(file, encoding='utf-8')
writer.writerow(model.headers)
for row in data:
writer.writerow(row)
# Add resource
resource = {'schema': schema, 'path': path}
if name is not None:
resource['name'] = name
resources.append(resource)
# Write descriptor
mode = 'w'
encoding = 'utf-8'
if six.PY2:
mode = 'wb'
encoding = None
resources = _restore_resources(resources)
helpers.ensure_dir(descriptor)
with io.open(descriptor,
mode=mode,
encoding=encoding) as file:
descriptor = {
'name': datapackage_name,
'resources': resources,
}
json.dump(descriptor, file, indent=4)
return storage | python | {
"resource": ""
} |
q257539 | _convert_path | validation | def _convert_path(path, name):
"""Convert resource's path and name to storage's table name.
Args:
path (str): resource path
name (str): resource name
Returns:
str: table name
"""
table = os.path.splitext(path)[0]
table = table.replace(os.path.sep, '__')
if name is not None:
table = '___'.join([table, name])
table = re.sub('[^0-9a-zA-Z_]+', '_', table)
table = table.lower()
return table | python | {
"resource": ""
} |
q257540 | _restore_path | validation | def _restore_path(table):
"""Restore resource's path and name from storage's table.
Args:
table (str): table name
Returns:
(str, str): resource path and name
"""
name = None
splited = table.split('___')
path = splited[0]
if len(splited) == 2:
name = splited[1]
path = path.replace('__', os.path.sep)
path += '.csv'
return path, name | python | {
"resource": ""
} |
q257541 | _convert_schemas | validation | def _convert_schemas(mapping, schemas):
"""Convert schemas to be compatible with storage schemas.
Foreign keys related operations.
Args:
mapping (dict): mapping between resource name and table name
schemas (list): schemas
Raises:
ValueError: if there is no resource
for some foreign key in given mapping
Returns:
list: converted schemas
"""
schemas = deepcopy(schemas)
for schema in schemas:
for fk in schema.get('foreignKeys', []):
resource = fk['reference']['resource']
if resource != 'self':
if resource not in mapping:
message = 'Not resource "%s" for foreign key "%s"'
message = message % (resource, fk)
raise ValueError(message)
fk['reference']['resource'] = mapping[resource]
return schemas | python | {
"resource": ""
} |
q257542 | _restore_resources | validation | def _restore_resources(resources):
"""Restore schemas from being compatible with storage schemas.
Foreign keys related operations.
Args:
list: resources from storage
Returns:
list: restored resources
"""
resources = deepcopy(resources)
for resource in resources:
schema = resource['schema']
for fk in schema.get('foreignKeys', []):
_, name = _restore_path(fk['reference']['resource'])
fk['reference']['resource'] = name
return resources | python | {
"resource": ""
} |
q257543 | _buffer_incomplete_responses | validation | def _buffer_incomplete_responses(raw_output, buf):
"""It is possible for some of gdb's output to be read before it completely finished its response.
In that case, a partial mi response was read, which cannot be parsed into structured data.
We want to ALWAYS parse complete mi records. To do this, we store a buffer of gdb's
output if the output did not end in a newline.
Args:
raw_output: Contents of the gdb mi output
buf (str): Buffered gdb response from the past. This is incomplete and needs to be prepended to
gdb's next output.
Returns:
(raw_output, buf)
"""
if raw_output:
if buf:
# concatenate buffer and new output
raw_output = b"".join([buf, raw_output])
buf = None
if b"\n" not in raw_output:
# newline was not found, so assume output is incomplete and store in buffer
buf = raw_output
raw_output = None
elif not raw_output.endswith(b"\n"):
# raw output doesn't end in a newline, so store everything after the last newline (if anything)
# in the buffer, and parse everything before it
remainder_offset = raw_output.rindex(b"\n") + 1
buf = raw_output[remainder_offset:]
raw_output = raw_output[:remainder_offset]
return (raw_output, buf) | python | {
"resource": ""
} |
q257544 | GdbController.verify_valid_gdb_subprocess | validation | def verify_valid_gdb_subprocess(self):
"""Verify there is a process object, and that it is still running.
Raise NoGdbProcessError if either of the above are not true."""
if not self.gdb_process:
raise NoGdbProcessError("gdb process is not attached")
elif self.gdb_process.poll() is not None:
raise NoGdbProcessError(
"gdb process has already finished with return code: %s"
% str(self.gdb_process.poll())
) | python | {
"resource": ""
} |
q257545 | GdbController.write | validation | def write(
self,
mi_cmd_to_write,
timeout_sec=DEFAULT_GDB_TIMEOUT_SEC,
raise_error_on_timeout=True,
read_response=True,
):
"""Write to gdb process. Block while parsing responses from gdb for a maximum of timeout_sec.
Args:
mi_cmd_to_write (str or list): String to write to gdb. If list, it is joined by newlines.
timeout_sec (float): Maximum number of seconds to wait for response before exiting. Must be >= 0.
raise_error_on_timeout (bool): If read_response is True, raise error if no response is received
read_response (bool): Block and read response. If there is a separate thread running,
this can be false, and the reading thread read the output.
Returns:
List of parsed gdb responses if read_response is True, otherwise []
Raises:
NoGdbProcessError if there is no gdb subprocess running
TypeError if mi_cmd_to_write is not valid
"""
self.verify_valid_gdb_subprocess()
if timeout_sec < 0:
self.logger.warning("timeout_sec was negative, replacing with 0")
timeout_sec = 0
# Ensure proper type of the mi command
if type(mi_cmd_to_write) in [str, unicode]:
pass
elif type(mi_cmd_to_write) == list:
mi_cmd_to_write = "\n".join(mi_cmd_to_write)
else:
raise TypeError(
"The gdb mi command must a be str or list. Got "
+ str(type(mi_cmd_to_write))
)
self.logger.debug("writing: %s", mi_cmd_to_write)
if not mi_cmd_to_write.endswith("\n"):
mi_cmd_to_write_nl = mi_cmd_to_write + "\n"
else:
mi_cmd_to_write_nl = mi_cmd_to_write
if USING_WINDOWS:
# select not implemented in windows for pipes
# assume it's always ready
outputready = [self.stdin_fileno]
else:
_, outputready, _ = select.select([], self.write_list, [], timeout_sec)
for fileno in outputready:
if fileno == self.stdin_fileno:
# ready to write
self.gdb_process.stdin.write(mi_cmd_to_write_nl.encode())
# don't forget to flush for Python3, otherwise gdb won't realize there is data
# to evaluate, and we won't get a response
self.gdb_process.stdin.flush()
else:
self.logger.error("got unexpected fileno %d" % fileno)
if read_response is True:
return self.get_gdb_response(
timeout_sec=timeout_sec, raise_error_on_timeout=raise_error_on_timeout
)
else:
return [] | python | {
"resource": ""
} |
q257546 | GdbController.get_gdb_response | validation | def get_gdb_response(
self, timeout_sec=DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True
):
"""Get response from GDB, and block while doing so. If GDB does not have any response ready to be read
by timeout_sec, an exception is raised.
Args:
timeout_sec (float): Maximum time to wait for reponse. Must be >= 0. Will return after
raise_error_on_timeout (bool): Whether an exception should be raised if no response was found
after timeout_sec
Returns:
List of parsed GDB responses, returned from gdbmiparser.parse_response, with the
additional key 'stream' which is either 'stdout' or 'stderr'
Raises:
GdbTimeoutError if response is not received within timeout_sec
ValueError if select returned unexpected file number
NoGdbProcessError if there is no gdb subprocess running
"""
self.verify_valid_gdb_subprocess()
if timeout_sec < 0:
self.logger.warning("timeout_sec was negative, replacing with 0")
timeout_sec = 0
if USING_WINDOWS:
retval = self._get_responses_windows(timeout_sec)
else:
retval = self._get_responses_unix(timeout_sec)
if not retval and raise_error_on_timeout:
raise GdbTimeoutError(
"Did not get response from gdb after %s seconds" % timeout_sec
)
else:
return retval | python | {
"resource": ""
} |
q257547 | GdbController._get_responses_windows | validation | def _get_responses_windows(self, timeout_sec):
"""Get responses on windows. Assume no support for select and use a while loop."""
timeout_time_sec = time.time() + timeout_sec
responses = []
while True:
try:
self.gdb_process.stdout.flush()
if PYTHON3:
raw_output = self.gdb_process.stdout.readline().replace(
b"\r", b"\n"
)
else:
raw_output = self.gdb_process.stdout.read().replace(b"\r", b"\n")
responses += self._get_responses_list(raw_output, "stdout")
except IOError:
pass
try:
self.gdb_process.stderr.flush()
if PYTHON3:
raw_output = self.gdb_process.stderr.readline().replace(
b"\r", b"\n"
)
else:
raw_output = self.gdb_process.stderr.read().replace(b"\r", b"\n")
responses += self._get_responses_list(raw_output, "stderr")
except IOError:
pass
if time.time() > timeout_time_sec:
break
return responses | python | {
"resource": ""
} |
q257548 | GdbController._get_responses_unix | validation | def _get_responses_unix(self, timeout_sec):
"""Get responses on unix-like system. Use select to wait for output."""
timeout_time_sec = time.time() + timeout_sec
responses = []
while True:
select_timeout = timeout_time_sec - time.time()
# I prefer to not pass a negative value to select
if select_timeout <= 0:
select_timeout = 0
events, _, _ = select.select(self.read_list, [], [], select_timeout)
responses_list = None # to avoid infinite loop if using Python 2
try:
for fileno in events:
# new data is ready to read
if fileno == self.stdout_fileno:
self.gdb_process.stdout.flush()
raw_output = self.gdb_process.stdout.read()
stream = "stdout"
elif fileno == self.stderr_fileno:
self.gdb_process.stderr.flush()
raw_output = self.gdb_process.stderr.read()
stream = "stderr"
else:
raise ValueError(
"Developer error. Got unexpected file number %d" % fileno
)
responses_list = self._get_responses_list(raw_output, stream)
responses += responses_list
except IOError: # only occurs in python 2.7
pass
if timeout_sec == 0: # just exit immediately
break
elif responses_list and self._allow_overwrite_timeout_times:
# update timeout time to potentially be closer to now to avoid lengthy wait times when nothing is being output by gdb
timeout_time_sec = min(
time.time() + self.time_to_check_for_additional_output_sec,
timeout_time_sec,
)
elif time.time() > timeout_time_sec:
break
return responses | python | {
"resource": ""
} |
q257549 | main | validation | def main(verbose=True):
"""Build and debug an application programatically
For a list of GDB MI commands, see https://www.sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI.html
"""
# Build C program
find_executable(MAKE_CMD)
if not find_executable(MAKE_CMD):
print(
'Could not find executable "%s". Ensure it is installed and on your $PATH.'
% MAKE_CMD
)
exit(1)
subprocess.check_output([MAKE_CMD, "-C", SAMPLE_C_CODE_DIR, "--quiet"])
# Initialize object that manages gdb subprocess
gdbmi = GdbController(verbose=verbose)
# Send gdb commands. Gdb machine interface commands are easier to script around,
# hence the name "machine interface".
# Responses are automatically printed as they are received if verbose is True.
# Responses are returned after writing, by default.
# Load the file
responses = gdbmi.write("-file-exec-and-symbols %s" % SAMPLE_C_BINARY)
# Get list of source files used to compile the binary
responses = gdbmi.write("-file-list-exec-source-files")
# Add breakpoint
responses = gdbmi.write("-break-insert main")
# Run
responses = gdbmi.write("-exec-run")
responses = gdbmi.write("-exec-next")
responses = gdbmi.write("-exec-next")
responses = gdbmi.write("-exec-continue") # noqa: F841
# gdbmi.gdb_process will be None because the gdb subprocess (and its inferior
# program) will be terminated
gdbmi.exit() | python | {
"resource": ""
} |
q257550 | StringStream.read | validation | def read(self, count):
"""Read count characters starting at self.index,
and return those characters as a string
"""
new_index = self.index + count
if new_index > self.len:
buf = self.raw_text[self.index :] # return to the end, don't fail
else:
buf = self.raw_text[self.index : new_index]
self.index = new_index
return buf | python | {
"resource": ""
} |
q257551 | StringStream.advance_past_string_with_gdb_escapes | validation | def advance_past_string_with_gdb_escapes(self, chars_to_remove_gdb_escape=None):
"""characters that gdb escapes that should not be
escaped by this parser
"""
if chars_to_remove_gdb_escape is None:
chars_to_remove_gdb_escape = ['"']
buf = ""
while True:
c = self.raw_text[self.index]
self.index += 1
logging.debug("%s", fmt_cyan(c))
if c == "\\":
# We are on a backslash and there is another character after the backslash
# to parse. Handle this case specially since gdb escaped it for us
# Get the next char that is being escaped
c2 = self.raw_text[self.index]
self.index += 1
# only store the escaped character in the buffer; don't store the backslash
# (don't leave it escaped)
buf += c2
elif c == '"':
# Quote is closed. Exit (and don't include the end quote).
break
else:
# capture this character, and keep capturing
buf += c
return buf | python | {
"resource": ""
} |
q257552 | parse_response | validation | def parse_response(gdb_mi_text):
"""Parse gdb mi text and turn it into a dictionary.
See https://sourceware.org/gdb/onlinedocs/gdb/GDB_002fMI-Stream-Records.html#GDB_002fMI-Stream-Records
for details on types of gdb mi output.
Args:
gdb_mi_text (str): String output from gdb
Returns:
dict with the following keys:
type (either 'notify', 'result', 'console', 'log', 'target', 'done'),
message (str or None),
payload (str, list, dict, or None)
"""
stream = StringStream(gdb_mi_text, debug=_DEBUG)
if _GDB_MI_NOTIFY_RE.match(gdb_mi_text):
token, message, payload = _get_notify_msg_and_payload(gdb_mi_text, stream)
return {
"type": "notify",
"message": message,
"payload": payload,
"token": token,
}
elif _GDB_MI_RESULT_RE.match(gdb_mi_text):
token, message, payload = _get_result_msg_and_payload(gdb_mi_text, stream)
return {
"type": "result",
"message": message,
"payload": payload,
"token": token,
}
elif _GDB_MI_CONSOLE_RE.match(gdb_mi_text):
return {
"type": "console",
"message": None,
"payload": _GDB_MI_CONSOLE_RE.match(gdb_mi_text).groups()[0],
}
elif _GDB_MI_LOG_RE.match(gdb_mi_text):
return {
"type": "log",
"message": None,
"payload": _GDB_MI_LOG_RE.match(gdb_mi_text).groups()[0],
}
elif _GDB_MI_TARGET_OUTPUT_RE.match(gdb_mi_text):
return {
"type": "target",
"message": None,
"payload": _GDB_MI_TARGET_OUTPUT_RE.match(gdb_mi_text).groups()[0],
}
elif response_is_finished(gdb_mi_text):
return {"type": "done", "message": None, "payload": None}
else:
# This was not gdb mi output, so it must have just been printed by
# the inferior program that's being debugged
return {"type": "output", "message": None, "payload": gdb_mi_text} | python | {
"resource": ""
} |
q257553 | _get_notify_msg_and_payload | validation | def _get_notify_msg_and_payload(result, stream):
"""Get notify message and payload dict"""
token = stream.advance_past_chars(["=", "*"])
token = int(token) if token != "" else None
logger.debug("%s", fmt_green("parsing message"))
message = stream.advance_past_chars([","])
logger.debug("parsed message")
logger.debug("%s", fmt_green(message))
payload = _parse_dict(stream)
return token, message.strip(), payload | python | {
"resource": ""
} |
q257554 | _get_result_msg_and_payload | validation | def _get_result_msg_and_payload(result, stream):
"""Get result message and payload dict"""
groups = _GDB_MI_RESULT_RE.match(result).groups()
token = int(groups[0]) if groups[0] != "" else None
message = groups[1]
if groups[2] is None:
payload = None
else:
stream.advance_past_chars([","])
payload = _parse_dict(stream)
return token, message, payload | python | {
"resource": ""
} |
q257555 | BroadcastQueue._get_or_create_subscription | validation | def _get_or_create_subscription(self):
"""In a broadcast queue, workers have a unique subscription ensuring
that every worker recieves a copy of every task."""
topic_path = self._get_topic_path()
subscription_name = '{}-{}-{}-worker'.format(
queue.PUBSUB_OBJECT_PREFIX, self.name, uuid4().hex)
subscription_path = self.subscriber_client.subscription_path(
self.project, subscription_name)
try:
self.subscriber_client.get_subscription(subscription_path)
except google.cloud.exceptions.NotFound:
logger.info("Creating worker subscription {}".format(
subscription_name))
self.subscriber_client.create_subscription(
subscription_path, topic_path)
return subscription_path | python | {
"resource": ""
} |
q257556 | BroadcastQueue.cleanup | validation | def cleanup(self):
"""Deletes this worker's subscription."""
if self.subscription:
logger.info("Deleting worker subscription...")
self.subscriber_client.delete_subscription(self.subscription) | python | {
"resource": ""
} |
q257557 | Queue._get_or_create_subscription | validation | def _get_or_create_subscription(self):
"""Workers all share the same subscription so that tasks are
distributed across all workers."""
topic_path = self._get_topic_path()
subscription_name = '{}-{}-shared'.format(
PUBSUB_OBJECT_PREFIX, self.name)
subscription_path = self.subscriber_client.subscription_path(
self.project, subscription_name)
try:
self.subscriber_client.get_subscription(subscription_path)
except google.cloud.exceptions.NotFound:
logger.info("Creating shared subscription {}".format(
subscription_name))
try:
self.subscriber_client.create_subscription(
subscription_path, topic=topic_path)
except google.cloud.exceptions.Conflict:
# Another worker created the subscription before us, ignore.
pass
return subscription_path | python | {
"resource": ""
} |
q257558 | Queue.enqueue | validation | def enqueue(self, f, *args, **kwargs):
"""Enqueues a function for the task queue to execute."""
task = Task(uuid4().hex, f, args, kwargs)
self.storage.put_task(task)
return self.enqueue_task(task) | python | {
"resource": ""
} |
q257559 | Queue.enqueue_task | validation | def enqueue_task(self, task):
"""Enqueues a task directly. This is used when a task is retried or if
a task was manually created.
Note that this does not store the task.
"""
data = dumps(task)
if self._async:
self.publisher_client.publish(self.topic_path, data=data)
logger.info('Task {} queued.'.format(task.id))
else:
unpickled_task = unpickle(data)
logger.info(
'Executing task {} synchronously.'.format(unpickled_task.id)
)
with measure_time() as summary, self.queue_context():
unpickled_task.execute(queue=self)
summary(unpickled_task.summary())
return TaskResult(task.id, self) | python | {
"resource": ""
} |
q257560 | main | validation | def main(path, pid, queue):
"""
Standalone PSQ worker.
The queue argument must be the full importable path to a psq.Queue
instance.
Example usage:
psqworker config.q
psqworker --path /opt/app queues.fast
"""
setup_logging()
if pid:
with open(os.path.expanduser(pid), "w") as f:
f.write(str(os.getpid()))
if not path:
path = os.getcwd()
sys.path.insert(0, path)
queue = import_queue(queue)
import psq
worker = psq.Worker(queue=queue)
worker.listen() | python | {
"resource": ""
} |
q257561 | TaskResult.result | validation | def result(self, timeout=None):
"""Gets the result of the task.
Arguments:
timeout: Maximum seconds to wait for a result before raising a
TimeoutError. If set to None, this will wait forever. If the
queue doesn't store results and timeout is None, this call will
never return.
"""
start = time.time()
while True:
task = self.get_task()
if not task or task.status not in (FINISHED, FAILED):
if not timeout:
continue
elif time.time() - start < timeout:
continue
else:
raise TimeoutError()
if task.status == FAILED:
raise task.result
return task.result | python | {
"resource": ""
} |
q257562 | service_start | validation | def service_start(service=None, param=None):
"""
Launch a Process, return his pid
"""
if service is not None:
to_run = ["python", service]
if param is not None:
to_run += param
return subprocess.Popen(to_run)
return False | python | {
"resource": ""
} |
q257563 | update_running_pids | validation | def update_running_pids(old_procs):
"""
Update the list of the running process and return the list
"""
new_procs = []
for proc in old_procs:
if proc.poll() is None and check_pid(proc.pid):
publisher.debug(str(proc.pid) + ' is alive')
new_procs.append(proc)
else:
try:
publisher.debug(str(proc.pid) + ' is gone')
os.kill(proc.pid, signal.SIGKILL)
except:
# the process is just already gone
pass
return new_procs | python | {
"resource": ""
} |
q257564 | run_splitted_processing | validation | def run_splitted_processing(max_simultaneous_processes, process_name,
filenames):
"""
Run processes which push the routing dump of the RIPE in a redis
database.
The dump has been splitted in multiple files and each process run
on one of this files.
"""
pids = []
while len(filenames) > 0:
while len(filenames) > 0 and len(pids) < max_simultaneous_processes:
filename = filenames.pop()
pids.append(service_start(service=process_name,
param=['-f', filename, '-d',
imported_day]))
while len(pids) == max_simultaneous_processes:
time.sleep(sleep_timer)
pids = update_running_pids(pids)
while len(pids) > 0:
# Wait until all the processes are finished
time.sleep(sleep_timer)
pids = update_running_pids(pids) | python | {
"resource": ""
} |
q257565 | fsplit | validation | def fsplit(file_to_split):
"""
Split the file and return the list of filenames.
"""
dirname = file_to_split + '_splitted'
if not os.path.exists(dirname):
os.mkdir(dirname)
part_file_size = os.path.getsize(file_to_split) / number_of_files + 1
splitted_files = []
with open(file_to_split, "r") as f:
number = 0
actual = 0
while 1:
prec = actual
# Jump of "size" from the current place in the file
f.seek(part_file_size, os.SEEK_CUR)
# find the next separator or EOF
s = f.readline()
if len(s) == 0:
s = f.readline()
while len(s) != 0 and s != separator:
s = f.readline()
# Get the current place
actual = f.tell()
new_file = os.path.join(dirname, str(number))
# Create the new file
with open(file_to_split, "r") as temp:
temp.seek(prec)
# Get the text we want to put in the new file
copy = temp.read(actual - prec)
# Write the new file
open(new_file, 'w').write(copy)
splitted_files.append(new_file)
number += 1
# End of file
if len(s) == 0:
break
return splitted_files | python | {
"resource": ""
} |
q257566 | IPASN.asn | validation | def asn(self, ip, announce_date=None):
"""
Give an IP, maybe a date, get the ASN.
This is the fastest command.
:param ip: IP address to search for
:param announce_date: Date of the announcement
:rtype: String, ASN.
"""
assignations, announce_date, _ = self.run(ip, announce_date)
return next((assign for assign in assignations if assign is not None), None), announce_date | python | {
"resource": ""
} |
q257567 | IPASN.date_asn_block | validation | def date_asn_block(self, ip, announce_date=None):
"""
Get the ASN and the IP Block announcing the IP at a specific date.
:param ip: IP address to search for
:param announce_date: Date of the announcement
:rtype: tuple
.. code-block:: python
(announce_date, asn, block)
.. note::
the returned announce_date might be different of the one
given in parameter because some raw files are missing and we
don't have the information. In this case, the nearest known
date will be chosen,
"""
assignations, announce_date, keys = self.run(ip, announce_date)
pos = next((i for i, j in enumerate(assignations) if j is not None), None)
if pos is not None:
block = keys[pos]
if block != '0.0.0.0/0':
return announce_date, assignations[pos], block
return None | python | {
"resource": ""
} |
q257568 | IPASN.history | validation | def history(self, ip, days_limit=None):
"""
Get the full history of an IP. It takes time.
:param ip: IP address to search for
:param days_limit: Max amount of days to query. (None means no limit)
:rtype: list. For each day in the database: day, asn, block
"""
all_dates = sorted(self.routing_db.smembers('imported_dates'), reverse=True)
if days_limit is not None:
all_dates = all_dates[:days_limit]
return [self.date_asn_block(ip, date) for date in all_dates] | python | {
"resource": ""
} |
q257569 | IPASN.aggregate_history | validation | def aggregate_history(self, ip, days_limit=None):
"""
Get the full history of an IP, aggregate the result instead of
returning one line per day.
:param ip: IP address to search for
:param days_limit: Max amount of days to query. (None means no limit)
:rtype: list. For each change: FirstDay, LastDay, ASN, Block
"""
first_date = None
last_date = None
prec_asn = None
prec_block = None
for entry in self.history(ip, days_limit):
if entry is None:
continue
date, asn, block = entry
if first_date is None:
last_date = date
first_date = date
prec_asn = asn
prec_block = block
elif prec_asn == asn and prec_block == block:
first_date = date
else:
yield first_date, last_date, prec_asn, prec_block
last_date = date
first_date = date
prec_asn = asn
prec_block = block
if first_date is not None:
yield first_date, last_date, prec_asn, prec_block | python | {
"resource": ""
} |
q257570 | downloadURL | validation | def downloadURL(url, filename):
"""
Inconditianilly download the URL in a temporary directory.
When finished, the file is moved in the real directory.
Like this an other process will not attempt to extract an inclomplete file.
"""
path_temp_bviewfile = os.path.join(c.raw_data, c.bview_dir, 'tmp', filename)
path_bviewfile = os.path.join(c.raw_data, c.bview_dir, filename)
try:
f = urlopen(url)
except:
return False
if f.getcode() != 200:
publisher.warning('{} unavailable, code: {}'.format(url, f.getcode()))
return False
try:
with open(path_temp_bviewfile, 'w') as outfile:
outfile.write(f.read())
os.rename(path_temp_bviewfile, path_bviewfile)
except:
os.remove(path_temp_bviewfile)
return False
return True | python | {
"resource": ""
} |
q257571 | already_downloaded | validation | def already_downloaded(filename):
"""
Verify that the file has not already been downloaded.
"""
cur_file = os.path.join(c.bview_dir, filename)
old_file = os.path.join(c.bview_dir, 'old', filename)
if not os.path.exists(cur_file) and not os.path.exists(old_file):
return False
return True | python | {
"resource": ""
} |
q257572 | strToBool | validation | def strToBool(val):
"""
Helper function to turn a string representation of "true" into
boolean True.
"""
if isinstance(val, str):
val = val.lower()
return val in ['true', 'on', 'yes', True] | python | {
"resource": ""
} |
q257573 | get_page_url | validation | def get_page_url(page_num, current_app, url_view_name, url_extra_args, url_extra_kwargs, url_param_name, url_get_params, url_anchor):
"""
Helper function to return a valid URL string given the template tag parameters
"""
if url_view_name is not None:
# Add page param to the kwargs list. Overrides any previously set parameter of the same name.
url_extra_kwargs[url_param_name] = page_num
try:
url = reverse(url_view_name, args=url_extra_args, kwargs=url_extra_kwargs, current_app=current_app)
except NoReverseMatch as e: # Attempt to load view from application root, allowing the use of non-namespaced view names if your view is defined in the root application
if settings.SETTINGS_MODULE:
if django.VERSION < (1, 9, 0):
separator = '.'
else:
separator = ':' # Namespace separator changed to colon after 1.8
project_name = settings.SETTINGS_MODULE.split('.')[0]
try:
url = reverse(project_name + separator + url_view_name, args=url_extra_args, kwargs=url_extra_kwargs, current_app=current_app)
except NoReverseMatch:
raise e # Raise the original exception so the error message doesn't confusingly include something the Developer didn't add to the view name themselves
else:
raise e # We can't determine the project name so just re-throw the exception
else:
url = ''
url_get_params = url_get_params or QueryDict(url)
url_get_params = url_get_params.copy()
url_get_params[url_param_name] = str(page_num)
if len(url_get_params) > 0:
if not isinstance(url_get_params, QueryDict):
tmp = QueryDict(mutable=True)
tmp.update(url_get_params)
url_get_params = tmp
url += '?' + url_get_params.urlencode()
if (url_anchor is not None):
url += '#' + url_anchor
return url | python | {
"resource": ""
} |
q257574 | bootstrap_paginate | validation | def bootstrap_paginate(parser, token):
"""
Renders a Page object as a Twitter Bootstrap styled pagination bar.
Compatible with Bootstrap 3.x and 4.x only.
Example::
{% bootstrap_paginate page_obj range=10 %}
Named Parameters::
range - The size of the pagination bar (ie, if set to 10 then, at most,
10 page numbers will display at any given time) Defaults to
None, which shows all pages.
size - Accepts "small", and "large". Defaults to
None which is the standard size.
show_prev_next - Accepts "true" or "false". Determines whether or not
to show the previous and next page links. Defaults to
"true"
show_first_last - Accepts "true" or "false". Determines whether or not
to show the first and last page links. Defaults to
"false"
previous_label - The text to display for the previous page link.
Defaults to "←"
next_label - The text to display for the next page link. Defaults to
"→"
first_label - The text to display for the first page link. Defaults to
"«"
last_label - The text to display for the last page link. Defaults to
"»"
url_view_name - The named URL to use. Defaults to None. If None, then the
default template simply appends the url parameter as a
relative URL link, eg: <a href="?page=1">1</a>
url_param_name - The name of the parameter to use in the URL. If
url_view_name is set to None, this string is used as the
parameter name in the relative URL path. If a URL
name is specified, this string is used as the
parameter name passed into the reverse() method for
the URL.
url_extra_args - This is used only in conjunction with url_view_name.
When referencing a URL, additional arguments may be
passed in as a list.
url_extra_kwargs - This is used only in conjunction with url_view_name.
When referencing a URL, additional named arguments
may be passed in as a dictionary.
url_get_params - The other get parameters to pass, only the page
number will be overwritten. Use this to preserve
filters.
url_anchor - The anchor to use in URLs. Defaults to None.
extra_pagination_classes - A space separated list of CSS class names
that will be added to the top level <ul>
HTML element. In particular, this can be
utilized in Bootstrap 4 installations to
add the appropriate alignment classes from
Flexbox utilities, eg: justify-content-center
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument"
" (Page object reference)" % bits[0])
page = parser.compile_filter(bits[1])
kwargs = {}
bits = bits[2:]
kwarg_re = re.compile(r'(\w+)=(.+)')
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to bootstrap_pagination paginate tag")
name, value = match.groups()
kwargs[name] = parser.compile_filter(value)
return BootstrapPaginationNode(page, kwargs) | python | {
"resource": ""
} |
def get_regressions(
        package_descriptors, targets,
        building_repo_data, testing_repo_data, main_repo_data):
    """
    Determine for every package / target pair whether it is a regression.

    A pair counts as a regression when the main repository carries a version
    of the package that is newer than the one in the building or testing
    repository, or when either of those repositories lacks the package
    entirely while main has it.

    :param package_descriptors: dict of package descriptors (each with
      ``pkg_name`` and ``debian_pkg_name`` attributes)
    :param targets: iterable of targets to check
    :param building_repo_data: dict target -> {debian_pkg_name: version}
    :param testing_repo_data: dict target -> {debian_pkg_name: version}
    :param main_repo_data: dict target -> {debian_pkg_name: version}
    :return: a dict indexed by package names containing
      dicts indexed by targets containing a boolean flag
    """
    regressions = {}
    staging_repos = (building_repo_data, testing_repo_data)
    for descriptor in package_descriptors.values():
        deb_name = descriptor.debian_pkg_name
        per_target = {}
        for target in targets:
            main_version = main_repo_data.get(target, {}).get(deb_name, None)
            regressed = False
            # Only a package that exists in main can regress.
            if main_version is not None:
                main_loose = LooseVersion(main_version)
                for staging in staging_repos:
                    staged = staging.get(target, {}).get(deb_name, None)
                    if not staged or main_loose > LooseVersion(staged):
                        regressed = True
            per_target[target] = regressed
        regressions[descriptor.pkg_name] = per_target
    return regressions
"resource": ""
} |
def _strip_version_suffix(version):
    """
    Remove trailing junk from the version number.

    Everything past the portion matched by the module-level
    ``version_regex`` pattern (timestamps, distro suffixes, ...) is
    discarded.  Falsy input (``None`` or ``''``) is returned unchanged.

    :param version: the raw version string (may be ``None`` or empty)
    :return: the stripped version string

    >>> _strip_version_suffix('')
    ''
    >>> _strip_version_suffix('None')
    'None'
    >>> _strip_version_suffix('1.2.3-4trusty-20140131-1359-+0000')
    '1.2.3-4'
    >>> _strip_version_suffix('1.2.3-foo')
    '1.2.3'
    """
    # The 'global' is only documentation here: version_regex is read, never
    # rebound.  It must be defined at module level - TODO confirm at import.
    global version_regex
    if not version:
        # Preserve None / '' as-is so callers can tell "not packaged" apart.
        return version
    match = version_regex.search(version)
    # If the pattern does not match at all, fall back to the raw string.
    return match.group(0) if match else version
"resource": ""
} |
q257577 | get_homogeneous | validation | def get_homogeneous(package_descriptors, targets, repos_data):
"""
For each package check if the version in one repo is equal for all targets.
The version could be different in different repos though.
:return: a dict indexed by package names containing a boolean flag
"""
homogeneous = {}
for package_descriptor in package_descriptors.values():
pkg_name = package_descriptor.pkg_name
debian_pkg_name = package_descriptor.debian_pkg_name
versions = []
for repo_data in repos_data:
versions.append(set([]))
for target in targets:
version = _strip_version_suffix(
repo_data.get(target, {}).get(debian_pkg_name, None))
versions[-1].add(version)
homogeneous[pkg_name] = max([len(v) for v in versions]) == 1
return homogeneous | python | {
"resource": ""
} |
q257578 | get_package_counts | validation | def get_package_counts(package_descriptors, targets, repos_data):
"""
Get the number of packages per target and repository.
:return: a dict indexed by targets containing
a list of integer values (one for each repo)
"""
counts = {}
for target in targets:
counts[target] = [0] * len(repos_data)
for package_descriptor in package_descriptors.values():
debian_pkg_name = package_descriptor.debian_pkg_name
for target in targets:
for i, repo_data in enumerate(repos_data):
version = repo_data.get(target, {}).get(debian_pkg_name, None)
if version:
counts[target][i] += 1
return counts | python | {
"resource": ""
} |
q257579 | get_jenkins_job_urls | validation | def get_jenkins_job_urls(
rosdistro_name, jenkins_url, release_build_name, targets):
"""
Get the Jenkins job urls for each target.
The placeholder {pkg} needs to be replaced with the ROS package name.
:return: a dict indexed by targets containing a string
"""
urls = {}
for target in targets:
view_name = get_release_view_name(
rosdistro_name, release_build_name,
target.os_name, target.os_code_name, target.arch)
base_url = jenkins_url + '/view/%s/job/%s__{pkg}__' % \
(view_name, view_name)
if target.arch == 'source':
urls[target] = base_url + '%s_%s__source' % \
(target.os_name, target.os_code_name)
else:
urls[target] = base_url + '%s_%s_%s__binary' % \
(target.os_name, target.os_code_name, target.arch)
return urls | python | {
"resource": ""
} |
q257580 | configure_ci_jobs | validation | def configure_ci_jobs(
config_url, rosdistro_name, ci_build_name,
groovy_script=None, dry_run=False):
"""Configure all Jenkins CI jobs."""
config = get_config_index(config_url)
build_files = get_ci_build_files(config, rosdistro_name)
build_file = build_files[ci_build_name]
index = get_index(config.rosdistro_index_url)
# get targets
targets = []
for os_name in build_file.targets.keys():
for os_code_name in build_file.targets[os_name].keys():
for arch in build_file.targets[os_name][os_code_name]:
targets.append((os_name, os_code_name, arch))
print('The build file contains the following targets:')
for os_name, os_code_name, arch in targets:
print(' -', os_name, os_code_name, arch)
dist_file = get_distribution_file(index, rosdistro_name, build_file)
if not dist_file:
print('No distribution file matches the build file')
return
ci_view_name = get_ci_view_name(rosdistro_name)
# all further configuration will be handled by either the Jenkins API
# or by a generated groovy script
from ros_buildfarm.jenkins import connect
jenkins = connect(config.jenkins_url) if groovy_script is None else False
view_configs = {}
views = {
ci_view_name: configure_ci_view(
jenkins, ci_view_name, dry_run=dry_run)
}
if not jenkins:
view_configs.update(views)
groovy_data = {
'dry_run': dry_run,
'expected_num_views': len(view_configs),
}
ci_job_names = []
job_configs = OrderedDict()
is_disabled = False
for os_name, os_code_name, arch in targets:
try:
job_name, job_config = configure_ci_job(
config_url, rosdistro_name, ci_build_name,
os_name, os_code_name, arch,
config=config, build_file=build_file,
index=index, dist_file=dist_file,
jenkins=jenkins, views=views,
is_disabled=is_disabled,
groovy_script=groovy_script,
dry_run=dry_run,
trigger_timer=build_file.jenkins_job_schedule)
ci_job_names.append(job_name)
if groovy_script is not None:
print("Configuration for job '%s'" % job_name)
job_configs[job_name] = job_config
except JobValidationError as e:
print(e.message, file=sys.stderr)
groovy_data['expected_num_jobs'] = len(job_configs)
groovy_data['job_prefixes_and_names'] = {}
if groovy_script is not None:
print(
"Writing groovy script '%s' to reconfigure %d jobs" %
(groovy_script, len(job_configs)))
content = expand_template(
'snippet/reconfigure_jobs.groovy.em', groovy_data)
write_groovy_script_and_configs(
groovy_script, content, job_configs, view_configs) | python | {
"resource": ""
} |
q257581 | configure_ci_job | validation | def configure_ci_job(
config_url, rosdistro_name, ci_build_name,
os_name, os_code_name, arch,
config=None, build_file=None,
index=None, dist_file=None,
jenkins=None, views=None,
is_disabled=False,
groovy_script=None,
build_targets=None,
dry_run=False,
underlay_source_paths=None,
trigger_timer=None):
"""
Configure a single Jenkins CI job.
This includes the following steps:
- clone the ros_buildfarm repository
- write the distribution repository keys into files
- invoke the ci/run_ci_job.py script
"""
if config is None:
config = get_config_index(config_url)
if build_file is None:
build_files = get_ci_build_files(config, rosdistro_name)
build_file = build_files[ci_build_name]
# Overwrite build_file.targets if build_targets is specified
if build_targets is not None:
build_file.targets = build_targets
if index is None:
index = get_index(config.rosdistro_index_url)
if dist_file is None:
dist_file = get_distribution_file(index, rosdistro_name, build_file)
if not dist_file:
raise JobValidationError(
'No distribution file matches the build file')
if os_name not in build_file.targets.keys():
raise JobValidationError(
"Invalid OS name '%s' " % os_name +
'choose one of the following: ' +
', '.join(sorted(build_file.targets.keys())))
if os_code_name not in build_file.targets[os_name].keys():
raise JobValidationError(
"Invalid OS code name '%s' " % os_code_name +
'choose one of the following: ' +
', '.join(sorted(build_file.targets[os_name].keys())))
if arch not in build_file.targets[os_name][os_code_name]:
raise JobValidationError(
"Invalid architecture '%s' " % arch +
'choose one of the following: %s' % ', '.join(sorted(
build_file.targets[os_name][os_code_name])))
if len(build_file.underlay_from_ci_jobs) > 1:
raise JobValidationError(
'Only a single underlay job is currently supported, but the ' +
'build file lists %d.' % len(build_file.underlay_from_ci_jobs))
underlay_source_job = None
if build_file.underlay_from_ci_jobs:
underlay_source_job = get_ci_job_name(
rosdistro_name, os_name, os_code_name, arch,
build_file.underlay_from_ci_jobs[0])
underlay_source_paths = (underlay_source_paths or []) + \
['$UNDERLAY_JOB_SPACE']
if jenkins is None:
from ros_buildfarm.jenkins import connect
jenkins = connect(config.jenkins_url)
if views is None:
view_name = get_ci_view_name(rosdistro_name)
configure_ci_view(jenkins, view_name, dry_run=dry_run)
job_name = get_ci_job_name(
rosdistro_name, os_name, os_code_name, arch, ci_build_name)
job_config = _get_ci_job_config(
index, rosdistro_name, build_file, os_name,
os_code_name, arch,
build_file.repos_files,
underlay_source_job,
underlay_source_paths,
trigger_timer,
is_disabled=is_disabled)
# jenkinsapi.jenkins.Jenkins evaluates to false if job count is zero
if isinstance(jenkins, object) and jenkins is not False:
from ros_buildfarm.jenkins import configure_job
configure_job(jenkins, job_name, job_config, dry_run=dry_run)
return job_name, job_config | python | {
"resource": ""
} |
q257582 | write_groovy_script_and_configs | validation | def write_groovy_script_and_configs(
filename, content, job_configs, view_configs=None):
"""Write out the groovy script and configs to file.
This writes the reconfigure script to the file location
and places the expanded configs in subdirectories 'view_configs' /
'job_configs' that the script can then access when run.
"""
with open(filename, 'w') as h:
h.write(content)
if view_configs:
view_config_dir = os.path.join(os.path.dirname(filename), 'view_configs')
if not os.path.isdir(view_config_dir):
os.makedirs(view_config_dir)
for config_name, config_body in view_configs.items():
config_filename = os.path.join(view_config_dir, config_name)
with open(config_filename, 'w') as config_fh:
config_fh.write(config_body)
job_config_dir = os.path.join(os.path.dirname(filename), 'job_configs')
if not os.path.isdir(job_config_dir):
os.makedirs(job_config_dir)
# prefix each config file with a serial number to maintain order
format_str = '%0' + str(len(str(len(job_configs)))) + 'd'
i = 0
for config_name, config_body in job_configs.items():
i += 1
config_filename = os.path.join(
job_config_dir,
format_str % i + ' ' + config_name)
with open(config_filename, 'w') as config_fh:
config_fh.write(config_body) | python | {
"resource": ""
} |
q257583 | topological_order_packages | validation | def topological_order_packages(packages):
"""
Order packages topologically.
First returning packages which have message generators and then
the rest based on all direct depends and indirect recursive run_depends.
:param packages: A dict mapping relative paths to ``Package`` objects ``dict``
:returns: A list of tuples containing the relative path and a ``Package`` object, ``list``
"""
from catkin_pkg.topological_order import _PackageDecorator
from catkin_pkg.topological_order import _sort_decorated_packages
decorators_by_name = {}
for path, package in packages.items():
decorators_by_name[package.name] = _PackageDecorator(package, path)
# calculate transitive dependencies
for decorator in decorators_by_name.values():
decorator.depends_for_topological_order = set([])
all_depends = \
decorator.package.build_depends + decorator.package.buildtool_depends + \
decorator.package.run_depends + decorator.package.test_depends
# skip external dependencies, meaning names that are not known packages
unique_depend_names = set([
d.name for d in all_depends if d.name in decorators_by_name.keys()])
for name in unique_depend_names:
if name in decorator.depends_for_topological_order:
# avoid function call to improve performance
# check within the loop since the set changes every cycle
continue
decorators_by_name[name]._add_recursive_run_depends(
decorators_by_name, decorator.depends_for_topological_order)
ordered_pkg_tuples = _sort_decorated_packages(decorators_by_name)
for pkg_path, pkg in ordered_pkg_tuples:
if pkg_path is None:
raise RuntimeError('Circular dependency in: %s' % pkg)
return ordered_pkg_tuples | python | {
"resource": ""
} |
q257584 | _unarmor_pem | validation | def _unarmor_pem(data, password=None):
"""
Removes PEM-encoding from a public key, private key or certificate. If the
private key is encrypted, the password will be used to decrypt it.
:param data:
A byte string of the PEM-encoded data
:param password:
A byte string of the encryption password, or None
:return:
A 3-element tuple in the format: (key_type, algorithm, der_bytes). The
key_type will be a unicode string of "public key", "private key" or
"certificate". The algorithm will be a unicode string of "rsa", "dsa"
or "ec".
"""
object_type, headers, der_bytes = pem.unarmor(data)
type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'
armor_type = re.match(type_regex, object_type)
if not armor_type:
raise ValueError(pretty_message(
'''
data does not seem to contain a PEM-encoded certificate, private
key or public key
'''
))
pem_header = armor_type.group(1)
data = data.strip()
# RSA private keys are encrypted after being DER-encoded, but before base64
# encoding, so they need to be hanlded specially
if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):
algo = armor_type.group(2).lower()
return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))
key_type = pem_header.lower()
algo = None
if key_type == 'encrypted private key':
key_type = 'private key'
elif key_type == 'rsa public key':
key_type = 'public key'
algo = 'rsa'
return (key_type, algo, der_bytes) | python | {
"resource": ""
} |
q257585 | _decrypt_encrypted_data | validation | def _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password):
"""
Decrypts encrypted ASN.1 data
:param encryption_algorithm_info:
An instance of asn1crypto.pkcs5.Pkcs5EncryptionAlgorithm
:param encrypted_content:
A byte string of the encrypted content
:param password:
A byte string of the encrypted content's password
:return:
A byte string of the decrypted plaintext
"""
decrypt_func = crypto_funcs[encryption_algorithm_info.encryption_cipher]
# Modern, PKCS#5 PBES2-based encryption
if encryption_algorithm_info.kdf == 'pbkdf2':
if encryption_algorithm_info.encryption_cipher == 'rc5':
raise ValueError(pretty_message(
'''
PBES2 encryption scheme utilizing RC5 encryption is not supported
'''
))
enc_key = pbkdf2(
encryption_algorithm_info.kdf_hmac,
password,
encryption_algorithm_info.kdf_salt,
encryption_algorithm_info.kdf_iterations,
encryption_algorithm_info.key_length
)
enc_iv = encryption_algorithm_info.encryption_iv
plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)
elif encryption_algorithm_info.kdf == 'pbkdf1':
derived_output = pbkdf1(
encryption_algorithm_info.kdf_hmac,
password,
encryption_algorithm_info.kdf_salt,
encryption_algorithm_info.kdf_iterations,
encryption_algorithm_info.key_length + 8
)
enc_key = derived_output[0:8]
enc_iv = derived_output[8:16]
plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)
elif encryption_algorithm_info.kdf == 'pkcs12_kdf':
enc_key = pkcs12_kdf(
encryption_algorithm_info.kdf_hmac,
password,
encryption_algorithm_info.kdf_salt,
encryption_algorithm_info.kdf_iterations,
encryption_algorithm_info.key_length,
1 # ID 1 is for generating a key
)
# Since RC4 is a stream cipher, we don't use an IV
if encryption_algorithm_info.encryption_cipher == 'rc4':
plaintext = decrypt_func(enc_key, encrypted_content)
else:
enc_iv = pkcs12_kdf(
encryption_algorithm_info.kdf_hmac,
password,
encryption_algorithm_info.kdf_salt,
encryption_algorithm_info.kdf_iterations,
encryption_algorithm_info.encryption_block_size,
2 # ID 2 is for generating an IV
)
plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)
return plaintext | python | {
"resource": ""
} |
q257586 | _setup_evp_encrypt_decrypt | validation | def _setup_evp_encrypt_decrypt(cipher, data):
"""
Creates an EVP_CIPHER pointer object and determines the buffer size
necessary for the parameter specified.
:param evp_cipher_ctx:
An EVP_CIPHER_CTX pointer
:param cipher:
A unicode string of "aes128", "aes192", "aes256", "des",
"tripledes_2key", "tripledes_3key", "rc2", "rc4"
:param key:
The key byte string
:param data:
The plaintext or ciphertext as a byte string
:param padding:
If padding is to be used
:return:
A 2-element tuple with the first element being an EVP_CIPHER pointer
and the second being an integer that is the required buffer size
"""
evp_cipher = {
'aes128': libcrypto.EVP_aes_128_cbc,
'aes192': libcrypto.EVP_aes_192_cbc,
'aes256': libcrypto.EVP_aes_256_cbc,
'rc2': libcrypto.EVP_rc2_cbc,
'rc4': libcrypto.EVP_rc4,
'des': libcrypto.EVP_des_cbc,
'tripledes_2key': libcrypto.EVP_des_ede_cbc,
'tripledes_3key': libcrypto.EVP_des_ede3_cbc,
}[cipher]()
if cipher == 'rc4':
buffer_size = len(data)
else:
block_size = {
'aes128': 16,
'aes192': 16,
'aes256': 16,
'rc2': 8,
'des': 8,
'tripledes_2key': 8,
'tripledes_3key': 8,
}[cipher]
buffer_size = block_size * int(math.ceil(len(data) / block_size))
return (evp_cipher, buffer_size) | python | {
"resource": ""
} |
q257587 | _advapi32_interpret_rsa_key_blob | validation | def _advapi32_interpret_rsa_key_blob(bit_size, blob_struct, blob):
"""
Takes a CryptoAPI RSA private key blob and converts it into the ASN.1
structures for the public and private keys
:param bit_size:
The integer bit size of the key
:param blob_struct:
An instance of the advapi32.RSAPUBKEY struct
:param blob:
A byte string of the binary data after the header
:return:
A 2-element tuple of (asn1crypto.keys.PublicKeyInfo,
asn1crypto.keys.PrivateKeyInfo)
"""
len1 = bit_size // 8
len2 = bit_size // 16
prime1_offset = len1
prime2_offset = prime1_offset + len2
exponent1_offset = prime2_offset + len2
exponent2_offset = exponent1_offset + len2
coefficient_offset = exponent2_offset + len2
private_exponent_offset = coefficient_offset + len2
public_exponent = blob_struct.rsapubkey.pubexp
modulus = int_from_bytes(blob[0:prime1_offset][::-1])
prime1 = int_from_bytes(blob[prime1_offset:prime2_offset][::-1])
prime2 = int_from_bytes(blob[prime2_offset:exponent1_offset][::-1])
exponent1 = int_from_bytes(blob[exponent1_offset:exponent2_offset][::-1])
exponent2 = int_from_bytes(blob[exponent2_offset:coefficient_offset][::-1])
coefficient = int_from_bytes(blob[coefficient_offset:private_exponent_offset][::-1])
private_exponent = int_from_bytes(blob[private_exponent_offset:private_exponent_offset + len1][::-1])
public_key_info = keys.PublicKeyInfo({
'algorithm': keys.PublicKeyAlgorithm({
'algorithm': 'rsa',
}),
'public_key': keys.RSAPublicKey({
'modulus': modulus,
'public_exponent': public_exponent,
}),
})
rsa_private_key = keys.RSAPrivateKey({
'version': 'two-prime',
'modulus': modulus,
'public_exponent': public_exponent,
'private_exponent': private_exponent,
'prime1': prime1,
'prime2': prime2,
'exponent1': exponent1,
'exponent2': exponent2,
'coefficient': coefficient,
})
private_key_info = keys.PrivateKeyInfo({
'version': 0,
'private_key_algorithm': keys.PrivateKeyAlgorithm({
'algorithm': 'rsa',
}),
'private_key': rsa_private_key,
})
return (public_key_info, private_key_info) | python | {
"resource": ""
} |
q257588 | _advapi32_interpret_dsa_key_blob | validation | def _advapi32_interpret_dsa_key_blob(bit_size, public_blob, private_blob):
"""
Takes a CryptoAPI DSS private key blob and converts it into the ASN.1
structures for the public and private keys
:param bit_size:
The integer bit size of the key
:param public_blob:
A byte string of the binary data after the public key header
:param private_blob:
A byte string of the binary data after the private key header
:return:
A 2-element tuple of (asn1crypto.keys.PublicKeyInfo,
asn1crypto.keys.PrivateKeyInfo)
"""
len1 = 20
len2 = bit_size // 8
q_offset = len2
g_offset = q_offset + len1
x_offset = g_offset + len2
y_offset = x_offset
p = int_from_bytes(private_blob[0:q_offset][::-1])
q = int_from_bytes(private_blob[q_offset:g_offset][::-1])
g = int_from_bytes(private_blob[g_offset:x_offset][::-1])
x = int_from_bytes(private_blob[x_offset:x_offset + len1][::-1])
y = int_from_bytes(public_blob[y_offset:y_offset + len2][::-1])
public_key_info = keys.PublicKeyInfo({
'algorithm': keys.PublicKeyAlgorithm({
'algorithm': 'dsa',
'parameters': keys.DSAParams({
'p': p,
'q': q,
'g': g,
})
}),
'public_key': core.Integer(y),
})
private_key_info = keys.PrivateKeyInfo({
'version': 0,
'private_key_algorithm': keys.PrivateKeyAlgorithm({
'algorithm': 'dsa',
'parameters': keys.DSAParams({
'p': p,
'q': q,
'g': g,
})
}),
'private_key': core.Integer(x),
})
return (public_key_info, private_key_info) | python | {
"resource": ""
} |
q257589 | _advapi32_load_key | validation | def _advapi32_load_key(key_object, key_info, container):
"""
Loads a certificate, public key or private key into a Certificate,
PublicKey or PrivateKey object via CryptoAPI
:param key_object:
An asn1crypto.x509.Certificate, asn1crypto.keys.PublicKeyInfo or
asn1crypto.keys.PrivateKeyInfo object
:param key_info:
An asn1crypto.keys.PublicKeyInfo or asn1crypto.keys.PrivateKeyInfo
object
:param container:
The class of the object to hold the key_handle
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
oscrypto.errors.AsymmetricKeyError - when the key is incompatible with the OS crypto library
OSError - when an error is returned by the OS crypto library
:return:
A PrivateKey, PublicKey or Certificate object, based on container
"""
key_type = 'public' if isinstance(key_info, keys.PublicKeyInfo) else 'private'
algo = key_info.algorithm
if algo == 'rsa':
provider = Advapi32Const.MS_ENH_RSA_AES_PROV
else:
provider = Advapi32Const.MS_ENH_DSS_DH_PROV
context_handle = None
key_handle = None
try:
context_handle = open_context_handle(provider, verify_only=key_type == 'public')
blob = _advapi32_create_blob(key_info, key_type, algo)
buffer_ = buffer_from_bytes(blob)
key_handle_pointer = new(advapi32, 'HCRYPTKEY *')
res = advapi32.CryptImportKey(
context_handle,
buffer_,
len(blob),
null(),
0,
key_handle_pointer
)
handle_error(res)
key_handle = unwrap(key_handle_pointer)
output = container(key_handle, key_object)
output.context_handle = context_handle
if algo == 'rsa':
ex_blob = _advapi32_create_blob(key_info, key_type, algo, signing=False)
ex_buffer = buffer_from_bytes(ex_blob)
ex_key_handle_pointer = new(advapi32, 'HCRYPTKEY *')
res = advapi32.CryptImportKey(
context_handle,
ex_buffer,
len(ex_blob),
null(),
0,
ex_key_handle_pointer
)
handle_error(res)
output.ex_key_handle = unwrap(ex_key_handle_pointer)
return output
except (Exception):
if key_handle:
advapi32.CryptDestroyKey(key_handle)
if context_handle:
close_context_handle(context_handle)
raise | python | {
"resource": ""
} |
q257590 | rsa_pkcs1v15_verify | validation | def rsa_pkcs1v15_verify(certificate_or_public_key, signature, data, hash_algorithm):
"""
Verifies an RSASSA-PKCS-v1.5 signature.
When the hash_algorithm is "raw", the operation is identical to RSA
public key decryption. That is: the data is not hashed and no ASN.1
structure with an algorithm identifier of the hash algorithm is placed in
the encrypted byte string.
:param certificate_or_public_key:
A Certificate or PublicKey instance to verify the signature with
:param signature:
A byte string of the signature to verify
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"
:raises:
oscrypto.errors.SignatureError - when the signature is determined to be invalid
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
"""
if certificate_or_public_key.algorithm != 'rsa':
raise ValueError('The key specified is not an RSA public key')
return _verify(certificate_or_public_key, signature, data, hash_algorithm) | python | {
"resource": ""
} |
q257591 | _advapi32_verify | validation | def _advapi32_verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):
"""
Verifies an RSA, DSA or ECDSA signature via CryptoAPI
:param certificate_or_public_key:
A Certificate or PublicKey instance to verify the signature with
:param signature:
A byte string of the signature to verify
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"
:param rsa_pss_padding:
If PSS padding should be used for RSA keys
:raises:
oscrypto.errors.SignatureError - when the signature is determined to be invalid
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
"""
algo = certificate_or_public_key.algorithm
if algo == 'rsa' and rsa_pss_padding:
hash_length = {
'sha1': 20,
'sha224': 28,
'sha256': 32,
'sha384': 48,
'sha512': 64
}.get(hash_algorithm, 0)
decrypted_signature = raw_rsa_public_crypt(certificate_or_public_key, signature)
key_size = certificate_or_public_key.bit_size
if not verify_pss_padding(hash_algorithm, hash_length, key_size, data, decrypted_signature):
raise SignatureError('Signature is invalid')
return
if algo == 'rsa' and hash_algorithm == 'raw':
padded_plaintext = raw_rsa_public_crypt(certificate_or_public_key, signature)
try:
plaintext = remove_pkcs1v15_signature_padding(certificate_or_public_key.byte_size, padded_plaintext)
if not constant_compare(plaintext, data):
raise ValueError()
except (ValueError):
raise SignatureError('Signature is invalid')
return
hash_handle = None
try:
alg_id = {
'md5': Advapi32Const.CALG_MD5,
'sha1': Advapi32Const.CALG_SHA1,
'sha256': Advapi32Const.CALG_SHA_256,
'sha384': Advapi32Const.CALG_SHA_384,
'sha512': Advapi32Const.CALG_SHA_512,
}[hash_algorithm]
hash_handle_pointer = new(advapi32, 'HCRYPTHASH *')
res = advapi32.CryptCreateHash(
certificate_or_public_key.context_handle,
alg_id,
null(),
0,
hash_handle_pointer
)
handle_error(res)
hash_handle = unwrap(hash_handle_pointer)
res = advapi32.CryptHashData(hash_handle, data, len(data), 0)
handle_error(res)
if algo == 'dsa':
# Windows doesn't use the ASN.1 Sequence for DSA signatures,
# so we have to convert it here for the verification to work
try:
signature = algos.DSASignature.load(signature).to_p1363()
# Switch the two integers so that the reversal later will
# result in the correct order
half_len = len(signature) // 2
signature = signature[half_len:] + signature[:half_len]
except (ValueError, OverflowError, TypeError):
raise SignatureError('Signature is invalid')
# The CryptoAPI expects signatures to be in little endian byte order,
# which is the opposite of other systems, so we must reverse it
reversed_signature = signature[::-1]
res = advapi32.CryptVerifySignatureW(
hash_handle,
reversed_signature,
len(signature),
certificate_or_public_key.key_handle,
null(),
0
)
handle_error(res)
finally:
if hash_handle:
advapi32.CryptDestroyHash(hash_handle) | python | {
"resource": ""
} |
q257592 | _bcrypt_verify | validation | def _bcrypt_verify(certificate_or_public_key, signature, data, hash_algorithm, rsa_pss_padding=False):
"""
Verifies an RSA, DSA or ECDSA signature via CNG
:param certificate_or_public_key:
A Certificate or PublicKey instance to verify the signature with
:param signature:
A byte string of the signature to verify
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"
:param rsa_pss_padding:
If PSS padding should be used for RSA keys
:raises:
oscrypto.errors.SignatureError - when the signature is determined to be invalid
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
"""
if hash_algorithm == 'raw':
digest = data
else:
hash_constant = {
'md5': BcryptConst.BCRYPT_MD5_ALGORITHM,
'sha1': BcryptConst.BCRYPT_SHA1_ALGORITHM,
'sha256': BcryptConst.BCRYPT_SHA256_ALGORITHM,
'sha384': BcryptConst.BCRYPT_SHA384_ALGORITHM,
'sha512': BcryptConst.BCRYPT_SHA512_ALGORITHM
}[hash_algorithm]
digest = getattr(hashlib, hash_algorithm)(data).digest()
padding_info = null()
flags = 0
if certificate_or_public_key.algorithm == 'rsa':
if rsa_pss_padding:
flags = BcryptConst.BCRYPT_PAD_PSS
padding_info_struct_pointer = struct(bcrypt, 'BCRYPT_PSS_PADDING_INFO')
padding_info_struct = unwrap(padding_info_struct_pointer)
# This has to be assigned to a variable to prevent cffi from gc'ing it
hash_buffer = buffer_from_unicode(hash_constant)
padding_info_struct.pszAlgId = cast(bcrypt, 'wchar_t *', hash_buffer)
padding_info_struct.cbSalt = len(digest)
else:
flags = BcryptConst.BCRYPT_PAD_PKCS1
padding_info_struct_pointer = struct(bcrypt, 'BCRYPT_PKCS1_PADDING_INFO')
padding_info_struct = unwrap(padding_info_struct_pointer)
# This has to be assigned to a variable to prevent cffi from gc'ing it
if hash_algorithm == 'raw':
padding_info_struct.pszAlgId = null()
else:
hash_buffer = buffer_from_unicode(hash_constant)
padding_info_struct.pszAlgId = cast(bcrypt, 'wchar_t *', hash_buffer)
padding_info = cast(bcrypt, 'void *', padding_info_struct_pointer)
else:
# Windows doesn't use the ASN.1 Sequence for DSA/ECDSA signatures,
# so we have to convert it here for the verification to work
try:
signature = algos.DSASignature.load(signature).to_p1363()
except (ValueError, OverflowError, TypeError):
raise SignatureError('Signature is invalid')
res = bcrypt.BCryptVerifySignature(
certificate_or_public_key.key_handle,
padding_info,
digest,
len(digest),
signature,
len(signature),
flags
)
failure = res == BcryptConst.STATUS_INVALID_SIGNATURE
failure = failure or res == BcryptConst.STATUS_INVALID_PARAMETER
if failure:
raise SignatureError('Signature is invalid')
handle_error(res) | python | {
"resource": ""
} |
q257593 | dsa_sign | validation | def dsa_sign(private_key, data, hash_algorithm):
"""
Generates a DSA signature
:param private_key:
The PrivateKey to generate the signature with
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384" or "sha512"
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the signature
"""
if private_key.algorithm != 'dsa':
raise ValueError('The key specified is not a DSA private key')
return _sign(private_key, data, hash_algorithm) | python | {
"resource": ""
} |
q257594 | ecdsa_sign | validation | def ecdsa_sign(private_key, data, hash_algorithm):
"""
Generates an ECDSA signature
:param private_key:
The PrivateKey to generate the signature with
:param data:
A byte string of the data the signature is for
:param hash_algorithm:
A unicode string of "md5", "sha1", "sha256", "sha384" or "sha512"
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the signature
"""
if private_key.algorithm != 'ec':
raise ValueError('The key specified is not an EC private key')
return _sign(private_key, data, hash_algorithm) | python | {
"resource": ""
} |
def _advapi32_sign(private_key, data, hash_algorithm, rsa_pss_padding=False):
    """
    Generates an RSA, DSA or ECDSA signature via CryptoAPI

    :param private_key:
        The PrivateKey to generate the signature with

    :param data:
        A byte string of the data the signature is for

    :param hash_algorithm:
        A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"

    :param rsa_pss_padding:
        If PSS padding should be used for RSA keys

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the signature
    """

    algo = private_key.algorithm

    # CryptoAPI can not apply the padding schemes below itself, so both
    # the "raw" and the RSA-PSS paths pad manually and then perform a raw
    # private-key operation instead of going through CryptSignHashW.
    if algo == 'rsa' and hash_algorithm == 'raw':
        padded_data = add_pkcs1v15_signature_padding(private_key.byte_size, data)
        return raw_rsa_private_crypt(private_key, padded_data)

    if algo == 'rsa' and rsa_pss_padding:
        # Salt length in bytes for each digest; NOTE(review): .get() falls
        # back to 0 for unknown values (e.g. "md5") - confirm that is the
        # intended salt length in that case
        hash_length = {
            'sha1': 20,
            'sha224': 28,
            'sha256': 32,
            'sha384': 48,
            'sha512': 64
        }.get(hash_algorithm, 0)
        padded_data = add_pss_padding(hash_algorithm, hash_length, private_key.bit_size, data)
        return raw_rsa_private_crypt(private_key, padded_data)

    if private_key.algorithm == 'dsa' and hash_algorithm == 'md5':
        raise ValueError(pretty_message(
            '''
            Windows does not support md5 signatures with DSA keys
            '''
        ))

    hash_handle = None

    try:
        # Map the digest name to the CryptoAPI algorithm identifier;
        # raises KeyError for unsupported digest names
        alg_id = {
            'md5': Advapi32Const.CALG_MD5,
            'sha1': Advapi32Const.CALG_SHA1,
            'sha256': Advapi32Const.CALG_SHA_256,
            'sha384': Advapi32Const.CALG_SHA_384,
            'sha512': Advapi32Const.CALG_SHA_512,
        }[hash_algorithm]

        hash_handle_pointer = new(advapi32, 'HCRYPTHASH *')
        res = advapi32.CryptCreateHash(
            private_key.context_handle,
            alg_id,
            null(),
            0,
            hash_handle_pointer
        )
        handle_error(res)
        hash_handle = unwrap(hash_handle_pointer)

        # Feed the data into the hash object
        res = advapi32.CryptHashData(hash_handle, data, len(data), 0)
        handle_error(res)

        # First CryptSignHashW call with a NULL buffer just queries the
        # required output size, which is written into out_len
        out_len = new(advapi32, 'DWORD *')
        res = advapi32.CryptSignHashW(
            hash_handle,
            Advapi32Const.AT_SIGNATURE,
            null(),
            0,
            null(),
            out_len
        )
        handle_error(res)

        buffer_length = deref(out_len)
        buffer_ = buffer_from_bytes(buffer_length)

        # Second call actually writes the signature into buffer_
        res = advapi32.CryptSignHashW(
            hash_handle,
            Advapi32Const.AT_SIGNATURE,
            null(),
            0,
            buffer_,
            out_len
        )
        handle_error(res)

        output = bytes_from_buffer(buffer_, deref(out_len))

        # CryptoAPI outputs the signature in little endian byte order, so we
        # must swap it for compatibility with other systems
        output = output[::-1]

        if algo == 'dsa':
            # Switch the two integers because the reversal just before switched
            # then
            half_len = len(output) // 2
            output = output[half_len:] + output[:half_len]
            # Windows doesn't use the ASN.1 Sequence for DSA signatures,
            # so we have to convert it here for the verification to work
            output = algos.DSASignature.from_p1363(output).dump()

        return output

    finally:
        # Always release the CryptoAPI hash handle, even on error
        if hash_handle:
            advapi32.CryptDestroyHash(hash_handle)
def _bcrypt_sign(private_key, data, hash_algorithm, rsa_pss_padding=False):
    """
    Generates an RSA, DSA or ECDSA signature via CNG

    :param private_key:
        The PrivateKey to generate the signature with

    :param data:
        A byte string of the data the signature is for

    :param hash_algorithm:
        A unicode string of "md5", "sha1", "sha256", "sha384", "sha512" or "raw"

    :param rsa_pss_padding:
        If PSS padding should be used for RSA keys

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the signature
    """

    if hash_algorithm == 'raw':
        # "raw" means the caller already hashed/encoded the data themselves
        digest = data
    else:
        hash_constant = {
            'md5': BcryptConst.BCRYPT_MD5_ALGORITHM,
            'sha1': BcryptConst.BCRYPT_SHA1_ALGORITHM,
            'sha256': BcryptConst.BCRYPT_SHA256_ALGORITHM,
            'sha384': BcryptConst.BCRYPT_SHA384_ALGORITHM,
            'sha512': BcryptConst.BCRYPT_SHA512_ALGORITHM
        }[hash_algorithm]
        digest = getattr(hashlib, hash_algorithm)(data).digest()

    padding_info = null()
    flags = 0

    if private_key.algorithm == 'rsa':
        if rsa_pss_padding:
            # Salt length in bytes matches the digest output length
            hash_length = {
                'md5': 16,
                'sha1': 20,
                'sha256': 32,
                'sha384': 48,
                'sha512': 64
            }[hash_algorithm]

            flags = BcryptConst.BCRYPT_PAD_PSS
            padding_info_struct_pointer = struct(bcrypt, 'BCRYPT_PSS_PADDING_INFO')
            padding_info_struct = unwrap(padding_info_struct_pointer)
            # This has to be assigned to a variable to prevent cffi from gc'ing it
            hash_buffer = buffer_from_unicode(hash_constant)
            padding_info_struct.pszAlgId = cast(bcrypt, 'wchar_t *', hash_buffer)
            padding_info_struct.cbSalt = hash_length
        else:
            flags = BcryptConst.BCRYPT_PAD_PKCS1
            padding_info_struct_pointer = struct(bcrypt, 'BCRYPT_PKCS1_PADDING_INFO')
            padding_info_struct = unwrap(padding_info_struct_pointer)
            # This has to be assigned to a variable to prevent cffi from gc'ing it
            if hash_algorithm == 'raw':
                # A NULL algorithm id tells CNG the digest is pre-encoded
                padding_info_struct.pszAlgId = null()
            else:
                hash_buffer = buffer_from_unicode(hash_constant)
                padding_info_struct.pszAlgId = cast(bcrypt, 'wchar_t *', hash_buffer)
        padding_info = cast(bcrypt, 'void *', padding_info_struct_pointer)

    if private_key.algorithm == 'dsa' and private_key.bit_size > 1024 and hash_algorithm in set(['md5', 'sha1']):
        # FIX: message previously only mentioned sha1 even though the
        # condition above also rejects md5
        raise ValueError(pretty_message(
            '''
            Windows does not support md5 or sha1 signatures with DSA keys
            based on sha224, sha256 or sha512
            '''
        ))

    # First BCryptSignHash call with a NULL output buffer queries the
    # required signature length, written into out_len
    out_len = new(bcrypt, 'DWORD *')
    res = bcrypt.BCryptSignHash(
        private_key.key_handle,
        padding_info,
        digest,
        len(digest),
        null(),
        0,
        out_len,
        flags
    )
    handle_error(res)

    buffer_len = deref(out_len)
    buffer = buffer_from_bytes(buffer_len)

    if private_key.algorithm == 'rsa':
        # Re-cast the pointer for the second call
        padding_info = cast(bcrypt, 'void *', padding_info_struct_pointer)

    # Second call actually writes the signature into buffer
    res = bcrypt.BCryptSignHash(
        private_key.key_handle,
        padding_info,
        digest,
        len(digest),
        buffer,
        buffer_len,
        out_len,
        flags
    )
    handle_error(res)
    signature = bytes_from_buffer(buffer, deref(out_len))

    if private_key.algorithm != 'rsa':
        # Windows doesn't use the ASN.1 Sequence for DSA/ECDSA signatures,
        # so we have to convert it here for the verification to work
        signature = algos.DSASignature.from_p1363(signature).dump()

    return signature
def _advapi32_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):
    """
    Encrypts a value using an RSA public key via CryptoAPI

    :param certificate_or_public_key:
        A Certificate or PublicKey instance to encrypt with

    :param data:
        A byte string of the data to encrypt

    :param rsa_oaep_padding:
        If OAEP padding should be used instead of PKCS#1 v1.5

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the ciphertext
    """

    flags = 0
    if rsa_oaep_padding:
        flags = Advapi32Const.CRYPT_OAEP

    # First CryptEncrypt call with a NULL data buffer queries the required
    # output buffer size, which is written into out_len
    out_len = new(advapi32, 'DWORD *', len(data))
    res = advapi32.CryptEncrypt(
        certificate_or_public_key.ex_key_handle,
        null(),
        True,
        flags,
        null(),
        out_len,
        0
    )
    handle_error(res)

    buffer_len = deref(out_len)
    buffer = buffer_from_bytes(buffer_len)
    write_to_buffer(buffer, data)

    # CryptEncrypt encrypts in place, so out_len must be reset to the
    # plaintext length before the real call
    pointer_set(out_len, len(data))
    res = advapi32.CryptEncrypt(
        certificate_or_public_key.ex_key_handle,
        null(),
        True,
        flags,
        buffer,
        out_len,
        buffer_len
    )
    handle_error(res)

    # CryptoAPI produces the ciphertext in little endian byte order (see the
    # matching reversal in the sign/decrypt paths), so reverse it for
    # compatibility with other systems
    return bytes_from_buffer(buffer, deref(out_len))[::-1]
def _bcrypt_encrypt(certificate_or_public_key, data, rsa_oaep_padding=False):
    """
    Encrypts a value using an RSA public key via CNG

    :param certificate_or_public_key:
        A Certificate or PublicKey instance to encrypt with

    :param data:
        A byte string of the data to encrypt

    :param rsa_oaep_padding:
        If OAEP padding should be used instead of PKCS#1 v1.5

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the ciphertext
    """

    flags = BcryptConst.BCRYPT_PAD_PKCS1
    if rsa_oaep_padding is True:
        flags = BcryptConst.BCRYPT_PAD_OAEP

        # OAEP needs a padding-info struct naming the mask digest (SHA-1
        # here) and an optional label (unused, hence NULL/0)
        padding_info_struct_pointer = struct(bcrypt, 'BCRYPT_OAEP_PADDING_INFO')
        padding_info_struct = unwrap(padding_info_struct_pointer)
        # This has to be assigned to a variable to prevent cffi from gc'ing it
        hash_buffer = buffer_from_unicode(BcryptConst.BCRYPT_SHA1_ALGORITHM)
        padding_info_struct.pszAlgId = cast(bcrypt, 'wchar_t *', hash_buffer)
        padding_info_struct.pbLabel = null()
        padding_info_struct.cbLabel = 0
        padding_info = cast(bcrypt, 'void *', padding_info_struct_pointer)
    else:
        padding_info = null()

    # First BCryptEncrypt call with a NULL output buffer queries the
    # required ciphertext length, written into out_len
    out_len = new(bcrypt, 'ULONG *')
    res = bcrypt.BCryptEncrypt(
        certificate_or_public_key.key_handle,
        data,
        len(data),
        padding_info,
        null(),
        0,
        null(),
        0,
        out_len,
        flags
    )
    handle_error(res)

    buffer_len = deref(out_len)
    buffer = buffer_from_bytes(buffer_len)

    # Second call actually writes the ciphertext into buffer
    res = bcrypt.BCryptEncrypt(
        certificate_or_public_key.key_handle,
        data,
        len(data),
        padding_info,
        null(),
        0,
        buffer,
        buffer_len,
        out_len,
        flags
    )
    handle_error(res)

    return bytes_from_buffer(buffer, deref(out_len))
def _advapi32_decrypt(private_key, ciphertext, rsa_oaep_padding=False):
    """
    Decrypts a value using an RSA private key via CryptoAPI

    :param private_key:
        A PrivateKey instance to decrypt with

    :param ciphertext:
        A byte string of the data to decrypt

    :param rsa_oaep_padding:
        If OAEP padding should be used instead of PKCS#1 v1.5

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the OS crypto library

    :return:
        A byte string of the plaintext
    """

    flags = 0
    if rsa_oaep_padding:
        flags = Advapi32Const.CRYPT_OAEP

    # The encrypt path reversed CryptoAPI's little-endian output, so the
    # ciphertext must be reversed back before handing it to CryptDecrypt
    ciphertext = ciphertext[::-1]

    # CryptDecrypt decrypts in place; out_len receives the plaintext length
    buffer = buffer_from_bytes(ciphertext)
    out_len = new(advapi32, 'DWORD *', len(ciphertext))
    res = advapi32.CryptDecrypt(
        private_key.ex_key_handle,
        null(),
        True,
        flags,
        buffer,
        out_len
    )
    handle_error(res)

    return bytes_from_buffer(buffer, deref(out_len))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.