text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def P2(self, value):
    """Validate and store ``_P2``, then rebuild ``_block_matcher``.

    Raises ``InvalidSecondDisparityChangePenaltyError`` unless the new
    value is strictly greater than ``P1``.
    """
    if value <= self.P1:
        raise InvalidSecondDisparityChangePenaltyError("P2 must be greater "
                                                       "than P1.")
    self._P2 = value
    self._replace_bm()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _copy_calibration(self, calibration):
"""Copy another ``StereoCalibration`` object's values.""" |
for key, item in calibration.__dict__.items():
self.__dict__[key] = item |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_corners(self, image):
    """Locate chessboard corners in ``image``, refined to subpixel accuracy.

    Raises ``ChessboardNotFoundError`` when no chessboard is detected.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    found, corners = cv2.findChessboardCorners(gray,
                                               (self.rows, self.columns))
    if not found:
        raise ChessboardNotFoundError("No chessboard could be found.")
    # Refine the rough detections with a subpixel corner search.
    term_criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
                     30, 0.01)
    cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), term_criteria)
    return corners
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _show_corners(self, image, corners):
    """Display the chessboard corners drawn over ``image``."""
    annotated = image
    cv2.drawChessboardCorners(annotated, (self.rows, self.columns),
                              corners, True)
    window_name = "Chessboard"
    cv2.imshow(window_name, annotated)
    # Any keypress dismisses the window.
    if cv2.waitKey(0):
        cv2.destroyWindow(window_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_corners(self, image_pair, show_results=False):
    """Record chessboard corners found in an image pair.

    ``image_pair`` should be an iterable of two CvMats ordered
    (left, right).
    """
    self.object_points.append(self.corner_coordinates)
    for index, image in enumerate(image_pair):
        side = "left" if index == 0 else "right"
        corners = self._get_corners(image)
        if show_results:
            self._show_corners(image, corners)
        self.image_points[side].append(corners.reshape(-1, 2))
    self.image_count += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calibrate_cameras(self):
    """Calibrate cameras based on found chessboard corners."""
    # Iteration/epsilon termination criteria for the stereo optimization.
    criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
                100, 1e-5)
    flags = (cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST +
             cv2.CALIB_SAME_FOCAL_LENGTH)
    calib = StereoCalibration()
    # Joint calibration of both cameras; the [1:] slice drops the first
    # element (the retval) that stereoCalibrate returns.
    (calib.cam_mats["left"], calib.dist_coefs["left"],
     calib.cam_mats["right"], calib.dist_coefs["right"],
     calib.rot_mat, calib.trans_vec, calib.e_mat,
     calib.f_mat) = cv2.stereoCalibrate(self.object_points,
                                        self.image_points["left"],
                                        self.image_points["right"],
                                        self.image_size,
                                        calib.cam_mats["left"],
                                        calib.dist_coefs["left"],
                                        calib.cam_mats["right"],
                                        calib.dist_coefs["right"],
                                        calib.rot_mat,
                                        calib.trans_vec,
                                        calib.e_mat,
                                        calib.f_mat,
                                        criteria=criteria,
                                        flags=flags)[1:]
    # Compute rectification transforms and projection matrices for both
    # cameras from the joint calibration.
    (calib.rect_trans["left"], calib.rect_trans["right"],
     calib.proj_mats["left"], calib.proj_mats["right"],
     calib.disp_to_depth_mat, calib.valid_boxes["left"],
     calib.valid_boxes["right"]) = cv2.stereoRectify(calib.cam_mats["left"],
                                                     calib.dist_coefs["left"],
                                                     calib.cam_mats["right"],
                                                     calib.dist_coefs["right"],
                                                     self.image_size,
                                                     calib.rot_mat,
                                                     calib.trans_vec,
                                                     flags=0)
    # Precompute per-side undistortion/rectification lookup maps for fast
    # remapping of live frames.
    for side in ("left", "right"):
        (calib.undistortion_map[side],
         calib.rectification_map[side]) = cv2.initUndistortRectifyMap(
                                                        calib.cam_mats[side],
                                                        calib.dist_coefs[side],
                                                        calib.rect_trans[side],
                                                        calib.proj_mats[side],
                                                        self.image_size,
                                                        cv2.CV_32FC1)
    # This is replaced because my results were always bad. Estimates are
    # taken from the OpenCV samples.
    width, height = self.image_size
    focal_length = 0.8 * width
    calib.disp_to_depth_mat = np.float32([[1, 0, 0, -0.5 * width],
                                          [0, -1, 0, 0.5 * height],
                                          [0, 0, 0, -focal_length],
                                          [0, 0, 1, 0]])
    return calib
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_calibration(self, calibration):
    """Check calibration quality by computing the average epipolar error.

    First, undistort detected points and compute epilines for each side.
    Then compute the error between the computed epipolar lines and the
    position of the points detected on the other side for each point and
    return the average error.
    """
    sides = "left", "right"
    which_image = {sides[0]: 1, sides[1]: 2}
    undistorted, lines = {}, {}
    for side in sides:
        undistorted[side] = cv2.undistortPoints(
                         np.concatenate(self.image_points[side]).reshape(-1,
                                                                         1, 2),
                         calibration.cam_mats[side],
                         calibration.dist_coefs[side],
                         P=calibration.cam_mats[side])
        lines[side] = cv2.computeCorrespondEpilines(undistorted[side],
                                                    which_image[side],
                                                    calibration.f_mat)
    total_error = 0
    this_side, other_side = sides
    for side in sides:
        # |a*x + b*y + c|: distance of each point from the epiline computed
        # from the corresponding point on the opposite image.
        for i in range(len(undistorted[side])):
            total_error += abs(undistorted[this_side][i][0][0] *
                               lines[other_side][i][0][0] +
                               undistorted[this_side][i][0][1] *
                               lines[other_side][i][0][1] +
                               lines[other_side][i][0][2])
        # Swap roles so the second pass measures the other direction.
        other_side, this_side = sides
    # NOTE(review): assumes image_count * len(object_points) equals the
    # total number of point measurements -- confirm against how
    # object_points is populated in add_corners.
    total_points = self.image_count * len(self.object_points)
    return total_error / total_points
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_files(folder):
    """Discover stereo photos and return them as a pairwise sorted list.

    Only ``left*`` files are discovered on disk; each matching ``right*``
    name is derived from its partner, so the result alternates
    left/right in sorted order.
    """
    left_names = sorted(name for name in os.listdir(folder)
                        if name.startswith("left"))
    paired = []
    for name in left_names:
        paired.append(name)
        paired.append("right{}".format(name[4:]))
    return [os.path.join(folder, name) for name in paired]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calibrate_folder(args):
    """Calibrate a stereo pair from chessboard images and export the result.

    All images are read from disk. Chessboard points are found and used to
    calibrate the stereo pair. Finally, the calibration is written to the
    folder specified in ``args``.

    ``args`` needs to contain the following fields:
        input_files: List of paths to input files
        rows: Number of rows in chessboard
        columns: Number of columns in chessboard
        square_size: Size of chessboard squares in cm
        output_folder: Folder to write calibration to
    """
    # Image size is taken from the first input image.
    height, width = cv2.imread(args.input_files[0]).shape[:2]
    calibrator = StereoCalibrator(args.rows, args.columns, args.square_size,
                                  (width, height))
    progress = ProgressBar(maxval=len(args.input_files),
                           widgets=[Bar("=", "[", "]"),
                                    " ", Percentage()])
    print("Reading input files...")
    progress.start()
    # Input files are consumed two at a time as (left, right) pairs.
    while args.input_files:
        left, right = args.input_files[:2]
        img_left, im_right = cv2.imread(left), cv2.imread(right)
        calibrator.add_corners((img_left, im_right),
                               show_results=args.show_chessboards)
        args.input_files = args.input_files[2:]
        progress.update(progress.maxval - len(args.input_files))
    progress.finish()
    print("Calibrating cameras. This can take a while.")
    calibration = calibrator.calibrate_cameras()
    avg_error = calibrator.check_calibration(calibration)
    print("The average error between chessboard points and their epipolar "
          "lines is \n"
          "{} pixels. This should be as small as possible.".format(avg_error))
    calibration.export(args.output_folder)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_value(self, parameter, new_value):
"""Try setting new parameter on ``block_matcher`` and update map.""" |
try:
self.block_matcher.__setattr__(parameter, new_value)
except BadBlockMatcherArgumentError:
return
self.update_disparity_map() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _initialize_trackbars(self):
    """Create one GUI trackbar per discoverable ``block_matcher`` parameter."""
    for parameter, maximum in self.block_matcher.parameter_maxima.items():
        # Parameters with no declared maximum are capped by image size.
        if not maximum:
            maximum = self.shortest_dimension
        cv2.createTrackbar(parameter, self.window_name,
                           getattr(self.block_matcher, parameter),
                           maximum,
                           partial(self._set_value, parameter))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _save_bm_state(self):
"""Save current state of ``block_matcher``.""" |
for parameter in self.block_matcher.parameter_maxima.keys():
self.bm_settings[parameter].append(
self.block_matcher.__getattribute__(parameter)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_disparity_map(self):
    """Update disparity map in GUI.

    The disparity image is normalized to the range 0-255 and then divided
    by 255, because OpenCV multiplies it by 255 when displaying. This is
    because the pixels are stored as floating points.
    """
    disparity = self.block_matcher.get_disparity(self.pair)
    max_disparity = disparity.max()
    # Guard against division by zero on a degenerate all-zero map.
    norm_coeff = 255 / max_disparity if max_disparity else 0
    cv2.imshow(self.window_name, disparity * norm_coeff / 255)
    cv2.waitKey()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tune_pair(self, pair):
    """Tune the block matcher against a single image pair.

    The matcher's current settings are snapshotted first so that
    per-pair choices can be reported later.
    """
    self._save_bm_state()
    self.pair = pair
    self.update_disparity_map()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def report_settings(self, parameter):
    """Report chosen settings for ``parameter`` in ``block_matcher``.

    ``bm_settings`` is updated to include the latest state before work is
    begun. This state is removed at the end so that the method has no side
    effects. All settings are reported except for the first one on record,
    which is ``block_matcher``'s default setting.
    """
    self._save_bm_state()
    report = []
    settings_list = self.bm_settings[parameter][1:]
    unique_values = list(set(settings_list))
    # NOTE(review): keyed by count, so distinct values chosen equally
    # often overwrite each other; only one survives per frequency.
    value_frequency = {}
    for value in unique_values:
        value_frequency[settings_list.count(value)] = value
    # dict views are not sortable in Python 3; build a sorted list instead
    # of calling .sort() on .keys().
    frequencies = sorted(value_frequency.keys(), reverse=True)
    header = "{} value | Selection frequency".format(parameter)
    left_column_width = len(header[:-21])
    right_column_width = 21
    report.append(header)
    report.append("{}|{}".format("-" * left_column_width,
                                 "-" * right_column_width))
    for frequency in frequencies:
        left_column = str(value_frequency[frequency]).center(
            left_column_width)
        right_column = str(frequency).center(right_column_width)
        report.append("{}|{}".format(left_column, right_column))
    # Remove the state snapshot taken above so the call is side-effect free.
    for param in self.block_matcher.parameter_maxima.keys():
        self.bm_settings[param].pop(-1)
    return "\n".join(report)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_ply(self, output_file):
    """Write this ``PointCloud`` as an ASCII PLY file viewable in MeshLab."""
    vertices = np.hstack([self.coordinates, self.colors])
    with open(output_file, 'w') as ply_file:
        ply_file.write(self.ply_header.format(
            vertex_count=len(self.coordinates)))
        # xyz as floats, rgb as ints.
        np.savetxt(ply_file, vertices, '%f %f %f %d %d %d')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_infinity(self):
    """Return a new ``PointCloud`` without points at the minimum depth.

    Points whose z equals the minimum are treated as projected-to-infinity
    artifacts and dropped.
    """
    finite = self.coordinates[:, 2] > self.coordinates[:, 2].min()
    return PointCloud(self.coordinates[finite], self.colors[finite])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_loader_for_url(self, url):
""" Determine loading method based on uri """ |
parts = url.split('://', 1)
if len(parts) < 2:
type_ = 'file'
else:
type_ = parts[0]
if '+' in type_:
profile_name, scheme = type_.split('+', 1)
if len(parts) == 2:
url = scheme + '://' + parts[1]
else:
profile_name = ''
scheme = type_
loader = self.cached.get(type_)
if loader:
return loader, url
loader_cls = self._get_loader_class_for_type(scheme)
if not loader_cls:
raise IOError('No Loader for type: ' + scheme)
profile = self.kwargs
if self.profile_loader:
profile = self.profile_loader(profile_name, scheme)
loader = loader_cls(**profile)
self.cached[type_] = loader
return loader, url |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, url, offset=0, length=-1):
    """Load a file-like reader from the local file system.

    Falls back to the parent loader on IOError unless the url can only
    be a local path.
    """
    # Paths starting with '.' or '/' cannot be anything but local files.
    file_only = url.startswith(('/', '.'))
    filename = from_file_url(url)
    if filename != url:
        # A file:// url was converted; falling back makes no sense.
        file_only = True
        url = filename
    try:
        source = open(url, 'rb')
    except IOError:
        if file_only:
            raise
        return super(LocalFileLoader, self).load(url, offset, length)
    if offset > 0:
        source.seek(offset)
    if length >= 0:
        return LimitReader(source, length)
    return source
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, url, offset, length):
    """Load a file-like reader over http using range requests and an
    optional cookie created via a cookie_maker."""
    headers = {}
    # Only send a Range header when a sub-range is actually requested.
    if offset != 0 or length != -1:
        headers['Range'] = BlockLoader._make_range_header(offset, length)
    if self.cookie_maker:
        if isinstance(self.cookie_maker, six.string_types):
            cookie = self.cookie_maker
        else:
            cookie = self.cookie_maker.make()
        headers['Cookie'] = cookie
    if not self.session:
        self.session = requests.Session()
    response = self.session.get(url, headers=headers, stream=True)
    response.raise_for_status()
    return response.raw
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def raise_on_self_redirect(self, params, cdx, status_code, location_url):
    """Reject a capture that is a 3xx redirect back to the requested url,
    which would otherwise cause a redirect loop."""
    if cdx.get('is_live'):
        return
    # Only genuine redirects matter; 304 Not Modified is excluded.
    if not status_code.startswith('3') or status_code == '304':
        return
    request_url = params['url'].lower()
    if not location_url:
        return
    location_url = location_url.lower()
    if location_url.startswith('/'):
        # Relative redirect: resolve against the capture's own host.
        location_url = urlsplit(cdx['url']).netloc + location_url

    def normalize(target):
        # Compare without scheme or trailing slash.
        return target.split('://', 1)[-1].rstrip('/')

    location_url = normalize(location_url)
    request_url = normalize(request_url)
    self_redir = request_url == location_url
    if not self_redir and params.get('sr-urlkey'):
        # if new location canonicalized matches old key, also self-redirect
        self_redir = canonicalize(location_url) == params.get('sr-urlkey')
    if self_redir:
        msg = 'Self Redirect {0} -> {1}'.format(request_url, location_url)
        params['sr-urlkey'] = cdx['urlkey']
        raise LiveResourceException(msg)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, url, offset, length, no_record_parse=False):
    """Load a single record from given url at offset with length and
    parse as either warc or arc record.

    :param length: record length; non-numeric/missing values mean
        "unknown" and read to the end of the stream.
    """
    try:
        length = int(length)
    except (ValueError, TypeError):
        # Narrowed from a bare except: only conversion failures mean
        # "unknown length"; anything else should propagate.
        length = -1
    stream = self.loader.load(url, int(offset), length)
    decomp_type = 'gzip'
    # Create decompressing stream
    stream = DecompressingBufferedReader(stream=stream,
                                         decomp_type=decomp_type,
                                         block_size=self.block_size)
    return self.parse_record_stream(stream, no_record_parse=no_record_parse)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def conv_to_json(obj, fields=None):
    """Return cdx as a JSON dictionary string terminated by a newline.

    If ``fields`` is ``None``, output includes all fields in stored order
    except underscore-prefixed ones; otherwise only the named fields that
    are present in ``obj``.
    """
    if fields is None:
        selected = OrderedDict((key, obj[key]) for key in obj
                               if not key.startswith('_'))
    else:
        selected = OrderedDict((key, obj[key]) for key in fields
                               if key in obj)
    return json_encode(selected) + '\n'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rewrite_text_stream_to_gen(self, stream, rwinfo):
    """Convert stream to a generator, applying the rewriting func to each
    portion of the stream. Align to line boundaries if needed."""
    try:
        buff = self.first_buff
        # for html rewriting:
        # if charset is utf-8, use that, otherwise default to encode to
        # an ascii-compatible encoding.
        # encoding only used for url rewriting, encoding back to bytes
        # after rewriting
        if rwinfo.charset == 'utf-8' and rwinfo.text_type == 'html':
            charset = 'utf-8'
        else:
            charset = 'iso-8859-1'
        if buff:
            yield buff.encode(charset)
        decoder = codecs.getincrementaldecoder(charset)()
        while True:
            buff = stream.read(BUFF_SIZE)
            if not buff:
                break
            if self.align_to_line:
                # Extend the chunk to the next newline so rewriting never
                # operates on a split line.
                buff += stream.readline()
            try:
                buff = decoder.decode(buff)
            except UnicodeDecodeError:
                # utf-8 decode failed mid-stream: fall back to iso-8859-1
                # (which accepts any byte) and restart the decoder.
                if charset == 'utf-8':
                    rwinfo.charset = 'iso-8859-1'
                    charset = rwinfo.charset
                decoder = codecs.getincrementaldecoder(charset)()
                buff = decoder.decode(buff)
            buff = self.rewrite(buff)
            yield buff.encode(charset)
        # For adding a tail/handling final buffer
        buff = self.final_read()
        # ensure decoder is marked as finished (final buffer already decoded)
        decoder.decode(b'', final=True)
        if buff:
            yield buff.encode(charset)
    finally:
        stream.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cdx_load(sources, query, process=True):
    """Merge text CDX lines from ``sources`` into an iterator of filtered
    and access-checked CDX objects.

    :param sources: iterable of text CDX sources
    :param process: bool, perform processing sorting/filtering/grouping ops
    """
    cdx_iter = create_merged_cdx_gen(sources, query)
    # page count is a special case, no further processing
    if query.page_count:
        return cdx_iter
    cdx_iter = make_obj_iter(cdx_iter, query)
    if process and not query.secondary_index_only:
        cdx_iter = process_cdx(cdx_iter, query)
    for op in query.custom_ops:
        cdx_iter = op(cdx_iter, query)
    output_format = query.output
    if output_format == 'text':
        cdx_iter = cdx_to_text(cdx_iter, query.fields)
    elif output_format == 'json':
        cdx_iter = cdx_to_json(cdx_iter, query.fields)
    return cdx_iter
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_merged_cdx_gen(sources, query):
    """Create a generator which loads and merges cdx streams, ensuring
    cdxs are lazily loaded."""
    if len(sources) == 1:
        # Single input: merging would be pure overhead.
        cdx_iter = sources[0].load_cdx(query)
    else:
        cdx_iter = merge(*[source.load_cdx(query) for source in sources])
    for cdx in cdx_iter:
        yield cdx
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cdx_limit(cdx_iter, limit):
    """Yield at most ``limit`` records from ``cdx_iter``."""
    # zip stops as soon as either iterable is exhausted, so this consumes
    # no more than `limit` records from the source.
    for cdx, _ in zip(cdx_iter, range(limit)):
        yield cdx
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cdx_reverse(cdx_iter, limit):
    """Return up to ``limit`` cdx records in reverse order."""
    if limit == 1:
        # Single-record optimization: just remember the last item seen.
        last = None
        for cdx in cdx_iter:
            last = cdx
        if last:
            yield last
        return
    # Bounded deque keeps only the final `limit` records, newest first.
    buffered = deque(maxlen=limit)
    for cdx in cdx_iter:
        buffered.appendleft(cdx)
    for cdx in buffered:
        yield cdx
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cdx_clamp(cdx_iter, from_ts, to_ts):
    """Yield only records whose timestamp lies within [from_ts, to_ts]."""
    # Short timestamps are padded to the full 14-digit form, rounding the
    # lower bound down and the upper bound up.
    if from_ts and len(from_ts) < 14:
        from_ts = pad_timestamp(from_ts, PAD_14_DOWN)
    if to_ts and len(to_ts) < 14:
        to_ts = pad_timestamp(to_ts, PAD_14_UP)
    for cdx in cdx_iter:
        timestamp = cdx[TIMESTAMP]
        if from_ts and timestamp < from_ts:
            continue
        if to_ts and timestamp > to_ts:
            continue
        yield cdx
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cdx_collapse_time_status(cdx_iter, timelen=10):
    """Collapse consecutive records sharing a timestamp prefix and status."""
    timelen = int(timelen)
    previous = None
    for cdx in cdx_iter:
        token = (cdx[TIMESTAMP][:timelen], cdx.get(STATUSCODE, ''))
        # Skip records that duplicate the immediately preceding token.
        if token == previous:
            continue
        previous = token
        yield cdx
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cdx_sort_closest(closest, cdx_iter, limit=10):
    """Sort CDXCaptureResult by closest to timestamp, yielding at most
    ``limit`` records, nearest first."""
    closest_cdx = []
    closest_keys = []
    closest_sec = timestamp_to_sec(closest)
    for cdx in cdx_iter:
        sec = timestamp_to_sec(cdx[TIMESTAMP])
        # key = absolute distance (seconds) from the target timestamp.
        key = abs(closest_sec - sec)
        # create tuple to sort by key
        #bisect.insort(closest_cdx, (key, cdx))
        # Parallel sorted insert keeps closest_cdx ordered by distance.
        i = bisect.bisect_right(closest_keys, key)
        closest_keys.insert(i, key)
        closest_cdx.insert(i, cdx)
        if len(closest_cdx) == limit:
            # assuming cdx in ascending order and keys have started increasing
            # NOTE(review): `key` was just inserted, so closest_keys[-1] is
            # always >= key; this early exit appears unreachable -- confirm
            # the intended condition.
            if key > closest_keys[-1]:
                break
        # NOTE(review): only closest_cdx is trimmed here; closest_keys keeps
        # growing, so the two lists drift out of sync past `limit` entries.
        if len(closest_cdx) > limit:
            closest_cdx.pop()
    for cdx in closest_cdx:
        yield cdx
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cdx_resolve_revisits(cdx_iter):
    """Resolve revisits.

    This filter adds three fields to CDX: ``orig.length``,
    ``orig.offset``, and ``orig.filename``. For revisit records, these
    fields have corresponding field values in the previous non-revisit
    (original) CDX record. They are all ``"-"`` for non-revisit records.
    """
    # Maps payload digest -> first original (non-revisit) record seen.
    originals = {}
    for cdx in cdx_iter:
        is_revisit = cdx.is_revisit()
        digest = cdx.get(DIGEST)
        original_cdx = None
        # only set if digest is valid, otherwise no way to resolve
        if digest:
            original_cdx = originals.get(digest)
            if not original_cdx and not is_revisit:
                originals[digest] = cdx
        if original_cdx and is_revisit:
            fill_orig = lambda field: original_cdx.get(field, '-')
            # Transfer mimetype and statuscode
            if MIMETYPE in cdx:
                cdx[MIMETYPE] = original_cdx.get(MIMETYPE, '')
            if STATUSCODE in cdx:
                cdx[STATUSCODE] = original_cdx.get(STATUSCODE, '')
        else:
            fill_orig = lambda field: '-'
        # Always add either the original or empty '- - -'
        for field in ORIG_TUPLE:
            cdx['orig.' + field] = fill_orig(field)
        yield cdx
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self):
    """This method is called to load the application. Subclasses must
    return an application usable by pywb.utils.geventserver.GeventServer.

    Translates parsed command-line options into ``extra_config`` entries.
    """
    options = self.r
    if options.live:
        self.extra_config['collections'] = {'live':
                                            {'index': '$live'}}
    if options.debug:
        self.extra_config['debug'] = True
    if options.record:
        self.extra_config['recorder'] = 'live'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_gevent(self):
    """Create the server that runs the application supplied by a subclass."""
    from pywb.utils.geventserver import GeventServer, RequestURIWSGIHandler
    logging.info('Starting Gevent Server on ' + str(self.r.port))
    server = GeventServer(self.application,
                          port=self.r.port,
                          hostname=self.r.bind,
                          handler_class=RequestURIWSGIHandler,
                          direct=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _make_loaders(self, paths, packages):
    """Initialize template loaders from the supplied paths and packages.

    :param list[str] paths: List of paths to search for templates
    :param list[str] packages: List of assets package names
    :return: A list of loaders to be used for loading the template assets
    :rtype: list[FileSystemLoader|PackageLoader]
    """
    path_loaders = [FileSystemLoader(path) for path in paths]
    package_loaders = [PackageLoader(package) for package in packages]
    return path_loaders + package_loaders
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def template_filter(self, param=None):
    """Return a decorator that registers the wrapped function as a
    template filter.

    The filter is keyed by ``param`` when supplied, otherwise by the
    wrapped function's name.

    :param param: Optional name to use instead of the function's own name
    :rtype: callable
    """
    def decorator(func):
        self.filters[param or func.__name__] = func
        return func
    return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _init_filters(self):
    """Initialize the default pywb-provided Jinja filters available during
    template rendering."""
    self.filters = {}

    # Each nested function below is registered into self.filters via the
    # template_filter decorator as a side effect of definition.
    @self.template_filter()
    def format_ts(value, format_='%a, %b %d %Y %H:%M:%S'):
        """Formats the supplied timestamp using format_
        :param str value: The timestamp to be formatted
        :param str format_: The format string
        :return: The correctly formatted timestamp as determined by format_
        :rtype: str
        """
        # '%s' is a special case: return epoch seconds, not strftime output.
        if format_ == '%s':
            return timestamp_to_sec(value)
        else:
            value = timestamp_to_datetime(value)
            return value.strftime(format_)

    @self.template_filter('urlsplit')
    def get_urlsplit(url):
        """Splits the supplied URL
        :param str url: The url to be split
        :return: The split url
        :rtype: urllib.parse.SplitResult
        """
        split = urlsplit(url)
        return split

    @self.template_filter()
    def tojson(obj):
        """Converts the supplied object/array/any to a JSON string if it
        can be JSONified
        :param any obj: The value to be converted to a JSON string
        :return: The JSON string representation of the supplied value
        :rtype: str
        """
        return json.dumps(obj)

    @self.template_filter()
    def tobool(bool_val):
        """Converts a python boolean to a JS "true" or "false" string
        :param any bool_val: A value to be evaluated as a boolean
        :return: The string "true" or "false" to be inserted into JS
        """
        return 'true' if bool_val else 'false'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_to_string(self, env, **kwargs):
    """Render this template.

    A per-request template directory (if present in the WSGI environment)
    is tried first, falling back to the default template path.

    :param dict env: The WSGI environment associated with the request
        causing this template to be rendered
    :param any kwargs: Keyword arguments supplied to Jinja's render
    :return: The rendered template
    :rtype: str
    """
    template = None
    template_path = env.get(self.jenv.env_template_dir_key)
    if template_path:
        # jinja paths are not os paths, always use '/' as separator
        # https://github.com/pallets/jinja/issues/411
        try:
            template = self.jenv.jinja_env.get_template(
                template_path + '/' + self.insert_file)
        except TemplateNotFound:
            pass
    if not template:
        template = self.jenv.jinja_env.get_template(self.insert_file)
    params = env.get(self.jenv.env_template_params_key)
    if params:
        kwargs.update(params)
    kwargs['env'] = env
    kwargs['static_prefix'] = (env.get('pywb.host_prefix', '') +
                               env.get('pywb.app_prefix', '') + '/static')
    return template.render(**kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_insert_func(self, wb_url, wb_prefix, host_prefix, top_url, env, is_framed, coll='', include_ts=True, **kwargs):
    """Create the function used to render the header insert template for
    the current request.

    :param rewrite.wburl.WbUrl wb_url: The WbUrl for the request this template is being rendered for
    :param str wb_prefix: The URL prefix pywb is serving the content using (e.g. http://localhost:8080/live/)
    :param str host_prefix: The host URL prefix pywb is running on (e.g. http://localhost:8080)
    :param str top_url: The full URL for this request (e.g. http://localhost:8080/live/http://example.com)
    :param dict env: The WSGI environment dictionary for this request
    :param bool is_framed: Is pywb or a specific collection running in framed mode
    :param str coll: The name of the collection this request is associated with
    :param bool include_ts: Should a timestamp be included in the rendered template
    :param kwargs: Additional keyword arguments supplied to the Jinja template render method
    :return: A function used to render the header insert for this request
    :rtype: callable
    """
    # Request-level values are captured once; capture-level values are
    # filled in by the closure below for each (rule, cdx) pair.
    params = kwargs
    params['host_prefix'] = host_prefix
    params['wb_prefix'] = wb_prefix
    params['wb_url'] = wb_url
    params['top_url'] = top_url
    params['coll'] = coll
    params['is_framed'] = is_framed

    def make_head_insert(rule, cdx):
        # NOTE: the shared params dict is mutated per call, not copied.
        params['wombat_ts'] = cdx['timestamp'] if include_ts else ''
        params['wombat_sec'] = timestamp_to_sec(cdx['timestamp'])
        params['is_live'] = cdx.get('is_live')
        if self.banner_view:
            banner_html = self.banner_view.render_to_string(env, cdx=cdx, **params)
            params['banner_html'] = banner_html
        return self.render_to_string(env, cdx=cdx, **params)
    return make_head_insert
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_pkg_path(self, item):
    """Extract (netloc, path) from a ``pkg://`` resource reference.

    :param str item: A resource's full package path
    :return: The netloc and path, or None when ``item`` is not a string
        or not a ``pkg://`` url with a package name
    :rtype: tuple[str, str] | None
    """
    if not isinstance(item, str):
        return None
    parts = urlsplit(item)
    if parts.scheme != 'pkg' or not parts.netloc:
        return None
    return (parts.netloc, parts.path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def text_stream(stream, content_type='text/plain; charset=utf-8', status='200 OK'):
    """Utility method for constructing a streaming text response.

    :param Any stream: The response body stream
    :param str content_type: Content-type; a utf-8 charset is appended
        when none is present
    :param str status: The HTTP status line
    :return: WbResponse that is a text stream
    :rtype: WbResponse
    """
    if 'charset' not in content_type:
        content_type = content_type + '; charset=utf-8'
    encoded = WbResponse.encode_stream(stream)
    return WbResponse.bin_stream(encoded, content_type, status)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bin_stream(stream, content_type, status='200 OK', headers=None):
    """Utility method for constructing a binary streaming response.

    :param Any stream: The response body stream
    :param str content_type: The content-type of the response
    :param str status: The HTTP status line
    :param list[tuple[str, str]] headers: Additional headers for this response
    :return: WbResponse that is a binary stream
    :rtype: WbResponse
    """
    all_headers = [('Content-Type', content_type)]
    if headers:
        all_headers = all_headers + headers
    return WbResponse(StatusAndHeaders(status, all_headers), value=stream)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def text_response(text, status='200 OK', content_type='text/plain; charset=utf-8'):
    """Utility method for constructing a text response.

    :param str text: The text response body
    :param str status: The HTTP status line
    :param str content_type: The content-type of the response
    :return: WbResponse text response
    :rtype: WbResponse
    """
    body = text.encode('utf-8')
    headers = [('Content-Type', content_type),
               ('Content-Length', str(len(body)))]
    return WbResponse(StatusAndHeaders(status, headers), value=[body])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def json_response(obj, status='200 OK', content_type='application/json; charset=utf-8'):
    """Utility method for constructing a JSON response.

    :param dict obj: The dictionary to be serialized in JSON format
    :param str status: The HTTP status line
    :param str content_type: The content-type of the response
    :return: WbResponse JSON response
    :rtype: WbResponse
    """
    payload = json.dumps(obj)
    return WbResponse.text_response(payload, status, content_type)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def redir_response(location, status='302 Redirect', headers=None):
    """Utility method for constructing a redirection response.

    :param str location: The location of the resource being redirected to
    :param str status: The HTTP status line
    :param list[tuple[str, str]] headers: Additional headers for this response
    :return: WbResponse redirection response
    :rtype: WbResponse
    """
    redir_headers = [('Location', location), ('Content-Length', '0')]
    if headers:
        redir_headers.extend(headers)
    return WbResponse(StatusAndHeaders(status, redir_headers))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def options_response(env):
    """Construct a WbResponse for an OPTIONS request.

    :param dict env: The WSGI environment dictionary
    :return: An empty 200 response carrying access-control headers
    :rtype: WbResponse
    """
    headers = [('Content-Type', 'text/plain'),
               ('Content-Length', '0')]
    response = WbResponse(StatusAndHeaders('200 Ok', headers))
    response.add_access_control_headers(env=env)
    return response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def canonicalize(url, surt_ordered=True):
    """Canonicalize *url*, converting it to SURT form.

    If not in surt-ordered mode, the result is converted back to url
    form, as surt conversion is currently part of canonicalization.

    Examples: 'com,example)/path/file.html', 'example.com/path/file.html',
    'urn:some:id'

    :raises UrlCanonicalizeException: if the url cannot be canonicalized
    """
    try:
        key = surt.surt(url)
    except Exception:  # pragma: no cover
        # doesn't happen with surt from 0.3b:
        # urn is already canonical, so just use as-is
        if url.startswith('urn:'):
            return url
        raise UrlCanonicalizeException('Invalid Url: ' + url)

    if surt_ordered:
        return key
    # unsurt the surt to get the canonicalized non-surt url
    return unsurt(key)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_fuzzy_rule(self, rule):
    """Build a FuzzyRule from one rule configuration entry.

    Supports both the short form (``fuzzy_lookup`` is a bare regex
    spec) and the long form (``fuzzy_lookup`` is a dict of options).
    Returns None when the rule has no ``fuzzy_lookup`` entry.
    """
    config = rule.get('fuzzy_lookup')
    if not config:
        return

    url_prefix = rule.get('url_prefix')
    if not isinstance(url_prefix, list):
        url_prefix = [url_prefix]

    if isinstance(config, dict):
        # long form: explicit options with defaults filled in
        regex = self.make_regex(config.get('match'))
        replace_after = config.get('replace', self.DEFAULT_REPLACE_AFTER)
        filter_str = config.get('filter', self.DEFAULT_FILTER)
        match_type = config.get('type', self.DEFAULT_MATCH_TYPE)
        find_all = config.get('find_all', False)
    else:
        # short form: config is the match spec, everything else defaulted
        regex = self.make_regex(config)
        replace_after = self.DEFAULT_REPLACE_AFTER
        filter_str = self.DEFAULT_FILTER
        match_type = self.DEFAULT_MATCH_TYPE
        find_all = False

    return FuzzyRule(url_prefix, regex, replace_after, filter_str, match_type, find_all)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_headers_and_payload(self, cdx, failed_files, cdx_loader):
    """Resolve headers and payload records for a given capture.

    In the simple case, headers and payload are in the same record.
    For revisit records they may be in different records: if the
    original has already been found, it is looked up via the orig.*
    fields in the cdx dict; otherwise _load_different_url_payload()
    queries the index for the original record at a different url.

    :return: (headers_record, payload_record) tuple; either element
        may be None when it could not be resolved
    """
    has_curr = (cdx['filename'] != '-')
    orig_f = cdx.get('orig.filename')
    has_orig = bool(orig_f and orig_f != '-')

    # load headers record from cdx['filename'] unless it is '-' (rare)
    headers_record = None
    # bugfix: pre-initialize so the no-branch case (no current file,
    # no original file, not a resolvable revisit) returns None instead
    # of raising NameError
    payload_record = None

    if has_curr:
        headers_record = self._resolve_path_load(cdx, False, failed_files)

    # two index lookups
    # case 1: mimetype is still warc/revisit
    if cdx.get('mime') == 'warc/revisit' and headers_record:
        payload_record = self._load_different_url_payload(cdx,
                                                          headers_record,
                                                          failed_files,
                                                          cdx_loader)

    # single lookup cases
    # case 2: non-revisit
    elif (has_curr and not has_orig):
        payload_record = headers_record

    # case 3: identical url revisit, load payload from orig.filename
    elif (has_orig):
        payload_record = self._resolve_path_load(cdx, True, failed_files)

    return headers_record, payload_record
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_different_url_payload(self, cdx, headers_record, failed_files, cdx_loader):
    """Load the payload for a revisit whose original is at a different url.

    Uses the revisit record's WARC-Refers-To-* headers to query the cdx
    index (when a cdx_loader is provided) for matching url, timestamp
    and digest, loading the first resolvable original record.

    :raises ArchiveLoadFailed: if no refers-to URI is present or no
        matching original record could be loaded
    """
    ref_target_uri = (headers_record.rec_headers.
                      get_header('WARC-Refers-To-Target-URI'))

    # if no target uri, no way to find the original
    if not ref_target_uri:
        raise ArchiveLoadFailed(self.MISSING_REVISIT_MSG)

    # fall back to the revisit's own timestamp when no refers-to date
    ref_target_date = (headers_record.rec_headers.
                       get_header('WARC-Refers-To-Date'))
    if not ref_target_date:
        ref_target_date = cdx['timestamp']
    else:
        ref_target_date = iso_date_to_timestamp(ref_target_date)

    digest = cdx.get('digest', '-')

    try:
        orig_cdx_lines = self.load_cdx_for_dupe(ref_target_uri,
                                                ref_target_date,
                                                digest,
                                                cdx_loader)
    except NotFoundException:
        raise ArchiveLoadFailed(self.MISSING_REVISIT_MSG)

    # try each candidate until one resolves to a loadable record
    for orig_cdx in orig_cdx_lines:
        try:
            payload_record = self._resolve_path_load(orig_cdx, False,
                                                     failed_files)
            return payload_record
        except ArchiveLoadFailed:
            pass

    raise ArchiveLoadFailed(self.MISSING_REVISIT_MSG)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_cdx_for_dupe(self, url, timestamp, digest, cdx_loader):
    """Query for the original capture of a revisit.

    If a cdx_loader is available, return its response for the given
    url/timestamp/digest; otherwise return an empty iterator.
    """
    if not cdx_loader:
        return iter([])

    # never match other revisits; pin the digest when one is known
    filters = ['!mime:warc/revisit']
    if digest and digest != '-':
        filters.append('digest:' + digest)

    query = dict(url=url,
                 closest=timestamp,
                 filter=filters)
    return cdx_loader(query)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def binsearch_offset(reader, key, compare_func=cmp, block_size=8192):
    """Binary-search a sorted file for *key*, returning a block-aligned offset.

    The file is subdivided into block_size (default 8192) byte blocks.
    The returned offset is that of the block from which a forward scan
    will reach the line matching *key*; if the key is absent, the scan
    reaches the line that would follow it.  An optional compare_func
    may be specified.
    """
    lo = 0
    reader.seek(0, 2)
    hi = int(reader.tell() / block_size)

    while hi - lo > 1:
        mid = int(lo + ((hi - lo) / 2))
        reader.seek(mid * block_size)

        # the seek likely landed mid-line: discard the partial line
        if mid > 0:
            reader.readline()

        probe = reader.readline()
        if compare_func(key, probe) > 0:
            lo = mid
        else:
            hi = mid

    return lo * block_size
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def linearsearch(iter_, key, prev_size=0, compare_func=cmp):
    """Scan *iter_* until the current line compares >= *key*.

    Optionally keeps up to *prev_size* previous lines of lookbehind,
    which are yielded ahead of the first matched line.  If the end of
    the stream is reached before a match, nothing is returned (the
    lookbehind is discarded as well).
    """
    lookbehind = deque(maxlen=prev_size + 1)

    for current in iter_:
        lookbehind.append(current)
        if compare_func(current, key) >= 0:
            # emit buffered lines first, then the rest of the stream
            return itertools.chain(lookbehind, iter_)

    # no matches, so return an empty iterator
    return iter([])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_prefix(reader, key):
    """Iterate over lines in a sorted text file that start with prefix *key*."""
    def has_prefix(line):
        return line.startswith(key)

    return itertools.takewhile(has_prefix, search(reader, key))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_upstream_paths(self, port):
    """Return the full URLs of the upstream apps.

    :param int port: The port used by the replay and cdx servers
    :return: Upstream paths (replay, cdx-server, record [if enabled])
    :rtype: dict[str, str]
    """
    upstream = dict()
    upstream['replay'] = self.REPLAY_API % port
    upstream['cdx-server'] = self.CDX_API % port

    # recording endpoint exists only when a recorder was configured
    if self.recorder_path:
        upstream['record'] = self.recorder_path

    return upstream
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_recorder(self, recorder_config):
    """Initialize the recording functionality of pywb.

    If recorder_config is None this function is a no op.

    :param str|dict|None recorder_config: Either the name of the source
        collection to record from (shorthand) or a dict of recorder
        options including 'source_coll'
    """
    if not recorder_config:
        self.recorder = None
        self.recorder_path = None
        return

    if isinstance(recorder_config, str):
        # shorthand config: just the name of the source collection
        recorder_coll = recorder_config
        recorder_config = {}
    else:
        recorder_coll = recorder_config['source_coll']

    # TODO: support dedup
    dedup_index = None
    warc_writer = MultiFileWARCWriter(self.warcserver.archive_paths,
                                      max_size=int(recorder_config.get('rollover_size', 1000000000)),
                                      max_idle_secs=int(recorder_config.get('rollover_idle_secs', 600)),
                                      filename_template=recorder_config.get('filename_template'),
                                      dedup_index=dedup_index)

    self.recorder = RecorderApp(self.RECORD_SERVER % str(self.warcserver_server.port), warc_writer,
                                accept_colls=recorder_config.get('source_filter'))

    # run the recorder app on an ephemeral port (port=0)
    recorder_server = GeventServer(self.recorder, port=0)

    self.recorder_path = self.RECORD_API % (recorder_server.port, recorder_coll)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_autoindex(self, auto_interval):
    """Initialize and start the auto-indexing of the collections.

    If auto_interval is falsy this is a no op.

    :param str|int auto_interval: The auto-indexing interval (seconds)
        from the configuration file or CLI argument
    """
    if not auto_interval:
        return

    # imported lazily: only needed when auto-indexing is enabled
    from pywb.manager.autoindex import AutoIndexer

    colls_dir = self.warcserver.root_dir if self.warcserver.root_dir else None

    indexer = AutoIndexer(colls_dir=colls_dir, interval=int(auto_interval))

    # refuse to start if there is no managed directory to watch
    if not os.path.isdir(indexer.root_path):
        msg = 'No managed directory "{0}" for auto-indexing'
        logging.error(msg.format(indexer.root_path))
        import sys
        sys.exit(2)

    msg = 'Auto-Indexing Enabled on "{0}", checking every {1} secs'
    logging.info(msg.format(indexer.root_path, auto_interval))
    indexer.start()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serve_static(self, environ, coll='', filepath=''):
    """Serve a static file associated with a specific collection or one
    of pywb's own static assets.

    :param dict environ: The WSGI environment dictionary for the request
    :param str coll: The collection the static file is associated with
    :param str filepath: The file path (relative to the collection) for
        the static asset
    :return: The WbResponse for the static asset
    :rtype: WbResponse
    """
    proxy_enabled = self.is_proxy_enabled(environ)
    if proxy_enabled and environ.get('REQUEST_METHOD') == 'OPTIONS':
        return WbResponse.options_response(environ)

    if coll:
        path = os.path.join(self.warcserver.root_dir, coll, self.static_dir)
    else:
        path = self.static_dir

    environ['pywb.static_dir'] = path
    try:
        response = self.static_handler(environ, filepath)
        if proxy_enabled:
            response.add_access_control_headers(env=environ)
        return response
    except Exception:
        # bugfix: was a bare 'except:' which also swallowed
        # SystemExit/KeyboardInterrupt; only handler errors become a 404
        self.raise_not_found(environ, 'Static File Not Found: {0}'.format(filepath))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_metadata(self, coll):
    """Retrieve the metadata associated with a collection.

    :param str coll: The name of the collection to receive metadata for
    :return: The collection's metadata if it exists
    :rtype: dict
    """
    metadata = {'coll': coll, 'type': 'replay'}

    # fixed routes carry their config directly; dynamic collections
    # are read through the metadata cache
    if coll in self.warcserver.list_fixed_routes():
        extra = self.warcserver.get_coll_config(coll)
    else:
        extra = self.metadata_cache.load(coll)

    metadata.update(extra)
    return metadata
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serve_cdx(self, environ, coll='$root'):
    """Proxy an upstream CDX query for a collection and respond with
    the results of the query.

    :param dict environ: The WSGI environment dictionary for the request
    :param str coll: The name of the collection this CDX query is for
    :return: The WbResponse containing the results of the CDX query
    :rtype: WbResponse
    """
    cdx_url = self.rewriterapp.paths['cdx-server'].format(coll=coll)

    # forward the incoming query string to the upstream cdx server
    if environ.get('QUERY_STRING'):
        sep = '&' if '?' in cdx_url else '?'
        cdx_url = cdx_url + sep + environ.get('QUERY_STRING')

    try:
        upstream = requests.get(cdx_url, stream=True)
        return WbResponse.bin_stream(StreamIter(upstream.raw),
                                     content_type=upstream.headers.get('Content-Type'))
    except Exception as e:
        return WbResponse.text_response('Error: ' + str(e), status='400 Bad Request')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_paths(self, environ, coll, record=False):
    """Populate the WSGI environment with the template path info needed
    to serve content or record.

    :param dict environ: The WSGI environment dictionary for the request
    :param str coll: The name of the collection the record is served from
    :param bool record: Whether the content being served is recorded
        (one extra path segment to strip; record mode only)
    """
    if not coll or not self.warcserver.root_dir:
        return

    at_root = (coll == '$root')
    if not at_root:
        # strip the collection (and record) segments so downstream
        # handlers see a collection-relative path
        pop_path_info(environ)
        if record:
            pop_path_info(environ)

    segments = [self.warcserver.root_dir]
    if not at_root:
        segments.append(coll)
    segments.append(self.templates_dir)

    # jinja2 template paths always use '/' as separator
    environ['pywb.templates_dir'] = '/'.join(segments)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def raise_not_found(self, environ, msg):
    """Raise a werkzeug NotFound carrying a rendered pywb error page.

    :param dict environ: The WSGI environment dictionary for the request
    :param str msg: The error message
    """
    error_body = self.rewriterapp._error_response(environ, msg)
    raise NotFound(response=error_body)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_refer_redirect(self, environ):
    """Return an HTTP 307 redirect when the referer is a replay page on
    this host and embeds a replayed URL; otherwise return None.

    The current (host-relative) request path is resolved against the
    embedded URL so links within a replayed page load correctly.

    :param dict environ: The WSGI environment dictionary for the request
    :return: WbResponse HTTP 307 redirection, or None
    :rtype: WbResponse|None
    """
    referer = environ.get('HTTP_REFERER')
    if not referer:
        return

    host = environ.get('HTTP_HOST')
    if host not in referer:
        return

    # find the embedded (replayed) url; searching from index 1 skips
    # the referer's own leading scheme
    inx = referer[1:].find('http')
    if not inx:
        # scheme-relative fallback
        # bugfix: removed dead statement 'inx + 1' (expression with no
        # effect); the +1 offset is already applied in the slices below
        inx = referer[1:].find('///')
    if inx < 0:
        return

    url = referer[inx + 1:]
    host = referer[:inx + 1]

    orig_url = environ['PATH_INFO']
    if environ.get('QUERY_STRING'):
        orig_url += '?' + environ['QUERY_STRING']

    full_url = host + urljoin(url, orig_url)
    return WbResponse.redir_response(full_url, '307 Redirect')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_request(self, environ, start_response):
    """Look up the route handler for the request and invoke it,
    returning the handler's response.

    Unmatched routes fall back to a referer-based redirect (replay
    pages often issue host-relative requests); unexpected exceptions
    render a 500 error page.

    :param dict environ: The WSGI environment dictionary for the request
    :param start_response: The WSGI start_response callable
    :return: The WbResponse for the request
    :rtype: WbResponse
    """
    urls = self.url_map.bind_to_environ(environ)
    try:
        endpoint, args = urls.match()

        # store original script_name (original prefix) before modifications are made
        environ['pywb.app_prefix'] = environ.get('SCRIPT_NAME')

        response = endpoint(environ, **args)
        return response(environ, start_response)

    except HTTPException as e:
        # no route matched: try a redirect based on the referer before
        # surfacing the HTTP error itself
        redir = self._check_refer_redirect(environ)
        if redir:
            return redir(environ, start_response)

        return e(environ, start_response)

    except Exception as e:
        if self.debug:
            traceback.print_exc()

        response = self.rewriterapp._error_response(environ, 'Internal Error: ' + str(e), '500 Server Error')
        return response(environ, start_response)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_app(cls, port):
    """Create a new FrontEndApp listening on *port* on all interfaces.

    :param int port: The port FrontEndApp is to listen on
    :return: A new instance of FrontEndApp wrapped in a GeventServer
    :rtype: GeventServer
    """
    frontend = FrontEndApp()
    return GeventServer(frontend, port=port, hostname='0.0.0.0')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_proxy(self, config):
    """Initialize and start proxy mode.

    If the 'proxy' configuration entry is not contained in the config
    this is a no op.  Otherwise the request handler is wrapped in a
    WSGIProxMiddleware.

    :param dict config: The configuration object used to configure this
        instance of FrontEndApp
    """
    proxy_config = config.get('proxy')
    if not proxy_config:
        return

    if isinstance(proxy_config, str):
        # shorthand config: just the name of the proxy collection
        proxy_coll = proxy_config
        proxy_config = {}
    else:
        proxy_coll = proxy_config['coll']

    if '/' in proxy_coll:
        raise Exception('Proxy collection can not contain "/"')

    # certificate-authority settings for HTTPS interception
    proxy_config['ca_name'] = proxy_config.get('ca_name', self.PROXY_CA_NAME)
    proxy_config['ca_file_cache'] = proxy_config.get('ca_file_cache', self.PROXY_CA_PATH)

    if proxy_config.get('recording'):
        logging.info('Proxy recording into collection "{0}"'.format(proxy_coll))
        if proxy_coll in self.warcserver.list_fixed_routes():
            raise Exception('Can not record into fixed collection')

        # route proxied requests through the record endpoint and make
        # sure a recorder is configured
        proxy_coll += self.RECORD_ROUTE
        if not config.get('recorder'):
            config['recorder'] = 'live'
    else:
        logging.info('Proxy enabled for collection "{0}"'.format(proxy_coll))

    # bn_ = banner-only rewriting, id_ = no content rewriting
    if proxy_config.get('enable_content_rewrite', True):
        self.proxy_prefix = '/{0}/bn_/'.format(proxy_coll)
    else:
        self.proxy_prefix = '/{0}/id_/'.format(proxy_coll)

    self.proxy_default_timestamp = proxy_config.get('default_timestamp')
    if self.proxy_default_timestamp:
        # accept either an all-digit timestamp or an ISO date
        if not self.ALL_DIGITS.match(self.proxy_default_timestamp):
            try:
                self.proxy_default_timestamp = iso_date_to_timestamp(self.proxy_default_timestamp)
            except:
                raise Exception('Invalid Proxy Timestamp: Must Be All-Digit Timestamp or ISO Date Format')

    self.proxy_coll = proxy_coll

    self.handler = WSGIProxMiddleware(self.handle_request,
                                      self.proxy_route_request,
                                      proxy_host=proxy_config.get('host', 'pywb.proxy'),
                                      proxy_options=proxy_config)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def proxy_route_request(self, url, environ):
    """Return the full url that this proxy request will be routed to.

    The 'environ' PATH_INFO and REQUEST_URI will be modified based on
    the returned url.  The default is to point into the proxy
    collection via 'proxy_prefix'.
    """
    default_ts = self.proxy_default_timestamp
    if default_ts:
        # let downstream handlers know which timestamp to replay at
        environ['pywb_proxy_default_timestamp'] = default_ts

    return self.proxy_prefix + url
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def proxy_fetch(self, env, url):
    """Proxy-mode-only endpoint handling OPTIONS requests and CORS
    fetches for the auto-fetch worker.

    Due to normal cross-origin browser restrictions in proxy mode, the
    auto-fetch worker cannot access the CSS rules of cross-origin style
    sheets and must re-fetch them in a manner that is CORS safe.  This
    endpoint fetches the stylesheets on the worker's behalf and
    responds with their contents.

    :param dict env: The WSGI environment dictionary
    :param str url: The URL of the resource to be fetched
    :return: WbResponse that is either a response to an OPTIONS request
        or the result of fetching url
    :rtype: WbResponse
    """
    if not self.is_proxy_enabled(env):
        # we are not in proxy mode so just respond with forbidden
        return WbResponse.text_response('proxy mode must be enabled to use this endpoint',
                                        status='403 Forbidden')

    if env.get('REQUEST_METHOD') == 'OPTIONS':
        return WbResponse.options_response(env)

    # ensure full URL
    request_url = env['REQUEST_URI']

    # replace with /id_ so we do not get rewritten
    url = request_url.replace('/proxy-fetch', '/id_')

    # update WSGI environment object
    env['REQUEST_URI'] = self.proxy_coll + url
    env['PATH_INFO'] = env['PATH_INFO'].replace('/proxy-fetch', self.proxy_coll + '/id_')

    # make request using normal serve_content
    response = self.serve_content(env, self.proxy_coll, url)

    # for WR
    if isinstance(response, WbResponse):
        response.add_access_control_headers(env=env)
    return response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, coll):
    """Load and return the metadata associated with a collection.

    The metadata file's mtime is compared against the cached entry; a
    missing or stale entry is (re)read via store_new().

    :param str coll: Name of a collection
    :return: The metadata dict for the collection ({} if unreadable)
    :rtype: dict
    """
    path = self.template_str.format(coll=coll)
    try:
        mtime = os.path.getmtime(path)
        # bugfix: look up by 'coll', the key store_new() caches under
        # (was 'path', so the cache could never hit)
        obj = self.cache.get(coll)
    except Exception:
        # metadata file missing/unreadable -- treat as no metadata
        return {}

    if not obj:
        return self.store_new(coll, path, mtime)

    cached_mtime, data = obj
    if mtime == cached_mtime:
        # bugfix: return the cached metadata itself, not the
        # (mtime, data) tuple
        return data

    return self.store_new(coll, path, mtime)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store_new(self, coll, path, mtime):
    """Load a collection's metadata file and cache it.

    :param str coll: The name of the collection the metadata is for
    :param str path: The path to the collection's metadata file
    :param float mtime: The current mtime of the metadata file
    :return: The collection's metadata
    :rtype: dict
    """
    loaded = load_yaml_config(path)
    # cache alongside the mtime so staleness can be detected later
    self.cache[coll] = (mtime, loaded)
    return loaded
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_blocks(self, location, blocks, ranges, query):
    """Load one or more blocks of compressed cdx lines.

    Returns a line iterator which decompresses and yields one line at
    a time, bounded below by query.key and above by query.end_key.

    :param location: Path/URL of the compressed cdx source
    :param blocks: Block summary providing count, offset and length
    :param ranges: Sizes of the individual compressed blocks to read
    :param query: Query object supplying the key and end_key bounds
    """
    if (logging.getLogger().getEffectiveLevel() <= logging.DEBUG):
        msg = 'Loading {b.count} blocks from {loc}:{b.offset}+{b.length}'
        logging.debug(msg.format(b=blocks, loc=location))

    # one reader over the full span covering all requested blocks
    reader = self.blk_loader.load(location, blocks.offset, blocks.length)

    def decompress_block(range_):
        # each block is an independently-compressed run of range_ bytes
        decomp = gzip_decompressor()
        buff = decomp.decompress(reader.read(range_))
        for line in BytesIO(buff):
            yield line

    def iter_blocks(reader):
        try:
            for r in ranges:
                yield decompress_block(r)
        finally:
            # close the underlying reader once all blocks are consumed
            reader.close()

    # iterate over all blocks
    iter_ = itertools.chain.from_iterable(iter_blocks(reader))

    # start bound
    iter_ = linearsearch(iter_, query.key)

    # end bound
    iter_ = itertools.takewhile(lambda line: line < query.end_key, iter_)

    return iter_
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_mime(self, mime, def_mime='unk'):
    """Store only the bare mimetype from a full content type, removing
    charset/option settings.

    The full, unmodified content type is kept under '_content_type'.
    """
    self['mime'] = def_mime
    if not mime:
        return
    self['mime'] = self.MIME_RE.split(mime, 1)[0]
    self['_content_type'] = mime
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extract_status(self, status_headers):
    """Record only the numeric status code from a full status line.

    An empty code, or a 204 whose status line mentions 'Error', is
    stored as '-'.
    """
    code = status_headers.get_statuscode()
    if not code:
        code = '-'
    elif code == '204' and 'Error' in status_headers.statusline:
        code = '-'
    self['status'] = code
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_warc_record(self, record):
    """Parse a WARC record into an index entry dict.

    warcinfo records yield a minimal entry keyed by the WARC filename;
    all other records are indexed by target uri, timestamp, mime,
    status, digest and optional metadata.
    """
    entry = self._create_index_entry(record.rec_type)

    if record.rec_type == 'warcinfo':
        # warcinfo entries index the filename itself, plus raw contents
        entry['url'] = record.rec_headers.get_header('WARC-Filename')
        entry['urlkey'] = entry['url']
        entry['_warcinfo'] = record.raw_stream.read(record.length)
        return entry

    entry['url'] = record.rec_headers.get_header('WARC-Target-Uri')

    # timestamp
    entry['timestamp'] = iso_date_to_timestamp(record.rec_headers.
                                               get_header('WARC-Date'))

    # mime
    if record.rec_type == 'revisit':
        entry['mime'] = 'warc/revisit'
    elif self.options.get('minimal'):
        entry['mime'] = '-'
    else:
        # request records have no meaningful content type of their own
        def_mime = '-' if record.rec_type == 'request' else 'unk'
        entry.extract_mime(record.http_headers.
                           get_header('Content-Type'),
                           def_mime)

    # detected mime from WARC-Identified-Payload-Type
    entry['mime-detected'] = record.rec_headers.get_header(
        'WARC-Identified-Payload-Type')

    # status -- only for response records (by convention):
    if record.rec_type == 'response' and not self.options.get('minimal'):
        entry.extract_status(record.http_headers)
    else:
        entry['status'] = '-'

    # digest -- strip the 'sha1:' prefix when present
    digest = record.rec_headers.get_header('WARC-Payload-Digest')
    entry['digest'] = digest
    if digest and digest.startswith('sha1:'):
        entry['digest'] = digest[len('sha1:'):]

    elif not entry.get('digest'):
        entry['digest'] = '-'

    # optional json metadata, if present
    metadata = record.rec_headers.get_header('WARC-Json-Metadata')
    if metadata:
        entry['metadata'] = metadata

    return entry
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_arc_record(self, record):
    """Parse an ARC record into an index entry dict."""
    raw_url = record.rec_headers.get_header('uri')

    # percent-encode control characters (CR, LF, formfeed, NUL) that
    # would otherwise corrupt the index line
    for ch, repl in (('\r', '%0D'), ('\n', '%0A'),
                     ('\x0c', '%0C'), ('\x00', '%00')):
        raw_url = raw_url.replace(ch, repl)

    entry = self._create_index_entry(record.rec_type)
    entry['url'] = raw_url

    # timestamp -- trim over-long timestamps to 14 digits
    ts = record.rec_headers.get_header('archive-date')
    if len(ts) > 14:
        ts = ts[:14]
    entry['timestamp'] = ts

    if not self.options.get('minimal'):
        # mime
        entry.extract_mime(record.rec_headers.get_header('content-type'))
        # status
        entry.extract_status(record.http_headers)

    # digest -- ARC records carry no payload digest
    entry['digest'] = '-'

    return entry
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_field(parser, token):
    """Render a form field using given attribute-value pairs.

    Takes a form field as first argument and a list of attribute-value
    pairs for all other arguments.  Pairs are either attribute=value or
    attribute="a value" for assignment, or attribute+=value /
    attribute+="value" for appending.
    """
    error_msg = '%r tag requires a form field followed by a list of attributes and values in the form attr="value"' % token.split_contents()[0]
    try:
        bits = token.split_contents()
        tag_name = bits[0]
        form_field = bits[1]
        attr_list = bits[2:]
    except ValueError:
        raise TemplateSyntaxError(error_msg)

    form_field = parser.compile_filter(form_field)

    # split the pairs into plain assignments ('=') and appends ('+=')
    set_attrs = []
    append_attrs = []
    for pair in attr_list:
        match = ATTRIBUTE_RE.match(pair)
        if not match:
            raise TemplateSyntaxError(error_msg + ": %s" % pair)
        dct = match.groupdict()
        attr, sign, value = \
            dct['attr'], dct['sign'], parser.compile_filter(dct['value'])
        if sign == "=":
            set_attrs.append((attr, value))
        else:
            append_attrs.append((attr, value))

    return FieldAttributeNode(form_field, set_attrs, append_attrs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def response(cls, dic, status=200):
    """Create and return a JSON response object with caching disabled.

    :param dict dic: The body to serialize as JSON
    :param int status: The HTTP status code
    """
    resp = JsonResponse(dic, status=status)
    # disable caching of the response by clients and intermediaries
    for header, value in (('Cache-Control', 'no-store'),
                          ('Pragma', 'no-cache')):
        resp[header] = value
    return resp
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_response_dic(self):
    """Generate the dict that will be jsonified.

    Only scopes that are both requested and registered contribute;
    the combined result is cleaned of empty values.

    :rtype: dict
    """
    registered = self._scopes_registered()
    dic = {}
    for scope in self.scopes:
        if scope not in registered:
            continue
        handler = getattr(self, 'scope_' + scope)
        dic.update(handler())
    return self._clean_dic(dic)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _scopes_registered(self):
""" Return a list that contains all the scopes registered in the class. """ |
scopes = []
for name in dir(self.__class__):
if name.startswith('scope_'):
scope = name.split('scope_')[1]
scopes.append(scope)
return scopes |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _clean_dic(self, dic):
""" Clean recursively all empty or None values inside a dict. """ |
aux_dic = dic.copy()
for key, value in iter(dic.items()):
if value is None or value == '':
del aux_dic[key]
elif type(value) is dict:
cleaned_dict = self._clean_dic(value)
if not cleaned_dict:
del aux_dic[key]
continue
aux_dic[key] = cleaned_dict
return aux_dic |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_client_user_consent(self):
    """Persist the user's consent for the current client.

    Creates the ``UserConsent`` row if it does not exist; otherwise
    refreshes its expiry and grant date. The stored scope is always
    updated to the currently requested scope. Returns None.
    """
    now = timezone.now()
    expiry = now + timedelta(
        days=settings.get('OIDC_SKIP_CONSENT_EXPIRE'))
    consent, created = UserConsent.objects.get_or_create(
        user=self.request.user,
        client=self.client,
        defaults={
            'expires_at': expiry,
            'date_given': now,
        },
    )
    consent.scope = self.params['scope']
    # A pre-existing row still carries its old dates; refresh them.
    if not created:
        consent.expires_at = expiry
        consent.date_given = now
    consent.save()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def client_has_user_consent(self):
    """Return True when an unexpired consent already covers every
    requested scope for this user/client pair, else False."""
    try:
        consent = UserConsent.objects.get(
            user=self.request.user, client=self.client)
    except UserConsent.DoesNotExist:
        return False
    requested = set(self.params['scope'])
    return requested.issubset(consent.scope) and not consent.has_expired()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_scopes_information(self):
    """Return descriptions for all the scopes requested.

    Standard scope claims are gathered first; when the project defines
    ``OIDC_EXTRA_SCOPE_CLAIMS``, its entries override standard entries
    that share the same scope name.
    """
    scopes = StandardScopeClaims.get_scopes_info(self.params['scope'])

    if settings.get('OIDC_EXTRA_SCOPE_CLAIMS'):
        scopes_extra = settings.get(
            'OIDC_EXTRA_SCOPE_CLAIMS', import_str=True).get_scopes_info(self.params['scope'])
        # Drop standard entries shadowed by an extra claim. The previous
        # code deleted by index while iterating a copy, so after the
        # first deletion the stale indices removed the wrong elements.
        extra_names = {scope['scope'] for scope in scopes_extra}
        scopes = [scope for scope in scopes if scope['scope'] not in extra_names]
    else:
        scopes_extra = []

    return scopes + scopes_extra
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(name, import_str=False):
    """Read setting *name*, falling back to the package default.

    Raises if a required setting is missing. Dict-valued settings are
    merged over a copy of the default so user-supplied keys win. When
    *import_str* is true, the value is treated as a dotted path and the
    referenced object is returned instead of the string.
    """
    value = None
    default_value = getattr(default_settings, name)
    try:
        value = getattr(settings, name)
    except AttributeError:
        if name in default_settings.required_attrs:
            raise Exception('You must set ' + name + ' in your settings.')
    if isinstance(default_value, dict) and value:
        # Merge into a copy: updating default_value in place would
        # mutate the shared module-level default for every later caller.
        merged = default_value.copy()
        merged.update(value)
        value = merged
    elif value is None:
        value = default_value
    return import_from_str(value) if import_str else value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def OIDC_UNAUTHENTICATED_SESSION_MANAGEMENT_KEY(self):
    """
    OPTIONAL. Supply a fixed string to use as browser-state key for
    unauthenticated clients. Generated once, then memoized on the
    instance.
    """
    if not self._unauthenticated_session_management_key:
        # Draw from a CSPRNG (secrets) rather than random: this key is
        # security-relevant session-management state.
        import secrets
        alphabet = string.ascii_uppercase + string.digits
        self._unauthenticated_session_management_key = ''.join(
            secrets.choice(alphabet) for _ in range(100))
    return self._unauthenticated_session_management_key
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def strip_prompt_login(path):
    """
    Strip 'login' from the 'prompt' query parameter of *path* and
    return the rewritten URL. The parameter is removed entirely when
    'login' was its only value.
    """
    uri = urlsplit(path)
    query_params = parse_qs(uri.query)
    # Default must be [''] (not ''): ''[0] raises IndexError when the
    # URL carries no prompt parameter at all.
    prompt_list = query_params.get('prompt', [''])[0].split()
    if 'login' in prompt_list:
        prompt_list.remove('login')
        query_params['prompt'] = ' '.join(prompt_list)
        if not query_params['prompt']:
            del query_params['prompt']
        uri = uri._replace(query=urlencode(query_params, doseq=True))
    return urlunsplit(uri)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_site_url(site_url=None, request=None):
    """
    Resolve the site url, in order of precedence:
        1. a non-empty `site_url` argument
        2. `SITE_URL` from settings
        3. scheme and host taken from `request`
    Raises when none of the three is available.
    """
    resolved = site_url or settings.get('SITE_URL')
    if resolved:
        return resolved
    if request:
        return '{}://{}'.format(request.scheme, request.get_host())
    raise Exception('Either pass `site_url`, '
                    'or set `SITE_URL` in settings, '
                    'or pass `request` object.')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_issuer(site_url=None, request=None):
    """
    Build the issuer URL: the site url plus the provider path prefix
    (the discovery route with `/.well-known/openid-configuration`
    stripped off).
    """
    site = get_site_url(site_url=site_url, request=request)
    prefix = reverse('oidc_provider:provider-info') \
        .split('/.well-known/openid-configuration')[0]
    return str(site + prefix)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_browser_state_or_default(request):
    """
    Return the session browser-state value: a SHA-224 hex digest of the
    session key, or of the configured fallback key when the session has
    no key yet.
    """
    session_key = request.session.session_key
    if not session_key:
        session_key = settings.get('OIDC_UNAUTHENTICATED_SESSION_MANAGEMENT_KEY')
    return sha224(session_key.encode('utf-8')).hexdigest()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cors_allow_any(request, response):
    """
    Add headers permitting CORS requests from any origin, with or
    without credentials, with any requested headers.
    """
    origin = request.META.get('HTTP_ORIGIN')
    if not origin:
        # Not a cross-origin request; nothing to add.
        return response

    # The CORS spec forbids "*" on resources that support credentials,
    # so echo the caller's origin back instead.
    response['Access-Control-Allow-Origin'] = origin
    patch_vary_headers(response, ['Origin'])
    response['Access-Control-Allow-Credentials'] = 'true'

    if request.method == 'OPTIONS':
        # Preflight: mirror whatever headers the client asked for.
        if 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS' in request.META:
            response['Access-Control-Allow-Headers'] \
                = request.META['HTTP_ACCESS_CONTROL_REQUEST_HEADERS']
        response['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'

    return response
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_token(user, client, scope, id_token_dic=None):
    """
    Build (but do not save) a populated Token object and return it.

    Access and refresh tokens are fresh random hex strings; the expiry
    comes from the OIDC_TOKEN_EXPIRE setting.
    """
    token = Token()
    token.user = user
    token.client = client
    token.scope = scope
    token.access_token = uuid.uuid4().hex
    if id_token_dic is not None:
        token.id_token = id_token_dic
    token.refresh_token = uuid.uuid4().hex
    lifetime = timedelta(seconds=settings.get('OIDC_TOKEN_EXPIRE'))
    token.expires_at = timezone.now() + lifetime
    return token
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_code(user, client, scope, nonce, is_authentication, code_challenge=None, code_challenge_method=None):
    """
    Build (but do not save) a populated Code object and return it.

    PKCE fields are stored only when both the challenge and its method
    are supplied; the expiry comes from the OIDC_CODE_EXPIRE setting.
    """
    code = Code()
    code.user = user
    code.client = client
    code.scope = scope
    code.nonce = nonce
    code.is_authentication = is_authentication
    code.code = uuid.uuid4().hex
    if code_challenge and code_challenge_method:
        code.code_challenge = code_challenge
        code.code_challenge_method = code_challenge_method
    lifetime = timedelta(seconds=settings.get('OIDC_CODE_EXPIRE'))
    code.expires_at = timezone.now() + lifetime
    return code
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_client_alg_keys(client):
    """
    Return the signing keys matching the client's JWT algorithm.

    RS256 clients get every stored RSA key (at least one must exist);
    HS256 clients get a symmetric key built from their client secret.
    Any other algorithm raises.
    """
    alg = client.jwt_alg
    if alg == 'RS256':
        keys = [jwk_RSAKey(key=importKey(rsakey.key), kid=rsakey.kid)
                for rsakey in RSAKey.objects.all()]
        if not keys:
            raise Exception('You must add at least one RSA Key.')
        return keys
    if alg == 'HS256':
        return [SYMKey(key=client.client_secret, alg=alg)]
    raise Exception('Unsupported key algorithm.')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_gbasis(basis_lines, fname):
    '''Reads gbasis-formatted file data and converts it to a dictionary with the
       usual BSE fields

       Note that the gbasis format does not store all the fields we
       have, so some fields are left blank
    '''

    # Lines starting with '!' or '#' are comments; drop them and blank lines.
    skipchars = '!#'
    basis_lines = [l for l in basis_lines if l and not l[0] in skipchars]

    bs_data = create_skel('component')

    # Walk the file with an explicit cursor: each element block is
    # "<symbol>:<basis name>", then the max angular momentum, then one
    # shell section per AM value.
    i = 0
    bs_name = None
    while i < len(basis_lines):
        line = basis_lines[i]
        lsplt = line.split(':')
        elementsym = lsplt[0]

        # Every element header must carry the same basis-set name.
        if bs_name is None:
            bs_name = lsplt[1]
        elif lsplt[1] != bs_name:
            raise RuntimeError("Multiple basis sets in a file")

        element_Z = lut.element_Z_from_sym(elementsym)
        element_Z = str(element_Z)

        if not element_Z in bs_data['elements']:
            bs_data['elements'][element_Z] = {}

        element_data = bs_data['elements'][element_Z]

        if not 'electron_shells' in element_data:
            element_data['electron_shells'] = []

        i += 1
        max_am = int(basis_lines[i].strip())

        i += 1
        for am in range(0, max_am + 1):
            # Shell header: "<am letter> <nprim> <ngeneral contractions>"
            lsplt = basis_lines[i].split()
            shell_am = lut.amchar_to_int(lsplt[0])
            nprim = int(lsplt[1])
            ngen = int(lsplt[2])

            # Shell sections must appear in ascending AM order.
            if shell_am[0] != am:
                raise RuntimeError("AM out of order in gbasis?")

            # s/p shells are plain 'gto'; d and above are stored spherical.
            if max(shell_am) <= 1:
                func_type = 'gto'
            else:
                func_type = 'gto_spherical'

            shell = {
                'function_type': func_type,
                'region': '',
                'angular_momentum': shell_am
            }

            exponents = []
            coefficients = []

            i += 1
            # One line per primitive: exponent followed by ngen coefficients.
            for j in range(nprim):
                # Normalize Fortran-style 'D'/'d' exponent markers to 'E'.
                line = basis_lines[i].replace('D', 'E')
                line = line.replace('d', 'E')
                lsplt = line.split()
                if len(lsplt) != (ngen + 1):
                    raise RuntimeError("Incorrect number of general contractions in gbasis")
                exponents.append(lsplt[0])
                coefficients.append(lsplt[1:])
                i += 1

            shell['exponents'] = exponents

            # We need to transpose the coefficient matrix
            # (we store a matrix with primitives being the column index and
            # general contraction being the row index)
            shell['coefficients'] = list(map(list, zip(*coefficients)))

            element_data['electron_shells'].append(shell)

    return bs_data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_molcas(basis_lines, fname):
    '''Reads molcas-formatted file data and converts it to a dictionary with the
       usual BSE fields

       Note that the turbomole format does not store all the fields we
       have, so some fields are left blank
    '''

    # Lines starting with '*', '#' or '$' are comments; drop them and blanks.
    skipchars = '*#$'
    basis_lines = [l for l in basis_lines if l and not l[0] in skipchars]

    bs_data = create_skel('component')

    # Walk the file with an explicit cursor; each element block begins
    # with a label line of the form "/<symbol>.<name>....".
    i = 0
    while i < len(basis_lines):
        line = basis_lines[i]
        if not line.startswith('/'):
            raise RuntimeError("Expecting line starting with /")

        line_splt = line[1:].split('.')
        elementsym = line_splt[0]

        element_Z = lut.element_Z_from_sym(elementsym)
        element_Z = str(element_Z)

        if not element_Z in bs_data['elements']:
            bs_data['elements'][element_Z] = {}

        element_data = bs_data['elements'][element_Z]

        if "ecp" in line.lower():
            raise NotImplementedError("MolCAS ECPs not supported")
            # Skeleton of an ECP parser, kept (unreachable) for a future
            # implementation:
            #if not 'ecp_potentials' in element_data:
            #    element_data['ecp_potentials'] = []
            #i += 1
            #line = basis_lines[i]
            #lsplt = line.split('=')
            #maxam = int(lsplt[2])
            #n_elec = int(lsplt[1].split()[0])
            #amlist = [maxam]
            #amlist.extend(list(range(0, maxam)))
            #i += 1
            #for shell_am in amlist:
            #    shell_am2 = lut.amchar_to_int(basis_lines[i][0])[0]
            #    if shell_am2 != shell_am:
            #        raise RuntimeError("AM not in expected order?")
            #    i += 1
            #    ecp_shell = {
            #        'ecp_type': 'scalar',
            #        'angular_momentum': [shell_am],
            #    }
            #    ecp_exponents = []
            #    ecp_rexponents = []
            #    ecp_coefficients = []
            #    while i < len(basis_lines) and basis_lines[i][0].isalpha() is False:
            #        lsplt = basis_lines[i].split()
            #        ecp_exponents.append(lsplt[2])
            #        ecp_rexponents.append(int(lsplt[1]))
            #        ecp_coefficients.append(lsplt[0])
            #        i += 1
            #    ecp_shell['r_exponents'] = ecp_rexponents
            #    ecp_shell['gaussian_exponents'] = ecp_exponents
            #    ecp_shell['coefficients'] = [ecp_coefficients]
            #    element_data['ecp_potentials'].append(ecp_shell)
            #element_data['ecp_electrons'] = n_elec
        else:
            if not 'electron_shells' in element_data:
                element_data['electron_shells'] = []

            # Skip the element label line plus two comment lines (usually ref)
            i += 3

            # Skip over an options block
            line = basis_lines[i]
            if line.lower() == 'options':
                while basis_lines[i].lower() != 'endoptions':
                    i += 1
                i += 1

            # Second field of this line is the maximum angular momentum.
            lsplt = basis_lines[i].split()
            max_am = int(lsplt[1])

            i += 1
            for shell_am in range(max_am+1):
                # Shell header: "<nprim>[,] <ngeneral contractions>"
                lsplt = basis_lines[i].replace(',', ' ').split()
                nprim = int(lsplt[0])
                ngen = int(lsplt[1])
                i += 1

                # s/p shells are plain 'gto'; d and above are spherical.
                if shell_am <= 1:
                    func_type = 'gto'
                else:
                    func_type = 'gto_spherical'

                shell = {
                    'function_type': func_type,
                    'region': '',
                    'angular_momentum': [shell_am]
                }

                exponents = []
                coefficients = []

                # Exponents may wrap across lines; read until nprim collected.
                j = 0
                while j < nprim:
                    # Normalize Fortran-style 'D'/'d' exponent markers to 'E'.
                    line = basis_lines[i].replace('D', 'E')
                    line = line.replace('d', 'E')
                    lsplt = line.split()
                    exponents.extend(lsplt)
                    i += 1
                    j += len(lsplt)

                # One line per primitive, each holding ngen coefficients.
                for j in range(nprim):
                    line = basis_lines[i].replace('D', 'E')
                    line = line.replace('d', 'E')
                    lsplt = line.split()
                    if len(lsplt) != ngen:
                        print(fname)
                        print(line)
                        raise RuntimeError("Unexpected number of coefficients")
                    coefficients.append(lsplt)
                    i += 1

                shell['exponents'] = exponents

                # We need to transpose the coefficient matrix
                # (we store a matrix with primitives being the column index and
                # general contraction being the row index)
                shell['coefficients'] = list(map(list, zip(*coefficients)))

                element_data['electron_shells'].append(shell)

            # Skip energies? The count line gives how many values follow.
            to_skip = int(basis_lines[i].strip())
            skipped = 0
            i += 1
            while skipped < to_skip:
                skipped += len(basis_lines[i].split())
                i += 1

    return bs_data
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _read_plain_json(file_path, check_bse):
""" Reads a JSON file A simple wrapper around json.load that only takes the file name If the file does not exist, an exception is thrown. If the file does exist, but there is a problem with the JSON formatting, the filename is added to the exception information. If check_bse is True, this function also make sure the 'molssi_bse_schema' key exists in the file. Parameters file_path : str Full path to the file to read check_bse: bool If True, check to make sure the bse schema information is included. If not found, an exception is raised """ |
if not os.path.isfile(file_path):
raise FileNotFoundError('JSON file \'{}\' does not exist, is not '
'readable, or is not a file'.format(file_path))
try:
if file_path.endswith('.bz2'):
with bz2.open(file_path, 'rt', encoding=_default_encoding) as f:
js = json.load(f)
else:
with open(file_path, 'r', encoding=_default_encoding) as f:
js = json.load(f)
except json.decoder.JSONDecodeError as ex:
raise RuntimeError("File {} contains JSON errors".format(file_path)) from ex
if check_bse is True:
# Check for molssi_bse_schema key
if 'molssi_bse_schema' not in js:
raise RuntimeError('File {} does not appear to be a BSE JSON file'.format(file_path))
return js |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _write_plain_json(file_path, js):
    """Write *js* to *file_path* as JSON, bz2-compressing '.bz2' paths.

    Files are written with the package's default encoding and a 2-space
    indent; ensure_ascii is disabled so non-ASCII text is stored
    verbatim instead of escaped. An existing file is overwritten.
    """
    if file_path.endswith('.bz2'):
        opener, mode = bz2.open, 'wt'
    else:
        opener, mode = open, 'w'
    with opener(file_path, mode, encoding=_default_encoding) as handle:
        json.dump(js, handle, indent=2, ensure_ascii=False)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_notes_file(file_path):
    """Return the contents of a notes file, or None when it does not exist."""
    if os.path.isfile(file_path):
        with open(file_path, 'r', encoding=_default_encoding) as handle:
            return handle.read()
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def _whole_basis_types(basis):
'''
Get a list of all the types of features in this basis set.
'''
all_types = set()
for v in basis['elements'].values():
if 'electron_shells' in v:
for sh in v['electron_shells']:
all_types.add(sh['function_type'])
if 'ecp_potentials' in v:
for pot in v['ecp_potentials']:
all_types.add(pot['ecp_type'])
return sorted(list(all_types)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compose_elemental_basis(file_relpath, data_dir):
    """Create an 'elemental' basis from an elemental json file.

    Reads the elemental file, loads every component file it references,
    attaches each component's description to the element references,
    then merges all component data into one entry per element.
    """
    el_bs = fileio.read_json_basis(os.path.join(data_dir, file_relpath))

    # Every component file referenced by any element, read exactly once.
    component_files = set()
    for el_data in el_bs['elements'].values():
        component_files.update(el_data['components'])
    component_map = {
        relpath: fileio.read_json_basis(os.path.join(data_dir, relpath))
        for relpath in component_files
    }

    # Use the basis_set_description as the reference description.
    for component in component_map.values():
        for el_data in component['elements'].values():
            el_data['references'] = [{
                'reference_description': component['description'],
                'reference_keys': el_data['references']
            }]

    # Compose on a per-element basis.
    for sym, el_data in el_bs['elements'].items():
        per_component = []
        for relpath in el_data.pop('components'):
            elements = component_map[relpath]['elements']
            if sym not in elements:
                raise RuntimeError('File {} does not contain element {}'.format(relpath, sym))
            per_component.append(elements[sym])
        # Merge all the component data for this element.
        el_bs['elements'][sym] = manip.merge_element_data(None, per_component)

    return el_bs
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.