Dataset Viewer
Auto-converted to Parquet
Schema of the auto-converted Parquet data (column name, detected type, and value statistics):

| Column | Type | Stats |
| --- | --- | --- |
| repo | string | lengths 7–55 |
| path | string | lengths 4–223 |
| func_name | string | lengths 1–129 |
| original_string | string | lengths 234–21.9k |
| language | string | 1 distinct value |
| code | string | lengths 234–21.9k |
| code_tokens | sequence | lengths 20–10.3k |
| docstring | string | lengths 1–11.2k |
| docstring_tokens | sequence | lengths 1–725 |
| sha | string | lengths 40–40 |
| url | string | lengths 88–315 |
| partition | string | 3 distinct values |
| summary | string | lengths 5–350 |
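Since the dataset is auto-converted to Parquet, it should load with the standard `datasets` API. A minimal sketch, assuming the repo id shown on this page (`frankjosh/filtered_dataset`) and a default `train` split (the split layout is an assumption; the preview below only shows rows whose `partition` column is "train"):

```python
from datasets import load_dataset

# Repo id taken from this page; the "train" split name is an assumption.
ds = load_dataset("frankjosh/filtered_dataset", split="train")

# One record per Python function, with the columns listed in the schema above.
row = ds[0]
print(row["repo"], row["path"], row["func_name"])
print(row["summary"])
```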
Preview rows (one record per function; long values are truncated by the viewer):

repo: ageitgey/face_recognition
path: examples/face_recognition_knn.py
func_name: train
original_string: def train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False): """ Trains a k-nearest neighbors classifier for face recognition. :param train_dir: directory that contains a sub-directory for each known person, with its name. (View in source code to see train_dir ex...
language: python
code: def train(train_dir, model_save_path=None, n_neighbors=None, knn_algo='ball_tree', verbose=False): """ Trains a k-nearest neighbors classifier for face recognition. :param train_dir: directory that contains a sub-directory for each known person, with its name. (View in source code to see train_dir ex...
code_tokens: [ "def", "train", "(", "train_dir", ",", "model_save_path", "=", "None", ",", "n_neighbors", "=", "None", ",", "knn_algo", "=", "'ball_tree'", ",", "verbose", "=", "False", ")", ":", "X", "=", "[", "]", "y", "=", "[", "]", "# Loop through each person in the ...
docstring: Trains a k-nearest neighbors classifier for face recognition. :param train_dir: directory that contains a sub-directory for each known person, with its name. (View in source code to see train_dir example tree structure) Structure: <train_dir>/ ├── <person1>/ │ ├── <somename1>....
docstring_tokens: [ "Trains", "a", "k", "-", "nearest", "neighbors", "classifier", "for", "face", "recognition", "." ]
sha: c96b010c02f15e8eeb0f71308c641179ac1f19bb
url: https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/examples/face_recognition_knn.py#L46-L108
partition: train
summary: Train a k - nearest neighbors classifier for face recognition.

repo: ageitgey/face_recognition
path: examples/face_recognition_knn.py
func_name: predict
original_string: def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6): """ Recognizes faces in given image using a trained KNN classifier :param X_img_path: path to image to be recognized :param knn_clf: (optional) a knn classifier object. if not specified, model_save_path must be specified. ...
language: python
code: def predict(X_img_path, knn_clf=None, model_path=None, distance_threshold=0.6): """ Recognizes faces in given image using a trained KNN classifier :param X_img_path: path to image to be recognized :param knn_clf: (optional) a knn classifier object. if not specified, model_save_path must be specified. ...
code_tokens: [ "def", "predict", "(", "X_img_path", ",", "knn_clf", "=", "None", ",", "model_path", "=", "None", ",", "distance_threshold", "=", "0.6", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "X_img_path", ")", "or", "os", ".", "path", ".", "s...
docstring: Recognizes faces in given image using a trained KNN classifier :param X_img_path: path to image to be recognized :param knn_clf: (optional) a knn classifier object. if not specified, model_save_path must be specified. :param model_path: (optional) path to a pickled knn classifier. if not specified, model_s...
docstring_tokens: [ "Recognizes", "faces", "in", "given", "image", "using", "a", "trained", "KNN", "classifier" ]
sha: c96b010c02f15e8eeb0f71308c641179ac1f19bb
url: https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/examples/face_recognition_knn.py#L111-L150
partition: train
summary: Predicts faces in a given image using a KNN classifier.

repo: ageitgey/face_recognition
path: examples/face_recognition_knn.py
func_name: show_prediction_labels_on_image
original_string: def show_prediction_labels_on_image(img_path, predictions): """ Shows the face recognition results visually. :param img_path: path to image to be recognized :param predictions: results of the predict function :return: """ pil_image = Image.open(img_path).convert("RGB") draw = ImageDraw....
language: python
code: def show_prediction_labels_on_image(img_path, predictions): """ Shows the face recognition results visually. :param img_path: path to image to be recognized :param predictions: results of the predict function :return: """ pil_image = Image.open(img_path).convert("RGB") draw = ImageDraw....
code_tokens: [ "def", "show_prediction_labels_on_image", "(", "img_path", ",", "predictions", ")", ":", "pil_image", "=", "Image", ".", "open", "(", "img_path", ")", ".", "convert", "(", "\"RGB\"", ")", "draw", "=", "ImageDraw", ".", "Draw", "(", "pil_image", ")", "for", ...
docstring: Shows the face recognition results visually. :param img_path: path to image to be recognized :param predictions: results of the predict function :return:
docstring_tokens: [ "Shows", "the", "face", "recognition", "results", "visually", "." ]
sha: c96b010c02f15e8eeb0f71308c641179ac1f19bb
url: https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/examples/face_recognition_knn.py#L153-L181
partition: train
summary: Show the face recognition results visually.

repo: ageitgey/face_recognition
path: face_recognition/api.py
func_name: _trim_css_to_bounds
original_string: def _trim_css_to_bounds(css, image_shape): """ Make sure a tuple in (top, right, bottom, left) order is within the bounds of the image. :param css: plain tuple representation of the rect in (top, right, bottom, left) order :param image_shape: numpy shape of the image array :return: a trimmed plain...
language: python
code: def _trim_css_to_bounds(css, image_shape): """ Make sure a tuple in (top, right, bottom, left) order is within the bounds of the image. :param css: plain tuple representation of the rect in (top, right, bottom, left) order :param image_shape: numpy shape of the image array :return: a trimmed plain...
code_tokens: [ "def", "_trim_css_to_bounds", "(", "css", ",", "image_shape", ")", ":", "return", "max", "(", "css", "[", "0", "]", ",", "0", ")", ",", "min", "(", "css", "[", "1", "]", ",", "image_shape", "[", "1", "]", ")", ",", "min", "(", "css", "[", "2", ...
docstring: Make sure a tuple in (top, right, bottom, left) order is within the bounds of the image. :param css: plain tuple representation of the rect in (top, right, bottom, left) order :param image_shape: numpy shape of the image array :return: a trimmed plain tuple representation of the rect in (top, right, botto...
docstring_tokens: [ "Make", "sure", "a", "tuple", "in", "(", "top", "right", "bottom", "left", ")", "order", "is", "within", "the", "bounds", "of", "the", "image", "." ]
sha: c96b010c02f15e8eeb0f71308c641179ac1f19bb
url: https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L52-L60
partition: train
summary: Trim the given CSS tuple to the bounds of the image.

repo: ageitgey/face_recognition
path: face_recognition/api.py
func_name: face_distance
original_string: def face_distance(face_encodings, face_to_compare): """ Given a list of face encodings, compare them to a known face encoding and get a euclidean distance for each comparison face. The distance tells you how similar the faces are. :param faces: List of face encodings to compare :param face_to_compa...
language: python
code: def face_distance(face_encodings, face_to_compare): """ Given a list of face encodings, compare them to a known face encoding and get a euclidean distance for each comparison face. The distance tells you how similar the faces are. :param faces: List of face encodings to compare :param face_to_compa...
code_tokens: [ "def", "face_distance", "(", "face_encodings", ",", "face_to_compare", ")", ":", "if", "len", "(", "face_encodings", ")", "==", "0", ":", "return", "np", ".", "empty", "(", "(", "0", ")", ")", "return", "np", ".", "linalg", ".", "norm", "(", "face_enco...
docstring: Given a list of face encodings, compare them to a known face encoding and get a euclidean distance for each comparison face. The distance tells you how similar the faces are. :param faces: List of face encodings to compare :param face_to_compare: A face encoding to compare against :return: A numpy ndar...
docstring_tokens: [ "Given", "a", "list", "of", "face", "encodings", "compare", "them", "to", "a", "known", "face", "encoding", "and", "get", "a", "euclidean", "distance", "for", "each", "comparison", "face", ".", "The", "distance", "tells", "you", "how", "similar", "the", "f...
sha: c96b010c02f15e8eeb0f71308c641179ac1f19bb
url: https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L63-L75
partition: train
summary: Given a list of face encodings compare them to a known face encoding and get a euclidean distance for each comparison face.
repo: ageitgey/face_recognition
path: face_recognition/api.py
func_name: load_image_file
original_string: def load_image_file(file, mode='RGB'): """ Loads an image file (.jpg, .png, etc) into a numpy array :param file: image file name or file object to load :param mode: format to convert the image to. Only 'RGB' (8-bit RGB, 3 channels) and 'L' (black and white) are supported. :return: image contents as...
language: python
code: def load_image_file(file, mode='RGB'): """ Loads an image file (.jpg, .png, etc) into a numpy array :param file: image file name or file object to load :param mode: format to convert the image to. Only 'RGB' (8-bit RGB, 3 channels) and 'L' (black and white) are supported. :return: image contents as...
code_tokens: [ "def", "load_image_file", "(", "file", ",", "mode", "=", "'RGB'", ")", ":", "im", "=", "PIL", ".", "Image", ".", "open", "(", "file", ")", "if", "mode", ":", "im", "=", "im", ".", "convert", "(", "mode", ")", "return", "np", ".", "array", "(", ...
docstring: Loads an image file (.jpg, .png, etc) into a numpy array :param file: image file name or file object to load :param mode: format to convert the image to. Only 'RGB' (8-bit RGB, 3 channels) and 'L' (black and white) are supported. :return: image contents as numpy array
docstring_tokens: [ "Loads", "an", "image", "file", "(", ".", "jpg", ".", "png", "etc", ")", "into", "a", "numpy", "array" ]
sha: c96b010c02f15e8eeb0f71308c641179ac1f19bb
url: https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L78-L89
partition: train
summary: Loads an image file into a numpy array.

repo: ageitgey/face_recognition
path: face_recognition/api.py
func_name: _raw_face_locations
original_string: def _raw_face_locations(img, number_of_times_to_upsample=1, model="hog"): """ Returns an array of bounding boxes of human faces in a image :param img: An image (as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller face...
language: python
code: def _raw_face_locations(img, number_of_times_to_upsample=1, model="hog"): """ Returns an array of bounding boxes of human faces in a image :param img: An image (as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller face...
code_tokens: [ "def", "_raw_face_locations", "(", "img", ",", "number_of_times_to_upsample", "=", "1", ",", "model", "=", "\"hog\"", ")", ":", "if", "model", "==", "\"cnn\"", ":", "return", "cnn_face_detector", "(", "img", ",", "number_of_times_to_upsample", ")", "else", ":", ...
docstring: Returns an array of bounding boxes of human faces in a image :param img: An image (as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. :param model: Which face detection model to use. "hog" is less accurate but fas...
docstring_tokens: [ "Returns", "an", "array", "of", "bounding", "boxes", "of", "human", "faces", "in", "a", "image" ]
sha: c96b010c02f15e8eeb0f71308c641179ac1f19bb
url: https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L92-L105
partition: train
summary: Returns an array of bounding boxes of human faces in a image.

repo: ageitgey/face_recognition
path: face_recognition/api.py
func_name: face_locations
original_string: def face_locations(img, number_of_times_to_upsample=1, model="hog"): """ Returns an array of bounding boxes of human faces in a image :param img: An image (as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. ...
language: python
code: def face_locations(img, number_of_times_to_upsample=1, model="hog"): """ Returns an array of bounding boxes of human faces in a image :param img: An image (as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. ...
code_tokens: [ "def", "face_locations", "(", "img", ",", "number_of_times_to_upsample", "=", "1", ",", "model", "=", "\"hog\"", ")", ":", "if", "model", "==", "\"cnn\"", ":", "return", "[", "_trim_css_to_bounds", "(", "_rect_to_css", "(", "face", ".", "rect", ")", ",", "...
docstring: Returns an array of bounding boxes of human faces in a image :param img: An image (as a numpy array) :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces. :param model: Which face detection model to use. "hog" is less accurate but fas...
docstring_tokens: [ "Returns", "an", "array", "of", "bounding", "boxes", "of", "human", "faces", "in", "a", "image" ]
sha: c96b010c02f15e8eeb0f71308c641179ac1f19bb
url: https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L108-L121
partition: train
summary: Returns an array of bounding boxes of human faces in a image.

repo: ageitgey/face_recognition
path: face_recognition/api.py
func_name: batch_face_locations
original_string: def batch_face_locations(images, number_of_times_to_upsample=1, batch_size=128): """ Returns an 2d array of bounding boxes of human faces in a image using the cnn face detector If you are using a GPU, this can give you much faster results since the GPU can process batches of images at once. If you aren'...
language: python
code: def batch_face_locations(images, number_of_times_to_upsample=1, batch_size=128): """ Returns an 2d array of bounding boxes of human faces in a image using the cnn face detector If you are using a GPU, this can give you much faster results since the GPU can process batches of images at once. If you aren'...
code_tokens: [ "def", "batch_face_locations", "(", "images", ",", "number_of_times_to_upsample", "=", "1", ",", "batch_size", "=", "128", ")", ":", "def", "convert_cnn_detections_to_css", "(", "detections", ")", ":", "return", "[", "_trim_css_to_bounds", "(", "_rect_to_css", "(", ...
docstring: Returns an 2d array of bounding boxes of human faces in a image using the cnn face detector If you are using a GPU, this can give you much faster results since the GPU can process batches of images at once. If you aren't using a GPU, you don't need this function. :param img: A list of images (each as a num...
docstring_tokens: [ "Returns", "an", "2d", "array", "of", "bounding", "boxes", "of", "human", "faces", "in", "a", "image", "using", "the", "cnn", "face", "detector", "If", "you", "are", "using", "a", "GPU", "this", "can", "give", "you", "much", "faster", "results", "since",...
sha: c96b010c02f15e8eeb0f71308c641179ac1f19bb
url: https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L135-L151
partition: train
summary: Returns a 2d array of bounding boxes of human faces in a given image using the cnn face detectors.

repo: ageitgey/face_recognition
path: face_recognition/api.py
func_name: face_landmarks
original_string: def face_landmarks(face_image, face_locations=None, model="large"): """ Given an image, returns a dict of face feature locations (eyes, nose, etc) for each face in the image :param face_image: image to search :param face_locations: Optionally provide a list of face locations to check. :param model:...
language: python
code: def face_landmarks(face_image, face_locations=None, model="large"): """ Given an image, returns a dict of face feature locations (eyes, nose, etc) for each face in the image :param face_image: image to search :param face_locations: Optionally provide a list of face locations to check. :param model:...
code_tokens: [ "def", "face_landmarks", "(", "face_image", ",", "face_locations", "=", "None", ",", "model", "=", "\"large\"", ")", ":", "landmarks", "=", "_raw_face_landmarks", "(", "face_image", ",", "face_locations", ",", "model", ")", "landmarks_as_tuples", "=", "[", "[", ...
docstring: Given an image, returns a dict of face feature locations (eyes, nose, etc) for each face in the image :param face_image: image to search :param face_locations: Optionally provide a list of face locations to check. :param model: Optional - which model to use. "large" (default) or "small" which only returns ...
docstring_tokens: [ "Given", "an", "image", "returns", "a", "dict", "of", "face", "feature", "locations", "(", "eyes", "nose", "etc", ")", "for", "each", "face", "in", "the", "image" ]
sha: c96b010c02f15e8eeb0f71308c641179ac1f19bb
url: https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L168-L200
partition: train
summary: Given an image returns a dict of face feature locations

repo: ageitgey/face_recognition
path: face_recognition/api.py
func_name: face_encodings
original_string: def face_encodings(face_image, known_face_locations=None, num_jitters=1): """ Given an image, return the 128-dimension face encoding for each face in the image. :param face_image: The image that contains one or more faces :param known_face_locations: Optional - the bounding boxes of each face if you al...
language: python
code: def face_encodings(face_image, known_face_locations=None, num_jitters=1): """ Given an image, return the 128-dimension face encoding for each face in the image. :param face_image: The image that contains one or more faces :param known_face_locations: Optional - the bounding boxes of each face if you al...
code_tokens: [ "def", "face_encodings", "(", "face_image", ",", "known_face_locations", "=", "None", ",", "num_jitters", "=", "1", ")", ":", "raw_landmarks", "=", "_raw_face_landmarks", "(", "face_image", ",", "known_face_locations", ",", "model", "=", "\"small\"", ")", "return"...
docstring: Given an image, return the 128-dimension face encoding for each face in the image. :param face_image: The image that contains one or more faces :param known_face_locations: Optional - the bounding boxes of each face if you already know them. :param num_jitters: How many times to re-sample the face when cal...
docstring_tokens: [ "Given", "an", "image", "return", "the", "128", "-", "dimension", "face", "encoding", "for", "each", "face", "in", "the", "image", "." ]
sha: c96b010c02f15e8eeb0f71308c641179ac1f19bb
url: https://github.com/ageitgey/face_recognition/blob/c96b010c02f15e8eeb0f71308c641179ac1f19bb/face_recognition/api.py#L203-L213
partition: train
summary: Given an image returns the 128 - dimensional face encoding for each face in the image.
repo: apache/spark
path: python/pyspark/sql/types.py
func_name: _parse_datatype_string
original_string: def _parse_datatype_string(s): """ Parses the given data type string to a :class:`DataType`. The data type string format equals to :class:`DataType.simpleString`, except that top level struct type can omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead ...
language: python
code: def _parse_datatype_string(s): """ Parses the given data type string to a :class:`DataType`. The data type string format equals to :class:`DataType.simpleString`, except that top level struct type can omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead ...
code_tokens: [ "def", "_parse_datatype_string", "(", "s", ")", ":", "sc", "=", "SparkContext", ".", "_active_spark_context", "def", "from_ddl_schema", "(", "type_str", ")", ":", "return", "_parse_datatype_json_string", "(", "sc", ".", "_jvm", ".", "org", ".", "apache", ".", ...
docstring: Parses the given data type string to a :class:`DataType`. The data type string format equals to :class:`DataType.simpleString`, except that top level struct type can omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead of ``tinyint`` for :class:`ByteType`. We ...
docstring_tokens: [ "Parses", "the", "given", "data", "type", "string", "to", "a", ":", "class", ":", "DataType", ".", "The", "data", "type", "string", "format", "equals", "to", ":", "class", ":", "DataType", ".", "simpleString", "except", "that", "top", "level", "struct", ...
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L758-L820
partition: train
summary: Parses a string into a base - level structure type.

repo: apache/spark
path: python/pyspark/sql/types.py
func_name: _infer_type
original_string: def _infer_type(obj): """Infer the DataType from obj """ if obj is None: return NullType() if hasattr(obj, '__UDT__'): return obj.__UDT__ dataType = _type_mappings.get(type(obj)) if dataType is DecimalType: # the precision and scale of `obj` may be different from row to...
language: python
code: def _infer_type(obj): """Infer the DataType from obj """ if obj is None: return NullType() if hasattr(obj, '__UDT__'): return obj.__UDT__ dataType = _type_mappings.get(type(obj)) if dataType is DecimalType: # the precision and scale of `obj` may be different from row to...
code_tokens: [ "def", "_infer_type", "(", "obj", ")", ":", "if", "obj", "is", "None", ":", "return", "NullType", "(", ")", "if", "hasattr", "(", "obj", ",", "'__UDT__'", ")", ":", "return", "obj", ".", "__UDT__", "dataType", "=", "_type_mappings", ".", "get", "(", ...
docstring: Infer the DataType from obj
docstring_tokens: [ "Infer", "the", "DataType", "from", "obj" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1003-L1038
partition: train
summary: Infer the DataType from obj

repo: apache/spark
path: python/pyspark/sql/types.py
func_name: _infer_schema
original_string: def _infer_schema(row, names=None): """Infer the schema from dict/namedtuple/object""" if isinstance(row, dict): items = sorted(row.items()) elif isinstance(row, (tuple, list)): if hasattr(row, "__fields__"): # Row items = zip(row.__fields__, tuple(row)) elif hasattr(ro...
language: python
code: def _infer_schema(row, names=None): """Infer the schema from dict/namedtuple/object""" if isinstance(row, dict): items = sorted(row.items()) elif isinstance(row, (tuple, list)): if hasattr(row, "__fields__"): # Row items = zip(row.__fields__, tuple(row)) elif hasattr(ro...
code_tokens: [ "def", "_infer_schema", "(", "row", ",", "names", "=", "None", ")", ":", "if", "isinstance", "(", "row", ",", "dict", ")", ":", "items", "=", "sorted", "(", "row", ".", "items", "(", ")", ")", "elif", "isinstance", "(", "row", ",", "(", "tuple", ...
docstring: Infer the schema from dict/namedtuple/object
docstring_tokens: [ "Infer", "the", "schema", "from", "dict", "/", "namedtuple", "/", "object" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1041-L1065
partition: train
summary: Infer the schema from dict namedtuple or object

repo: apache/spark
path: python/pyspark/sql/types.py
func_name: _create_converter
original_string: def _create_converter(dataType): """Create a converter to drop the names of fields in obj """ if not _need_converter(dataType): return lambda x: x if isinstance(dataType, ArrayType): conv = _create_converter(dataType.elementType) return lambda row: [conv(v) for v in row] elif i...
language: python
code: def _create_converter(dataType): """Create a converter to drop the names of fields in obj """ if not _need_converter(dataType): return lambda x: x if isinstance(dataType, ArrayType): conv = _create_converter(dataType.elementType) return lambda row: [conv(v) for v in row] elif i...
code_tokens: [ "def", "_create_converter", "(", "dataType", ")", ":", "if", "not", "_need_converter", "(", "dataType", ")", ":", "return", "lambda", "x", ":", "x", "if", "isinstance", "(", "dataType", ",", "ArrayType", ")", ":", "conv", "=", "_create_converter", "(", "da...
docstring: Create a converter to drop the names of fields in obj
docstring_tokens: [ "Create", "a", "converter", "to", "drop", "the", "names", "of", "fields", "in", "obj" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1133-L1180
partition: train
summary: Create a converter to drop the names of fields in obj

repo: apache/spark
path: python/pyspark/sql/types.py
func_name: to_arrow_type
original_string: def to_arrow_type(dt): """ Convert Spark data type to pyarrow type """ import pyarrow as pa if type(dt) == BooleanType: arrow_type = pa.bool_() elif type(dt) == ByteType: arrow_type = pa.int8() elif type(dt) == ShortType: arrow_type = pa.int16() elif type(dt) == Integ...
language: python
code: def to_arrow_type(dt): """ Convert Spark data type to pyarrow type """ import pyarrow as pa if type(dt) == BooleanType: arrow_type = pa.bool_() elif type(dt) == ByteType: arrow_type = pa.int8() elif type(dt) == ShortType: arrow_type = pa.int16() elif type(dt) == Integ...
code_tokens: [ "def", "to_arrow_type", "(", "dt", ")", ":", "import", "pyarrow", "as", "pa", "if", "type", "(", "dt", ")", "==", "BooleanType", ":", "arrow_type", "=", "pa", ".", "bool_", "(", ")", "elif", "type", "(", "dt", ")", "==", "ByteType", ":", "arrow_type"...
docstring: Convert Spark data type to pyarrow type
docstring_tokens: [ "Convert", "Spark", "data", "type", "to", "pyarrow", "type" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1581-L1622
partition: train
summary: Convert Spark data type to Arrow type
repo: apache/spark
path: python/pyspark/sql/types.py
func_name: from_arrow_type
original_string: def from_arrow_type(at): """ Convert pyarrow type to Spark data type. """ import pyarrow.types as types if types.is_boolean(at): spark_type = BooleanType() elif types.is_int8(at): spark_type = ByteType() elif types.is_int16(at): spark_type = ShortType() elif types.is_...
language: python
code: def from_arrow_type(at): """ Convert pyarrow type to Spark data type. """ import pyarrow.types as types if types.is_boolean(at): spark_type = BooleanType() elif types.is_int8(at): spark_type = ByteType() elif types.is_int16(at): spark_type = ShortType() elif types.is_...
code_tokens: [ "def", "from_arrow_type", "(", "at", ")", ":", "import", "pyarrow", ".", "types", "as", "types", "if", "types", ".", "is_boolean", "(", "at", ")", ":", "spark_type", "=", "BooleanType", "(", ")", "elif", "types", ".", "is_int8", "(", "at", ")", ":", ...
docstring: Convert pyarrow type to Spark data type.
docstring_tokens: [ "Convert", "pyarrow", "type", "to", "Spark", "data", "type", "." ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1634-L1674
partition: train
summary: Convert a pyarrow type to Spark data type.

repo: apache/spark
path: python/pyspark/sql/types.py
func_name: _check_series_localize_timestamps
original_string: def _check_series_localize_timestamps(s, timezone): """ Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone. If the input series is not a timestamp series, then the same series is returned. If the input series is a timestamp series, then a converted series is...
language: python
code: def _check_series_localize_timestamps(s, timezone): """ Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone. If the input series is not a timestamp series, then the same series is returned. If the input series is a timestamp series, then a converted series is...
code_tokens: [ "def", "_check_series_localize_timestamps", "(", "s", ",", "timezone", ")", ":", "from", "pyspark", ".", "sql", ".", "utils", "import", "require_minimum_pandas_version", "require_minimum_pandas_version", "(", ")", "from", "pandas", ".", "api", ".", "types", "import"...
docstring: Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone. If the input series is not a timestamp series, then the same series is returned. If the input series is a timestamp series, then a converted series is returned. :param s: pandas.Series :param timezone: the...
docstring_tokens: [ "Convert", "timezone", "aware", "timestamps", "to", "timezone", "-", "naive", "in", "the", "specified", "timezone", "or", "local", "timezone", "." ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1700-L1720
partition: train
summary: Convert timezone aware timestamps to timezone - naive in the specified timezone or local timezone.

repo: apache/spark
path: python/pyspark/sql/types.py
func_name: _check_dataframe_localize_timestamps
original_string: def _check_dataframe_localize_timestamps(pdf, timezone): """ Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone :param pdf: pandas.DataFrame :param timezone: the timezone to convert. if None then use local timezone :return pandas.DataFrame where any time...
language: python
code: def _check_dataframe_localize_timestamps(pdf, timezone): """ Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone :param pdf: pandas.DataFrame :param timezone: the timezone to convert. if None then use local timezone :return pandas.DataFrame where any time...
code_tokens: [ "def", "_check_dataframe_localize_timestamps", "(", "pdf", ",", "timezone", ")", ":", "from", "pyspark", ".", "sql", ".", "utils", "import", "require_minimum_pandas_version", "require_minimum_pandas_version", "(", ")", "for", "column", ",", "series", "in", "pdf", "....
docstring: Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone :param pdf: pandas.DataFrame :param timezone: the timezone to convert. if None then use local timezone :return pandas.DataFrame where any timezone aware columns have been converted to tz-naive
docstring_tokens: [ "Convert", "timezone", "aware", "timestamps", "to", "timezone", "-", "naive", "in", "the", "specified", "timezone", "or", "local", "timezone" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1723-L1736
partition: train
summary: Convert timezone aware timestamps to timezone - naive in the specified timezone or local timezone - naive in the specified timezone or local timezone - naive in the specified timezone.

repo: apache/spark
path: python/pyspark/sql/types.py
func_name: _check_series_convert_timestamps_internal
original_string: def _check_series_convert_timestamps_internal(s, timezone): """ Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for Spark internal storage :param s: a pandas.Series :param timezone: the timezone to convert. if None then use local timezone :return panda...
language: python
code: def _check_series_convert_timestamps_internal(s, timezone): """ Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for Spark internal storage :param s: a pandas.Series :param timezone: the timezone to convert. if None then use local timezone :return panda...
code_tokens: [ "def", "_check_series_convert_timestamps_internal", "(", "s", ",", "timezone", ")", ":", "from", "pyspark", ".", "sql", ".", "utils", "import", "require_minimum_pandas_version", "require_minimum_pandas_version", "(", ")", "from", "pandas", ".", "api", ".", "types", ...
docstring: Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for Spark internal storage :param s: a pandas.Series :param timezone: the timezone to convert. if None then use local timezone :return pandas.Series where if it is a timestamp, has been UTC normalized without a t...
docstring_tokens: [ "Convert", "a", "tz", "-", "naive", "timestamp", "in", "the", "specified", "timezone", "or", "local", "timezone", "to", "UTC", "normalized", "for", "Spark", "internal", "storage" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1739-L1789
partition: train
summary: Convert a tz - naive timestamp in the specified timezone or local timezone to UTC normalized for Spark internal storage.

repo: apache/spark
path: python/pyspark/sql/types.py
func_name: _check_series_convert_timestamps_localize
original_string: def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone): """ Convert timestamp to timezone-naive in the specified timezone or local timezone :param s: a pandas.Series :param from_timezone: the timezone to convert from. if None then use local timezone :param to_timezone: the tim...
language: python
code: def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone): """ Convert timestamp to timezone-naive in the specified timezone or local timezone :param s: a pandas.Series :param from_timezone: the timezone to convert from. if None then use local timezone :param to_timezone: the tim...
code_tokens: [ "def", "_check_series_convert_timestamps_localize", "(", "s", ",", "from_timezone", ",", "to_timezone", ")", ":", "from", "pyspark", ".", "sql", ".", "utils", "import", "require_minimum_pandas_version", "require_minimum_pandas_version", "(", ")", "import", "pandas", "as...
docstring: Convert timestamp to timezone-naive in the specified timezone or local timezone :param s: a pandas.Series :param from_timezone: the timezone to convert from. if None then use local timezone :param to_timezone: the timezone to convert to. if None then use local timezone :return pandas.Series where if it...
docstring_tokens: [ "Convert", "timestamp", "to", "timezone", "-", "naive", "in", "the", "specified", "timezone", "or", "local", "timezone" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1792-L1817
partition: train
summary: Convert timestamp to timezone - naive in the specified timezone or local timezone.
repo: apache/spark
path: python/pyspark/sql/types.py
func_name: StructType.add
original_string: def add(self, field, data_type=None, nullable=True, metadata=None): """ Construct a StructType by adding new elements to it to define the schema. The method accepts either: a) A single parameter which is a StructField object. b) Between 2 and 4 parameters as (name, data_...
language: python
code: def add(self, field, data_type=None, nullable=True, metadata=None): """ Construct a StructType by adding new elements to it to define the schema. The method accepts either: a) A single parameter which is a StructField object. b) Between 2 and 4 parameters as (name, data_...
code_tokens: [ "def", "add", "(", "self", ",", "field", ",", "data_type", "=", "None", ",", "nullable", "=", "True", ",", "metadata", "=", "None", ")", ":", "if", "isinstance", "(", "field", ",", "StructField", ")", ":", "self", ".", "fields", ".", "append", "(", ...
docstring: Construct a StructType by adding new elements to it to define the schema. The method accepts either: a) A single parameter which is a StructField object. b) Between 2 and 4 parameters as (name, data_type, nullable (optional), metadata(optional). The data_type parameter ma...
docstring_tokens: [ "Construct", "a", "StructType", "by", "adding", "new", "elements", "to", "it", "to", "define", "the", "schema", ".", "The", "method", "accepts", "either", ":" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L491-L537
partition: train
summary: Constructs a new StructType object by adding new elements to the list of fields.

repo: apache/spark
path: python/pyspark/sql/types.py
func_name: Row.asDict
original_string: def asDict(self, recursive=False): """ Return as an dict :param recursive: turns the nested Row as dict (default: False). >>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11} True >>> row = Row(key=1, value=Row(name='a', age=2)) >>> row.asDict(...
language: python
code: def asDict(self, recursive=False): """ Return as an dict :param recursive: turns the nested Row as dict (default: False). >>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11} True >>> row = Row(key=1, value=Row(name='a', age=2)) >>> row.asDict(...
code_tokens: [ "def", "asDict", "(", "self", ",", "recursive", "=", "False", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"__fields__\"", ")", ":", "raise", "TypeError", "(", "\"Cannot convert a Row class into dict\"", ")", "if", "recursive", ":", "def", "conv", "...
docstring: Return as an dict :param recursive: turns the nested Row as dict (default: False). >>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11} True >>> row = Row(key=1, value=Row(name='a', age=2)) >>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')} ...
docstring_tokens: [ "Return", "as", "an", "dict" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/types.py#L1463-L1492
partition: train
summary: Converts the table into a dict.

repo: apache/spark
path: python/pyspark/shuffle.py
func_name: ExternalMerger.mergeValues
original_string: def mergeValues(self, iterator): """ Combine the items by creator and combiner """ # speedup attribute lookup creator, comb = self.agg.createCombiner, self.agg.mergeValue c, data, pdata, hfun, batch = 0, self.data, self.pdata, self._partition, self.batch limit = self.memory_limit...
language: python
code: def mergeValues(self, iterator): """ Combine the items by creator and combiner """ # speedup attribute lookup creator, comb = self.agg.createCombiner, self.agg.mergeValue c, data, pdata, hfun, batch = 0, self.data, self.pdata, self._partition, self.batch limit = self.memory_limit...
code_tokens: [ "def", "mergeValues", "(", "self", ",", "iterator", ")", ":", "# speedup attribute lookup", "creator", ",", "comb", "=", "self", ".", "agg", ".", "createCombiner", ",", "self", ".", "agg", ".", "mergeValue", "c", ",", "data", ",", "pdata", ",", "hfun", "...
docstring: Combine the items by creator and combiner
docstring_tokens: [ "Combine", "the", "items", "by", "creator", "and", "combiner" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L231-L253
partition: train
summary: Combine the items by creator and combiner

repo: apache/spark
path: python/pyspark/shuffle.py
func_name: ExternalMerger.mergeCombiners
original_string: def mergeCombiners(self, iterator, limit=None): """ Merge (K,V) pair by mergeCombiner """ if limit is None: limit = self.memory_limit # speedup attribute lookup comb, hfun, objsize = self.agg.mergeCombiners, self._partition, self._object_size c, data, pdata, batch = 0...
language: python
code: def mergeCombiners(self, iterator, limit=None): """ Merge (K,V) pair by mergeCombiner """ if limit is None: limit = self.memory_limit # speedup attribute lookup comb, hfun, objsize = self.agg.mergeCombiners, self._partition, self._object_size c, data, pdata, batch = 0...
code_tokens: [ "def", "mergeCombiners", "(", "self", ",", "iterator", ",", "limit", "=", "None", ")", ":", "if", "limit", "is", "None", ":", "limit", "=", "self", ".", "memory_limit", "# speedup attribute lookup", "comb", ",", "hfun", ",", "objsize", "=", "self", ".", ...
docstring: Merge (K,V) pair by mergeCombiner
docstring_tokens: [ "Merge", "(", "K", "V", ")", "pair", "by", "mergeCombiner" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L265-L289
partition: train
summary: Merge a set of keys and values by merging them into a single object.

repo: apache/spark
path: python/pyspark/shuffle.py
func_name: ExternalMerger._spill
original_string: def _spill(self): """ dump already partitioned data into disks. It will dump the data in batch for better performance. """ global MemoryBytesSpilled, DiskBytesSpilled path = self._get_spill_dir(self.spills) if not os.path.exists(path): os.makedirs(pat...
language: python
code: def _spill(self): """ dump already partitioned data into disks. It will dump the data in batch for better performance. """ global MemoryBytesSpilled, DiskBytesSpilled path = self._get_spill_dir(self.spills) if not os.path.exists(path): os.makedirs(pat...
code_tokens: [ "def", "_spill", "(", "self", ")", ":", "global", "MemoryBytesSpilled", ",", "DiskBytesSpilled", "path", "=", "self", ".", "_get_spill_dir", "(", "self", ".", "spills", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", "....
docstring: dump already partitioned data into disks. It will dump the data in batch for better performance.
docstring_tokens: [ "dump", "already", "partitioned", "data", "into", "disks", "." ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L291-L337
partition: train
summary: This function will dump already partitioned data into disks. It will dump the data into the disks and the memory used by the memory.
repo: apache/spark
path: python/pyspark/shuffle.py
func_name: ExternalMerger._external_items
original_string: def _external_items(self): """ Return all partitioned items as iterator """ assert not self.data if any(self.pdata): self._spill() # disable partitioning and spilling when merge combiners from disk self.pdata = [] try: for i in range(self.partitio...
language: python
code: def _external_items(self): """ Return all partitioned items as iterator """ assert not self.data if any(self.pdata): self._spill() # disable partitioning and spilling when merge combiners from disk self.pdata = [] try: for i in range(self.partitio...
code_tokens: [ "def", "_external_items", "(", "self", ")", ":", "assert", "not", "self", ".", "data", "if", "any", "(", "self", ".", "pdata", ")", ":", "self", ".", "_spill", "(", ")", "# disable partitioning and spilling when merge combiners from disk", "self", ".", "pdata", ...
docstring: Return all partitioned items as iterator
docstring_tokens: [ "Return", "all", "partitioned", "items", "as", "iterator" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L345-L364
partition: train
summary: Return all partitioned items as iterator

repo: apache/spark
path: python/pyspark/shuffle.py
func_name: ExternalMerger._recursive_merged_items
original_string: def _recursive_merged_items(self, index): """ merge the partitioned items and return the as iterator If one partition can not be fit in memory, then them will be partitioned and merged recursively. """ subdirs = [os.path.join(d, "parts", str(index)) for d in self.localdi...
language: python
code: def _recursive_merged_items(self, index): """ merge the partitioned items and return the as iterator If one partition can not be fit in memory, then them will be partitioned and merged recursively. """ subdirs = [os.path.join(d, "parts", str(index)) for d in self.localdi...
code_tokens: [ "def", "_recursive_merged_items", "(", "self", ",", "index", ")", ":", "subdirs", "=", "[", "os", ".", "path", ".", "join", "(", "d", ",", "\"parts\"", ",", "str", "(", "index", ")", ")", "for", "d", "in", "self", ".", "localdirs", "]", "m", "=", ...
docstring: merge the partitioned items and return the as iterator If one partition can not be fit in memory, then them will be partitioned and merged recursively.
docstring_tokens: [ "merge", "the", "partitioned", "items", "and", "return", "the", "as", "iterator" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L386-L409
partition: train
summary: Merge the partitioned items and return the as iterator

repo: apache/spark
path: python/pyspark/shuffle.py
func_name: ExternalSorter.sorted
original_string: def sorted(self, iterator, key=None, reverse=False): """ Sort the elements in iterator, do external sort when the memory goes above the limit. """ global MemoryBytesSpilled, DiskBytesSpilled batch, limit = 100, self._next_limit() chunks, current_chunk = [], [] ...
language: python
code: def sorted(self, iterator, key=None, reverse=False): """ Sort the elements in iterator, do external sort when the memory goes above the limit. """ global MemoryBytesSpilled, DiskBytesSpilled batch, limit = 100, self._next_limit() chunks, current_chunk = [], [] ...
code_tokens: [ "def", "sorted", "(", "self", ",", "iterator", ",", "key", "=", "None", ",", "reverse", "=", "False", ")", ":", "global", "MemoryBytesSpilled", ",", "DiskBytesSpilled", "batch", ",", "limit", "=", "100", ",", "self", ".", "_next_limit", "(", ")", "chunks...
docstring: Sort the elements in iterator, do external sort when the memory goes above the limit.
docstring_tokens: [ "Sort", "the", "elements", "in", "iterator", "do", "external", "sort", "when", "the", "memory", "goes", "above", "the", "limit", "." ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L455-L501
partition: train
summary: Sort the elements in iterator do external sort when the memory is below the limit.

repo: apache/spark
path: python/pyspark/shuffle.py
func_name: ExternalGroupBy._spill
original_string: def _spill(self): """ dump already partitioned data into disks. """ global MemoryBytesSpilled, DiskBytesSpilled path = self._get_spill_dir(self.spills) if not os.path.exists(path): os.makedirs(path) used_memory = get_used_memory() if not self....
language: python
code: def _spill(self): """ dump already partitioned data into disks. """ global MemoryBytesSpilled, DiskBytesSpilled path = self._get_spill_dir(self.spills) if not os.path.exists(path): os.makedirs(path) used_memory = get_used_memory() if not self....
code_tokens: [ "def", "_spill", "(", "self", ")", ":", "global", "MemoryBytesSpilled", ",", "DiskBytesSpilled", "path", "=", "self", ".", "_get_spill_dir", "(", "self", ".", "spills", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", "....
docstring: dump already partitioned data into disks.
docstring_tokens: [ "dump", "already", "partitioned", "data", "into", "disks", "." ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L709-L766
partition: train
summary: Dump already partitioned data into disks.

repo: apache/spark
path: python/pyspark/shuffle.py
func_name: ExternalGroupBy._merge_sorted_items
original_string: def _merge_sorted_items(self, index): """ load a partition from disk, then sort and group by key """ def load_partition(j): path = self._get_spill_dir(j) p = os.path.join(path, str(index)) with open(p, 'rb', 65536) as f: for v in self.serializer.load_s...
language: python
code: def _merge_sorted_items(self, index): """ load a partition from disk, then sort and group by key """ def load_partition(j): path = self._get_spill_dir(j) p = os.path.join(path, str(index)) with open(p, 'rb', 65536) as f: for v in self.serializer.load_s...
code_tokens: [ "def", "_merge_sorted_items", "(", "self", ",", "index", ")", ":", "def", "load_partition", "(", "j", ")", ":", "path", "=", "self", ".", "_get_spill_dir", "(", "j", ")", "p", "=", "os", ".", "path", ".", "join", "(", "path", ",", "str", "(", "inde...
docstring: load a partition from disk, then sort and group by key
docstring_tokens: [ "load", "a", "partition", "from", "disk", "then", "sort", "and", "group", "by", "key" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/shuffle.py#L786-L808
partition: train
summary: Load a partition from disk then sort and group by key
repo: apache/spark
path: python/pyspark/daemon.py
func_name: worker
original_string: def worker(sock, authenticated): """ Called by a worker process after the fork(). """ signal.signal(SIGHUP, SIG_DFL) signal.signal(SIGCHLD, SIG_DFL) signal.signal(SIGTERM, SIG_DFL) # restore the handler for SIGINT, # it's useful for debugging (show the stacktrace before exit) signal....
language: python
code: def worker(sock, authenticated): """ Called by a worker process after the fork(). """ signal.signal(SIGHUP, SIG_DFL) signal.signal(SIGCHLD, SIG_DFL) signal.signal(SIGTERM, SIG_DFL) # restore the handler for SIGINT, # it's useful for debugging (show the stacktrace before exit) signal....
code_tokens: [ "def", "worker", "(", "sock", ",", "authenticated", ")", ":", "signal", ".", "signal", "(", "SIGHUP", ",", "SIG_DFL", ")", "signal", ".", "signal", "(", "SIGCHLD", ",", "SIG_DFL", ")", "signal", ".", "signal", "(", "SIGTERM", ",", "SIG_DFL", ")", "# re...
docstring: Called by a worker process after the fork().
docstring_tokens: [ "Called", "by", "a", "worker", "process", "after", "the", "fork", "()", "." ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/daemon.py#L43-L81
partition: train
summary: This function is called by the worker process.

repo: apache/spark
path: python/pyspark/rdd.py
func_name: portable_hash
original_string: def portable_hash(x): """ This function returns consistent hash code for builtin types, especially for None and tuple with None. The algorithm is similar to that one used by CPython 2.7 >>> portable_hash(None) 0 >>> portable_hash((None, 1)) & 0xffffffff 219750521 """ if sys.ve...
language: python
code: def portable_hash(x): """ This function returns consistent hash code for builtin types, especially for None and tuple with None. The algorithm is similar to that one used by CPython 2.7 >>> portable_hash(None) 0 >>> portable_hash((None, 1)) & 0xffffffff 219750521 """ if sys.ve...
code_tokens: [ "def", "portable_hash", "(", "x", ")", ":", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "2", ",", "3", ")", "and", "'PYTHONHASHSEED'", "not", "in", "os", ".", "environ", ":", "raise", "Exception", "(", "\"Randomness of hash of string should be dis...
docstring: This function returns consistent hash code for builtin types, especially for None and tuple with None. The algorithm is similar to that one used by CPython 2.7 >>> portable_hash(None) 0 >>> portable_hash((None, 1)) & 0xffffffff 219750521
docstring_tokens: [ "This", "function", "returns", "consistent", "hash", "code", "for", "builtin", "types", "especially", "for", "None", "and", "tuple", "with", "None", "." ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L78-L106
partition: train
summary: This function returns consistent hash code for builtin types and tuple with None.

repo: apache/spark
path: python/pyspark/rdd.py
func_name: _parse_memory
original_string: def _parse_memory(s): """ Parse a memory string in the format supported by Java (e.g. 1g, 200m) and return the value in MiB >>> _parse_memory("256m") 256 >>> _parse_memory("2g") 2048 """ units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024} if s[-1].lower() not in units: ...
language: python
code: def _parse_memory(s): """ Parse a memory string in the format supported by Java (e.g. 1g, 200m) and return the value in MiB >>> _parse_memory("256m") 256 >>> _parse_memory("2g") 2048 """ units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024} if s[-1].lower() not in units: ...
code_tokens: [ "def", "_parse_memory", "(", "s", ")", ":", "units", "=", "{", "'g'", ":", "1024", ",", "'m'", ":", "1", ",", "'t'", ":", "1", "<<", "20", ",", "'k'", ":", "1.0", "/", "1024", "}", "if", "s", "[", "-", "1", "]", ".", "lower", "(", ")", "n...
docstring: Parse a memory string in the format supported by Java (e.g. 1g, 200m) and return the value in MiB >>> _parse_memory("256m") 256 >>> _parse_memory("2g") 2048
docstring_tokens: [ "Parse", "a", "memory", "string", "in", "the", "format", "supported", "by", "Java", "(", "e", ".", "g", ".", "1g", "200m", ")", "and", "return", "the", "value", "in", "MiB" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L125-L138
partition: train
summary: Parse a memory string in the format supported by Java and return the value in MiB.

repo: apache/spark
path: python/pyspark/rdd.py
func_name: ignore_unicode_prefix
original_string: def ignore_unicode_prefix(f): """ Ignore the 'u' prefix of string in doc tests, to make it works in both python 2 and 3 """ if sys.version >= '3': # the representation of unicode string in Python 3 does not have prefix 'u', # so remove the prefix 'u' for doc tests literal_re ...
language: python
code: def ignore_unicode_prefix(f): """ Ignore the 'u' prefix of string in doc tests, to make it works in both python 2 and 3 """ if sys.version >= '3': # the representation of unicode string in Python 3 does not have prefix 'u', # so remove the prefix 'u' for doc tests literal_re ...
code_tokens: [ "def", "ignore_unicode_prefix", "(", "f", ")", ":", "if", "sys", ".", "version", ">=", "'3'", ":", "# the representation of unicode string in Python 3 does not have prefix 'u',", "# so remove the prefix 'u' for doc tests", "literal_re", "=", "re", ".", "compile", "(", "r\"(...
docstring: Ignore the 'u' prefix of string in doc tests, to make it works in both python 2 and 3
docstring_tokens: [ "Ignore", "the", "u", "prefix", "of", "string", "in", "doc", "tests", "to", "make", "it", "works", "in", "both", "python", "2", "and", "3" ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L150-L160
partition: train
summary: Ignore the u prefix of string in doc tests

repo: apache/spark
path: python/pyspark/rdd.py
func_name: RDD.persist
original_string: def persist(self, storageLevel=StorageLevel.MEMORY_ONLY): """ Set this RDD's storage level to persist its values across operations after the first time it is computed. This can only be used to assign a new storage level if the RDD does not have a storage level set yet. If no stor...
language: python
code: def persist(self, storageLevel=StorageLevel.MEMORY_ONLY): """ Set this RDD's storage level to persist its values across operations after the first time it is computed. This can only be used to assign a new storage level if the RDD does not have a storage level set yet. If no stor...
code_tokens: [ "def", "persist", "(", "self", ",", "storageLevel", "=", "StorageLevel", ".", "MEMORY_ONLY", ")", ":", "self", ".", "is_cached", "=", "True", "javaStorageLevel", "=", "self", ".", "ctx", ".", "_getJavaStorageLevel", "(", "storageLevel", ")", "self", ".", "_j...
docstring: Set this RDD's storage level to persist its values across operations after the first time it is computed. This can only be used to assign a new storage level if the RDD does not have a storage level set yet. If no storage level is specified defaults to (C{MEMORY_ONLY}). >>> rdd = sc.par...
docstring_tokens: [ "Set", "this", "RDD", "s", "storage", "level", "to", "persist", "its", "values", "across", "operations", "after", "the", "first", "time", "it", "is", "computed", ".", "This", "can", "only", "be", "used", "to", "assign", "a", "new", "storage", "level", "i...
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L231-L245
partition: train
summary: Set this RDD s storage level to persist its values across operations .
repo: apache/spark
path: python/pyspark/rdd.py
func_name: RDD.flatMap
original_string: def flatMap(self, f, preservesPartitioning=False): """ Return a new RDD by first applying a function to all elements of this RDD, and then flattening the results. >>> rdd = sc.parallelize([2, 3, 4]) >>> sorted(rdd.flatMap(lambda x: range(1, x)).collect()) [1, 1, 1, 2, 2,...
language: python
code: def flatMap(self, f, preservesPartitioning=False): """ Return a new RDD by first applying a function to all elements of this RDD, and then flattening the results. >>> rdd = sc.parallelize([2, 3, 4]) >>> sorted(rdd.flatMap(lambda x: range(1, x)).collect()) [1, 1, 1, 2, 2,...
code_tokens: [ "def", "flatMap", "(", "self", ",", "f", ",", "preservesPartitioning", "=", "False", ")", ":", "def", "func", "(", "s", ",", "iterator", ")", ":", "return", "chain", ".", "from_iterable", "(", "map", "(", "fail_on_stopiteration", "(", "f", ")", ",", "i...
docstring: Return a new RDD by first applying a function to all elements of this RDD, and then flattening the results. >>> rdd = sc.parallelize([2, 3, 4]) >>> sorted(rdd.flatMap(lambda x: range(1, x)).collect()) [1, 1, 1, 2, 2, 3] >>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect(...
docstring_tokens: [ "Return", "a", "new", "RDD", "by", "first", "applying", "a", "function", "to", "all", "elements", "of", "this", "RDD", "and", "then", "flattening", "the", "results", "." ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L329-L342
partition: train
summary: Return a new RDD by first applying a function to all elements of this RDD and then flattening the results.

repo: apache/spark
path: python/pyspark/rdd.py
func_name: RDD.mapPartitionsWithSplit
original_string: def mapPartitionsWithSplit(self, f, preservesPartitioning=False): """ Deprecated: use mapPartitionsWithIndex instead. Return a new RDD by applying a function to each partition of this RDD, while tracking the index of the original partition. >>> rdd = sc.parallelize([1, 2, 3, 4]...
language: python
code: def mapPartitionsWithSplit(self, f, preservesPartitioning=False): """ Deprecated: use mapPartitionsWithIndex instead. Return a new RDD by applying a function to each partition of this RDD, while tracking the index of the original partition. >>> rdd = sc.parallelize([1, 2, 3, 4]...
code_tokens: [ "def", "mapPartitionsWithSplit", "(", "self", ",", "f", ",", "preservesPartitioning", "=", "False", ")", ":", "warnings", ".", "warn", "(", "\"mapPartitionsWithSplit is deprecated; \"", "\"use mapPartitionsWithIndex instead\"", ",", "DeprecationWarning", ",", "stacklevel", ...
docstring: Deprecated: use mapPartitionsWithIndex instead. Return a new RDD by applying a function to each partition of this RDD, while tracking the index of the original partition. >>> rdd = sc.parallelize([1, 2, 3, 4], 4) >>> def f(splitIndex, iterator): yield splitIndex >>> rdd.mapPart...
docstring_tokens: [ "Deprecated", ":", "use", "mapPartitionsWithIndex", "instead", "." ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L369-L383
partition: train
summary: Return a new RDD by applying a function to each partition of this RDD while tracking the index of the original partition.

repo: apache/spark
path: python/pyspark/rdd.py
func_name: RDD.sample
original_string: def sample(self, withReplacement, fraction, seed=None): """ Return a sampled subset of this RDD. :param withReplacement: can elements be sampled multiple times (replaced when sampled out) :param fraction: expected size of the sample as a fraction of this RDD's size without r...
language: python
code: def sample(self, withReplacement, fraction, seed=None): """ Return a sampled subset of this RDD. :param withReplacement: can elements be sampled multiple times (replaced when sampled out) :param fraction: expected size of the sample as a fraction of this RDD's size without r...
code_tokens: [ "def", "sample", "(", "self", ",", "withReplacement", ",", "fraction", ",", "seed", "=", "None", ")", ":", "assert", "fraction", ">=", "0.0", ",", "\"Negative fraction value: %s\"", "%", "fraction", "return", "self", ".", "mapPartitionsWithIndex", "(", "RDDSampl...
docstring: Return a sampled subset of this RDD. :param withReplacement: can elements be sampled multiple times (replaced when sampled out) :param fraction: expected size of the sample as a fraction of this RDD's size without replacement: probability that each element is chosen; fraction must be [0, 1]...
docstring_tokens: [ "Return", "a", "sampled", "subset", "of", "this", "RDD", "." ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L418-L436
partition: train
summary: Return a new RDD with the specified fraction of the total number of elements in this RDD.

repo: apache/spark
path: python/pyspark/rdd.py
func_name: RDD.randomSplit
original_string: def randomSplit(self, weights, seed=None): """ Randomly splits this RDD with the provided weights. :param weights: weights for splits, will be normalized if they don't sum to 1 :param seed: random seed :return: split RDDs in a list >>> rdd = sc.parallelize(range(500), 1...
language: python
code: def randomSplit(self, weights, seed=None): """ Randomly splits this RDD with the provided weights. :param weights: weights for splits, will be normalized if they don't sum to 1 :param seed: random seed :return: split RDDs in a list >>> rdd = sc.parallelize(range(500), 1...
code_tokens: [ "def", "randomSplit", "(", "self", ",", "weights", ",", "seed", "=", "None", ")", ":", "s", "=", "float", "(", "sum", "(", "weights", ")", ")", "cweights", "=", "[", "0.0", "]", "for", "w", "in", "weights", ":", "cweights", ".", "append", "(", "c...
docstring: Randomly splits this RDD with the provided weights. :param weights: weights for splits, will be normalized if they don't sum to 1 :param seed: random seed :return: split RDDs in a list >>> rdd = sc.parallelize(range(500), 1) >>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17) ...
docstring_tokens: [ "Randomly", "splits", "this", "RDD", "with", "the", "provided", "weights", "." ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L438-L462
partition: train
summary: Randomly splits this RDD with the provided weights.

repo: apache/spark
path: python/pyspark/rdd.py
func_name: RDD.takeSample
original_string: def takeSample(self, withReplacement, num, seed=None): """ Return a fixed-size sampled subset of this RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> rdd = sc.parallelize(...
language: python
code: def takeSample(self, withReplacement, num, seed=None): """ Return a fixed-size sampled subset of this RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> rdd = sc.parallelize(...
code_tokens: [ "def", "takeSample", "(", "self", ",", "withReplacement", ",", "num", ",", "seed", "=", "None", ")", ":", "numStDev", "=", "10.0", "if", "num", "<", "0", ":", "raise", "ValueError", "(", "\"Sample size cannot be negative.\"", ")", "elif", "num", "==", "0",...
docstring: Return a fixed-size sampled subset of this RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> rdd = sc.parallelize(range(0, 10)) >>> len(rdd.takeSample(True, 20, 1)) 20 ...
docstring_tokens: [ "Return", "a", "fixed", "-", "size", "sampled", "subset", "of", "this", "RDD", "." ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L465-L518
partition: train
summary: Return a fixed - size sampled subset of this RDD.

repo: apache/spark
path: python/pyspark/rdd.py
func_name: RDD._computeFractionForSampleSize
original_string: def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement): """ Returns a sampling rate that guarantees a sample of size >= sampleSizeLowerBound 99.99% of the time. How the sampling rate is determined: Let p = num / total, where num is the sample size and to...
language: python
code: def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement): """ Returns a sampling rate that guarantees a sample of size >= sampleSizeLowerBound 99.99% of the time. How the sampling rate is determined: Let p = num / total, where num is the sample size and to...
code_tokens: [ "def", "_computeFractionForSampleSize", "(", "sampleSizeLowerBound", ",", "total", ",", "withReplacement", ")", ":", "fraction", "=", "float", "(", "sampleSizeLowerBound", ")", "/", "total", "if", "withReplacement", ":", "numStDev", "=", "5", "if", "(", "sampleSiz...
docstring: Returns a sampling rate that guarantees a sample of size >= sampleSizeLowerBound 99.99% of the time. How the sampling rate is determined: Let p = num / total, where num is the sample size and total is the total number of data points in the RDD. We're trying to compute q > p such...
docstring_tokens: [ "Returns", "a", "sampling", "rate", "that", "guarantees", "a", "sample", "of", "size", ">", "=", "sampleSizeLowerBound", "99", ".", "99%", "of", "the", "time", "." ]
sha: 618d6bff71073c8c93501ab7392c3cc579730f0b
url: https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L521-L551
partition: train
summary: Compute the sampling rate for a specific sample size.
End of preview.
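As a usage sketch (assumptions: the same repo id and split as above, and that the `partition` column is the intended train/validation/test indicator; its two non-"train" values are not shown in this preview), the `code` and `summary` columns pair naturally as input and target for code summarization:

```python
from collections import Counter

from datasets import load_dataset

ds = load_dataset("frankjosh/filtered_dataset", split="train")

# The schema reports 3 distinct `partition` values, but only "train" appears
# in the preview above; counting the column reveals the other two.
print(Counter(ds["partition"]))

# Pair each function's source with its one-line summary, e.g. as
# (input, target) examples for a code-summarization model.
pairs = [(r["code"], r["summary"]) for r in ds.select(range(100))]
print(pairs[0][1])
```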
README.md exists but content is empty.
Downloads last month: 4

Spaces using frankjosh/filtered_dataset: 2