repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
has2k1/plotnine
plotnine/geoms/geom.py
geom.draw_layer
def draw_layer(self, data, layout, coord, **params): """ Draw layer across all panels geoms should not override this method. Parameters ---------- data : DataFrame DataFrame specific for this layer layout : Lanel Layout object created when the plot is getting built coord : coord Type of coordinate axes params : dict Combined *geom* and *stat* parameters. Also includes the stacking order of the layer in the plot (*zorder*) """ for pid, pdata in data.groupby('PANEL'): if len(pdata) == 0: continue ploc = pid - 1 panel_params = layout.panel_params[ploc] ax = layout.axs[ploc] self.draw_panel(pdata, panel_params, coord, ax, **params)
python
def draw_layer(self, data, layout, coord, **params):
    """
    Draw layer across all panels

    geoms should not override this method.

    Parameters
    ----------
    data : DataFrame
        DataFrame specific for this layer
    layout : Layout
        Layout object created when the plot is getting built
    coord : coord
        Type of coordinate axes
    params : dict
        Combined *geom* and *stat* parameters. Also includes
        the stacking order of the layer in the plot (*zorder*)
    """
    # Draw each panel's subset of the data on that panel's own axes
    for pid, pdata in data.groupby('PANEL'):
        if len(pdata) == 0:
            continue
        # PANEL ids are 1-based; panel_params/axs lists are 0-based
        ploc = pid - 1
        panel_params = layout.panel_params[ploc]
        ax = layout.axs[ploc]
        self.draw_panel(pdata, panel_params, coord, ax, **params)
[ "def", "draw_layer", "(", "self", ",", "data", ",", "layout", ",", "coord", ",", "*", "*", "params", ")", ":", "for", "pid", ",", "pdata", "in", "data", ".", "groupby", "(", "'PANEL'", ")", ":", "if", "len", "(", "pdata", ")", "==", "0", ":", "...
Draw layer across all panels geoms should not override this method. Parameters ---------- data : DataFrame DataFrame specific for this layer layout : Layout Layout object created when the plot is getting built coord : coord Type of coordinate axes params : dict Combined *geom* and *stat* parameters. Also includes the stacking order of the layer in the plot (*zorder*)
[ "Draw", "layer", "across", "all", "panels" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/geoms/geom.py#L195-L221
train
214,400
has2k1/plotnine
plotnine/geoms/geom.py
geom.draw_panel
def draw_panel(self, data, panel_params, coord, ax, **params): """ Plot all groups For effeciency, geoms that do not need to partition different groups before plotting should override this method and avoid the groupby. Parameters ---------- data : dataframe Data to be plotted by this geom. This is the dataframe created in the plot_build pipeline. panel_params : dict The scale information as may be required by the axes. At this point, that information is about ranges, ticks and labels. Keys of interest to the geom are:: 'x_range' # tuple 'y_range' # tuple coord : coord Coordinate (e.g. coord_cartesian) system of the geom. ax : axes Axes on which to plot. params : dict Combined parameters for the geom and stat. Also includes the 'zorder'. """ for _, gdata in data.groupby('group'): gdata.reset_index(inplace=True, drop=True) self.draw_group(gdata, panel_params, coord, ax, **params)
python
def draw_panel(self, data, panel_params, coord, ax, **params):
    """
    Plot all groups

    For efficiency, geoms that do not need to partition
    different groups before plotting should override this
    method and avoid the groupby.

    Parameters
    ----------
    data : dataframe
        Data to be plotted by this geom. This is the
        dataframe created in the plot_build pipeline.
    panel_params : dict
        The scale information as may be required by the
        axes. At this point, that information is about
        ranges, ticks and labels. Keys of interest to
        the geom are::

            'x_range'  # tuple
            'y_range'  # tuple
    coord : coord
        Coordinate (e.g. coord_cartesian) system of the geom.
    ax : axes
        Axes on which to plot.
    params : dict
        Combined parameters for the geom and stat. Also
        includes the 'zorder'.
    """
    # Draw one group at a time; draw_group does the actual plotting.
    # The index is reset so group-local code can rely on 0..n-1 labels.
    for _, gdata in data.groupby('group'):
        gdata.reset_index(inplace=True, drop=True)
        self.draw_group(gdata, panel_params, coord, ax, **params)
[ "def", "draw_panel", "(", "self", ",", "data", ",", "panel_params", ",", "coord", ",", "ax", ",", "*", "*", "params", ")", ":", "for", "_", ",", "gdata", "in", "data", ".", "groupby", "(", "'group'", ")", ":", "gdata", ".", "reset_index", "(", "inp...
Plot all groups For efficiency, geoms that do not need to partition different groups before plotting should override this method and avoid the groupby. Parameters ---------- data : dataframe Data to be plotted by this geom. This is the dataframe created in the plot_build pipeline. panel_params : dict The scale information as may be required by the axes. At this point, that information is about ranges, ticks and labels. Keys of interest to the geom are:: 'x_range' # tuple 'y_range' # tuple coord : coord Coordinate (e.g. coord_cartesian) system of the geom. ax : axes Axes on which to plot. params : dict Combined parameters for the geom and stat. Also includes the 'zorder'.
[ "Plot", "all", "groups" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/geoms/geom.py#L223-L256
train
214,401
has2k1/plotnine
plotnine/geoms/geom.py
geom._verify_arguments
def _verify_arguments(self, kwargs): """ Verify arguments passed to the geom """ geom_stat_args = kwargs.keys() | self._stat._kwargs.keys() unknown = (geom_stat_args - self.aesthetics() - # geom aesthetics self.DEFAULT_PARAMS.keys() - # geom parameters self._stat.aesthetics() - # stat aesthetics self._stat.DEFAULT_PARAMS.keys() - # stat parameters {'data', 'mapping', # layer parameters 'show_legend', 'inherit_aes'}) # layer parameters if unknown: msg = ("Parameters {}, are not understood by " "either the geom, stat or layer.") raise PlotnineError(msg.format(unknown))
python
def _verify_arguments(self, kwargs): """ Verify arguments passed to the geom """ geom_stat_args = kwargs.keys() | self._stat._kwargs.keys() unknown = (geom_stat_args - self.aesthetics() - # geom aesthetics self.DEFAULT_PARAMS.keys() - # geom parameters self._stat.aesthetics() - # stat aesthetics self._stat.DEFAULT_PARAMS.keys() - # stat parameters {'data', 'mapping', # layer parameters 'show_legend', 'inherit_aes'}) # layer parameters if unknown: msg = ("Parameters {}, are not understood by " "either the geom, stat or layer.") raise PlotnineError(msg.format(unknown))
[ "def", "_verify_arguments", "(", "self", ",", "kwargs", ")", ":", "geom_stat_args", "=", "kwargs", ".", "keys", "(", ")", "|", "self", ".", "_stat", ".", "_kwargs", ".", "keys", "(", ")", "unknown", "=", "(", "geom_stat_args", "-", "self", ".", "aesthe...
Verify arguments passed to the geom
[ "Verify", "arguments", "passed", "to", "the", "geom" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/geoms/geom.py#L358-L373
train
214,402
has2k1/plotnine
plotnine/geoms/geom.py
geom.handle_na
def handle_na(self, data): """ Remove rows with NaN values geoms that infer extra information from missing values should override this method. For example :class:`~plotnine.geoms.geom_path`. Parameters ---------- data : dataframe Data Returns ------- out : dataframe Data without the NaNs. Notes ----- Shows a warning if the any rows are removed and the `na_rm` parameter is False. It only takes into account the columns of the required aesthetics. """ return remove_missing(data, self.params['na_rm'], list(self.REQUIRED_AES | self.NON_MISSING_AES), self.__class__.__name__)
python
def handle_na(self, data):
    """
    Remove rows with NaN values

    geoms that infer extra information from missing values
    should override this method. For example
    :class:`~plotnine.geoms.geom_path`.

    Parameters
    ----------
    data : dataframe
        Data

    Returns
    -------
    out : dataframe
        Data without the NaNs.

    Notes
    -----
    Shows a warning if any rows are removed and the
    `na_rm` parameter is False. It only takes into account
    the columns of the required aesthetics.
    """
    # Only the required / non-missing aesthetic columns decide
    # whether a row is dropped; na_rm controls whether dropped
    # rows produce a warning (handled inside remove_missing).
    return remove_missing(data,
                          self.params['na_rm'],
                          list(self.REQUIRED_AES | self.NON_MISSING_AES),
                          self.__class__.__name__)
[ "def", "handle_na", "(", "self", ",", "data", ")", ":", "return", "remove_missing", "(", "data", ",", "self", ".", "params", "[", "'na_rm'", "]", ",", "list", "(", "self", ".", "REQUIRED_AES", "|", "self", ".", "NON_MISSING_AES", ")", ",", "self", ".",...
Remove rows with NaN values geoms that infer extra information from missing values should override this method. For example :class:`~plotnine.geoms.geom_path`. Parameters ---------- data : dataframe Data Returns ------- out : dataframe Data without the NaNs. Notes ----- Shows a warning if the any rows are removed and the `na_rm` parameter is False. It only takes into account the columns of the required aesthetics.
[ "Remove", "rows", "with", "NaN", "values" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/geoms/geom.py#L375-L402
train
214,403
has2k1/plotnine
plotnine/positions/position_stack.py
position_stack.setup_params
def setup_params(self, data): """ Verify, modify & return a copy of the params. """ # Variable for which to do the stacking if 'ymax' in data: if any((data['ymin'] != 0) & (data['ymax'] != 0)): warn("Stacking not well defined when not " "anchored on the axis.", PlotnineWarning) var = 'ymax' elif 'y' in data: var = 'y' else: warn("Stacking requires either ymin & ymax or y " "aesthetics. Maybe you want position = 'identity'?", PlotnineWarning) var = None params = self.params.copy() params['var'] = var params['fill'] = self.fill return params
python
def setup_params(self, data):
    """
    Verify, modify & return a copy of the params.
    """
    # Choose the variable on which to stack and warn when the
    # data cannot be stacked unambiguously.
    var = None
    if 'ymax' in data:
        var = 'ymax'
        not_anchored = (data['ymin'] != 0) & (data['ymax'] != 0)
        if not_anchored.any():
            warn("Stacking not well defined when not "
                 "anchored on the axis.", PlotnineWarning)
    elif 'y' in data:
        var = 'y'
    else:
        warn("Stacking requires either ymin & ymax or y "
             "aesthetics. Maybe you want position = 'identity'?",
             PlotnineWarning)

    new_params = self.params.copy()
    new_params['var'] = var
    new_params['fill'] = self.fill
    return new_params
[ "def", "setup_params", "(", "self", ",", "data", ")", ":", "# Variable for which to do the stacking", "if", "'ymax'", "in", "data", ":", "if", "any", "(", "(", "data", "[", "'ymin'", "]", "!=", "0", ")", "&", "(", "data", "[", "'ymax'", "]", "!=", "0",...
Verify, modify & return a copy of the params.
[ "Verify", "modify", "&", "return", "a", "copy", "of", "the", "params", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/positions/position_stack.py#L24-L45
train
214,404
has2k1/plotnine
plotnine/positions/position_stack.py
position_stack.strategy
def strategy(data, params): """ Stack overlapping intervals. Assumes that each set has the same horizontal position """ vjust = params['vjust'] y = data['y'].copy() y[np.isnan(y)] = 0 heights = np.append(0, y.cumsum()) if params['fill']: heights = heights / np.abs(heights[-1]) data['ymin'] = np.min([heights[:-1], heights[1:]], axis=0) data['ymax'] = np.max([heights[:-1], heights[1:]], axis=0) # less intuitive than (ymin + vjust(ymax-ymin)), but # this way avoids subtracting numbers of potentially # similar precision data['y'] = ((1-vjust)*data['ymin'] + vjust*data['ymax']) return data
python
def strategy(data, params):
    """
    Stack overlapping intervals.

    Assumes that each set has the same horizontal position
    """
    vjust = params['vjust']

    # Treat missing heights as zero so they do not poison
    # the cumulative sum
    y = data['y'].copy()
    y[np.isnan(y)] = 0
    cum = np.append(0, y.cumsum())

    # Normalise to unit total height when filling
    if params['fill']:
        cum = cum / np.abs(cum[-1])

    lower, upper = cum[:-1], cum[1:]
    data['ymin'] = np.minimum(lower, upper)
    data['ymax'] = np.maximum(lower, upper)

    # less intuitive than (ymin + vjust(ymax-ymin)), but
    # this way avoids subtracting numbers of potentially
    # similar precision
    data['y'] = (1 - vjust) * data['ymin'] + vjust * data['ymax']
    return data
[ "def", "strategy", "(", "data", ",", "params", ")", ":", "vjust", "=", "params", "[", "'vjust'", "]", "y", "=", "data", "[", "'y'", "]", ".", "copy", "(", ")", "y", "[", "np", ".", "isnan", "(", "y", ")", "]", "=", "0", "heights", "=", "np", ...
Stack overlapping intervals. Assumes that each set has the same horizontal position
[ "Stack", "overlapping", "intervals", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/positions/position_stack.py#L83-L104
train
214,405
has2k1/plotnine
plotnine/stats/stat_bindot.py
densitybin
def densitybin(x, weight=None, binwidth=None, bins=None, rangee=None): """ Do density binning It does not collapse each bin with a count. Parameters ---------- x : array-like Numbers to bin weight : array-like Weights binwidth : numeric Size of the bins rangee : tuple Range of x Returns ------- data : DataFrame """ if all(pd.isnull(x)): return pd.DataFrame() if weight is None: weight = np.ones(len(x)) weight = np.asarray(weight) weight[np.isnan(weight)] = 0 if rangee is None: rangee = np.min(x), np.max(x) if bins is None: bins = 30 if binwidth is None: binwidth = np.ptp(rangee) / bins # Sort weight and x, by x order = np.argsort(x) weight = weight[order] x = x[order] cbin = 0 # Current bin ID binn = [None] * len(x) # The bin ID for each observation # End position of current bin (scan left to right) binend = -np.inf # Scan list and put dots in bins for i, value in enumerate(x): # If past end of bin, start a new bin at this point if value >= binend: binend = value + binwidth cbin = cbin + 1 binn[i] = cbin def func(series): return (series.min()+series.max())/2 results = pd.DataFrame({'x': x, 'bin': binn, 'binwidth': binwidth, 'weight': weight}) # This is a plyr::ddply results['bincenter'] = results.groupby('bin')['x'].transform(func) return results
python
def densitybin(x, weight=None, binwidth=None, bins=None, rangee=None):
    """
    Do density binning

    It does not collapse each bin with a count.

    Parameters
    ----------
    x : array-like
        Numbers to bin
    weight : array-like
        Weights
    binwidth : numeric
        Size of the bins. If None, it is computed as
        ``np.ptp(rangee) / bins``.
    bins : int
        Number of bins used to derive *binwidth* when
        *binwidth* is None. Default is 30.
    rangee : tuple
        Range of x

    Returns
    -------
    data : DataFrame
        Columns ``x`` (sorted), ``bin`` (1-based bin id),
        ``binwidth``, ``weight`` and ``bincenter``.
    """
    if all(pd.isnull(x)):
        return pd.DataFrame()

    # Accept any array-like (list, tuple, Series, ndarray);
    # the fancy-indexing below requires an ndarray.
    x = np.asarray(x)

    if weight is None:
        weight = np.ones(len(x))
    weight = np.asarray(weight)
    weight[np.isnan(weight)] = 0

    if rangee is None:
        rangee = np.min(x), np.max(x)
    if bins is None:
        bins = 30
    if binwidth is None:
        binwidth = np.ptp(rangee) / bins

    # Sort weight and x, by x
    order = np.argsort(x)
    weight = weight[order]
    x = x[order]

    cbin = 0                # Current bin ID
    binn = [None] * len(x)  # The bin ID for each observation
    # End position of current bin (scan left to right)
    binend = -np.inf

    # Scan list and put dots in bins
    for i, value in enumerate(x):
        # If past end of bin, start a new bin at this point
        if value >= binend:
            binend = value + binwidth
            cbin = cbin + 1
        binn[i] = cbin

    def func(series):
        # Midpoint of the extreme x values in the bin
        return (series.min()+series.max())/2

    results = pd.DataFrame({'x': x,
                            'bin': binn,
                            'binwidth': binwidth,
                            'weight': weight})
    # This is a plyr::ddply
    results['bincenter'] = results.groupby('bin')['x'].transform(func)
    return results
[ "def", "densitybin", "(", "x", ",", "weight", "=", "None", ",", "binwidth", "=", "None", ",", "bins", "=", "None", ",", "rangee", "=", "None", ")", ":", "if", "all", "(", "pd", ".", "isnull", "(", "x", ")", ")", ":", "return", "pd", ".", "DataF...
Do density binning It does not collapse each bin with a count. Parameters ---------- x : array-like Numbers to bin weight : array-like Weights binwidth : numeric Size of the bins rangee : tuple Range of x Returns ------- data : DataFrame
[ "Do", "density", "binning" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/stat_bindot.py#L215-L278
train
214,406
has2k1/plotnine
plotnine/themes/theme.py
theme_get
def theme_get(): """ Return the default theme The default theme is the one set (using :func:`theme_set`) by the user. If none has been set, then :class:`theme_gray` is the default. """ from .theme_gray import theme_gray _theme = get_option('current_theme') if isinstance(_theme, type): _theme = _theme() return _theme or theme_gray()
python
def theme_get():
    """
    Return the default theme

    The default theme is the one set (using :func:`theme_set`) by
    the user. If none has been set, then :class:`theme_gray` is
    the default.
    """
    from .theme_gray import theme_gray

    current = get_option('current_theme')
    # The option may hold a theme class rather than an instance;
    # instantiate it before use.
    if isinstance(current, type):
        current = current()
    if current:
        return current
    return theme_gray()
[ "def", "theme_get", "(", ")", ":", "from", ".", "theme_gray", "import", "theme_gray", "_theme", "=", "get_option", "(", "'current_theme'", ")", "if", "isinstance", "(", "_theme", ",", "type", ")", ":", "_theme", "=", "_theme", "(", ")", "return", "_theme",...
Return the default theme The default theme is the one set (using :func:`theme_set`) by the user. If none has been set, then :class:`theme_gray` is the default.
[ "Return", "the", "default", "theme" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/themes/theme.py#L277-L289
train
214,407
has2k1/plotnine
plotnine/themes/theme.py
theme.apply
def apply(self, ax): """ Apply this theme, then apply additional modifications in order. Subclasses that override this method should make sure that the base class method is called. """ for th in self.themeables.values(): th.apply(ax)
python
def apply(self, ax):
    """
    Apply this theme, then apply additional modifications in order.

    Subclasses that override this method should make sure that the
    base class method is called.
    """
    # Delegate to every themeable; each one knows how to modify ax
    for themeable in self.themeables.values():
        themeable.apply(ax)
[ "def", "apply", "(", "self", ",", "ax", ")", ":", "for", "th", "in", "self", ".", "themeables", ".", "values", "(", ")", ":", "th", ".", "apply", "(", "ax", ")" ]
Apply this theme, then apply additional modifications in order. Subclasses that override this method should make sure that the base class method is called.
[ "Apply", "this", "theme", "then", "apply", "additional", "modifications", "in", "order", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/themes/theme.py#L114-L122
train
214,408
has2k1/plotnine
plotnine/themes/theme.py
theme.apply_rcparams
def apply_rcparams(self): """ Set the rcParams """ from matplotlib import rcParams for key, val in self.rcParams.items(): try: rcParams[key] = val except Exception as e: msg = ("""Setting "mpl.rcParams['{}']={}" """ "raised an Exception: {}") raise PlotnineError(msg.format(key, val, e))
python
def apply_rcparams(self):
    """
    Set the rcParams

    Copies every entry of this theme's ``rcParams`` into
    matplotlib's global ``rcParams``.

    Raises
    ------
    PlotnineError
        If matplotlib rejects any of the settings.
    """
    from matplotlib import rcParams
    for key, val in self.rcParams.items():
        try:
            rcParams[key] = val
        except Exception as e:
            # Re-raise with context so the user can tell which
            # setting matplotlib rejected
            msg = ("""Setting "mpl.rcParams['{}']={}" """
                   "raised an Exception: {}")
            raise PlotnineError(msg.format(key, val, e))
[ "def", "apply_rcparams", "(", "self", ")", ":", "from", "matplotlib", "import", "rcParams", "for", "key", ",", "val", "in", "self", ".", "rcParams", ".", "items", "(", ")", ":", "try", ":", "rcParams", "[", "key", "]", "=", "val", "except", "Exception"...
Set the rcParams
[ "Set", "the", "rcParams" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/themes/theme.py#L148-L159
train
214,409
has2k1/plotnine
plotnine/themes/theme.py
theme.rcParams
def rcParams(self): """ Return rcParams dict for this theme. Notes ----- Subclasses should not need to override this method method as long as self._rcParams is constructed properly. rcParams are used during plotting. Sometimes the same theme can be achieved by setting rcParams before plotting or a apply after plotting. The choice of how to implement it is is a matter of convenience in that case. There are certain things can only be themed after plotting. There may not be an rcParam to control the theme or the act of plotting may cause an entity to come into existence before it can be themed. """ try: rcParams = deepcopy(self._rcParams) except NotImplementedError: # deepcopy raises an error for objects that are drived from or # composed of matplotlib.transform.TransformNode. # Not desirable, but probably requires upstream fix. # In particular, XKCD uses matplotlib.patheffects.withStrok rcParams = copy(self._rcParams) for th in self.themeables.values(): rcParams.update(th.rcParams) return rcParams
python
def rcParams(self):
    """
    Return rcParams dict for this theme.

    Notes
    -----
    Subclasses should not need to override this method
    as long as self._rcParams is constructed properly.

    rcParams are used during plotting. Sometimes the same theme
    can be achieved by setting rcParams before plotting or by
    applying themeables after plotting. The choice of how to
    implement it is a matter of convenience in that case.

    There are certain things that can only be themed after
    plotting. There may not be an rcParam to control the theme
    or the act of plotting may cause an entity to come into
    existence before it can be themed.
    """
    try:
        result = deepcopy(self._rcParams)
    except NotImplementedError:
        # deepcopy raises an error for objects that are derived
        # from or composed of matplotlib.transform.TransformNode.
        # Not desirable, but probably requires an upstream fix.
        # In particular, XKCD uses matplotlib.patheffects.withStroke
        result = copy(self._rcParams)

    # Themeables may extend/override the theme's own settings
    for themeable in self.themeables.values():
        result.update(themeable.rcParams)

    return result
[ "def", "rcParams", "(", "self", ")", ":", "try", ":", "rcParams", "=", "deepcopy", "(", "self", ".", "_rcParams", ")", "except", "NotImplementedError", ":", "# deepcopy raises an error for objects that are drived from or", "# composed of matplotlib.transform.TransformNode.", ...
Return rcParams dict for this theme. Notes ----- Subclasses should not need to override this method method as long as self._rcParams is constructed properly. rcParams are used during plotting. Sometimes the same theme can be achieved by setting rcParams before plotting or a apply after plotting. The choice of how to implement it is is a matter of convenience in that case. There are certain things can only be themed after plotting. There may not be an rcParam to control the theme or the act of plotting may cause an entity to come into existence before it can be themed.
[ "Return", "rcParams", "dict", "for", "this", "theme", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/themes/theme.py#L162-L193
train
214,410
has2k1/plotnine
plotnine/themes/theme.py
theme.add_theme
def add_theme(self, other, inplace=False): """Add themes together. Subclasses should not override this method. This will be called when adding two instances of class 'theme' together. A complete theme will annihilate any previous themes. Partial themes can be added together and can be added to a complete theme. """ if other.complete: return other theme_copy = self if inplace else deepcopy(self) theme_copy.themeables.update(deepcopy(other.themeables)) return theme_copy
python
def add_theme(self, other, inplace=False):
    """
    Add themes together.

    Subclasses should not override this method. This will be
    called when adding two instances of class 'theme' together.

    A complete theme will annihilate any previous themes.
    Partial themes can be added together and can be added to
    a complete theme.
    """
    # A complete theme wipes out whatever came before it
    if other.complete:
        return other

    # Either mutate self or work on an independent copy
    result = self if inplace else deepcopy(self)
    result.themeables.update(deepcopy(other.themeables))
    return result
[ "def", "add_theme", "(", "self", ",", "other", ",", "inplace", "=", "False", ")", ":", "if", "other", ".", "complete", ":", "return", "other", "theme_copy", "=", "self", "if", "inplace", "else", "deepcopy", "(", "self", ")", "theme_copy", ".", "themeable...
Add themes together. Subclasses should not override this method. This will be called when adding two instances of class 'theme' together. A complete theme will annihilate any previous themes. Partial themes can be added together and can be added to a complete theme.
[ "Add", "themes", "together", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/themes/theme.py#L195-L210
train
214,411
has2k1/plotnine
plotnine/animation.py
PlotnineAnimation._draw_plots
def _draw_plots(self, plots): """ Plot and return the figure and artists Parameters ---------- plots : iterable ggplot objects that make up the the frames of the animation Returns ------- figure : matplotlib.figure.Figure Matplotlib figure artists : list List of :class:`Matplotlib.artist.artist` """ # For keeping track of artists for each frame artist_offsets = { 'collections': [], 'patches': [], 'lines': [], 'texts': [], 'artists': [] } scale_limits = dict() def initialise_artist_offsets(n): """ Initilise artists_offsets arrays to zero Parameters ---------- n : int Number of axes to initialise artists for. The artists for each axes are tracked separately. """ for artist_type in artist_offsets: artist_offsets[artist_type] = [0] * n def get_frame_artists(plot): """ Parameters ---------- plot : ggplot Drawn ggplot object from which to extract artists. """ # The axes accumulate artists for all frames # For each frame we pickup the newly added artists # We use offsets to mark the end of the previous frame # e.g ax.collections[start:] frame_artists = [] for i, ax in enumerate(plot.axs): for name in artist_offsets: start = artist_offsets[name][i] new_artists = getattr(ax, name)[start:] frame_artists.extend(new_artists) artist_offsets[name][i] += len(new_artists) return frame_artists def set_scale_limits(plot): """ Set limits of all the scales in the animation Should be called before :func:`check_scale_limits`. Parameters ---------- plot : ggplot First ggplot object that has been drawn """ for sc in plot.scales: ae = sc.aesthetics[0] scale_limits[ae] = sc.limits def check_scale_limits(plot, frame_no): """ Check limits of the scales of a plot in the animation Raises a PlotnineError if any of the scales has limits that do not match those of the first plot/frame. Should be called after :func:`set_scale_limits`. 
Parameters ---------- plot : ggplot ggplot object that has been drawn frame_no : int Frame number """ if len(scale_limits) != len(plot.scales): raise PlotnineError( "All plots must have the same number of scales " "as the first plot of the animation." ) for sc in plot.scales: ae = sc.aesthetics[0] if ae not in scale_limits: raise PlotnineError( "The plot for frame {} does not have a scale " "for the {} aesthetic.".format(frame_no, ae) ) if sc.limits != scale_limits[ae]: raise PlotnineError( "The {} scale of plot for frame {} has different " "limits from those of the first frame." "".format(ae, frame_no) ) figure = None axs = None artists = [] # The first ggplot creates the figure, axes and the initial # frame of the animation. The rest of the ggplots draw # onto the figure and axes created by the first ggplot and # they create the subsequent frames. for frame_no, p in enumerate(plots): if figure is None: figure, plot = p.draw(return_ggplot=True) axs = plot.axs initialise_artist_offsets(len(axs)) set_scale_limits(plot) else: p = copy(p) plot = p._draw_using_figure(figure, axs) try: check_scale_limits(plot, frame_no) except PlotnineError as err: plt.close(figure) raise err artists.append(get_frame_artists(plot)) if figure is None: figure = plt.figure() return figure, artists
python
def _draw_plots(self, plots):
    """
    Plot and return the figure and artists

    Parameters
    ----------
    plots : iterable
        ggplot objects that make up the frames of the animation

    Returns
    -------
    figure : matplotlib.figure.Figure
        Matplotlib figure
    artists : list
        List of :class:`Matplotlib.artist.artist`
    """
    # For keeping track of artists for each frame
    artist_offsets = {
        'collections': [],
        'patches': [],
        'lines': [],
        'texts': [],
        'artists': []
    }

    # Scale limits of the first frame; every other frame must match
    scale_limits = dict()

    def initialise_artist_offsets(n):
        """
        Initialise artists_offsets arrays to zero

        Parameters
        ----------
        n : int
            Number of axes to initialise artists for.
            The artists for each axes are tracked separately.
        """
        for artist_type in artist_offsets:
            artist_offsets[artist_type] = [0] * n

    def get_frame_artists(plot):
        """
        Return the artists added to the axes by the last drawn frame

        Parameters
        ----------
        plot : ggplot
            Drawn ggplot object from which to extract artists.
        """
        # The axes accumulate artists for all frames
        # For each frame we pickup the newly added artists
        # We use offsets to mark the end of the previous frame
        # e.g ax.collections[start:]
        frame_artists = []
        for i, ax in enumerate(plot.axs):
            for name in artist_offsets:
                start = artist_offsets[name][i]
                new_artists = getattr(ax, name)[start:]
                frame_artists.extend(new_artists)
                artist_offsets[name][i] += len(new_artists)
        return frame_artists

    def set_scale_limits(plot):
        """
        Set limits of all the scales in the animation

        Should be called before :func:`check_scale_limits`.

        Parameters
        ----------
        plot : ggplot
            First ggplot object that has been drawn
        """
        for sc in plot.scales:
            ae = sc.aesthetics[0]
            scale_limits[ae] = sc.limits

    def check_scale_limits(plot, frame_no):
        """
        Check limits of the scales of a plot in the animation

        Raises a PlotnineError if any of the scales has limits
        that do not match those of the first plot/frame.

        Should be called after :func:`set_scale_limits`.

        Parameters
        ----------
        plot : ggplot
            ggplot object that has been drawn

        frame_no : int
            Frame number
        """
        if len(scale_limits) != len(plot.scales):
            raise PlotnineError(
                "All plots must have the same number of scales "
                "as the first plot of the animation."
            )

        for sc in plot.scales:
            ae = sc.aesthetics[0]
            if ae not in scale_limits:
                raise PlotnineError(
                    "The plot for frame {} does not have a scale "
                    "for the {} aesthetic.".format(frame_no, ae)
                )
            if sc.limits != scale_limits[ae]:
                raise PlotnineError(
                    "The {} scale of plot for frame {} has different "
                    "limits from those of the first frame."
                    "".format(ae, frame_no)
                )

    figure = None
    axs = None
    artists = []

    # The first ggplot creates the figure, axes and the initial
    # frame of the animation. The rest of the ggplots draw
    # onto the figure and axes created by the first ggplot and
    # they create the subsequent frames.
    for frame_no, p in enumerate(plots):
        if figure is None:
            figure, plot = p.draw(return_ggplot=True)
            axs = plot.axs
            initialise_artist_offsets(len(axs))
            set_scale_limits(plot)
        else:
            # Copy so the caller's plot object is not mutated by
            # the draw
            p = copy(p)
            plot = p._draw_using_figure(figure, axs)

        try:
            check_scale_limits(plot, frame_no)
        except PlotnineError as err:
            # Close the figure so it does not leak on failure
            plt.close(figure)
            raise err

        artists.append(get_frame_artists(plot))

    # No frames at all: return an empty figure
    if figure is None:
        figure = plt.figure()

    return figure, artists
[ "def", "_draw_plots", "(", "self", ",", "plots", ")", ":", "# For keeping track of artists for each frame", "artist_offsets", "=", "{", "'collections'", ":", "[", "]", ",", "'patches'", ":", "[", "]", ",", "'lines'", ":", "[", "]", ",", "'texts'", ":", "[", ...
Plot and return the figure and artists Parameters ---------- plots : iterable ggplot objects that make up the the frames of the animation Returns ------- figure : matplotlib.figure.Figure Matplotlib figure artists : list List of :class:`Matplotlib.artist.artist`
[ "Plot", "and", "return", "the", "figure", "and", "artists" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/animation.py#L54-L194
train
214,412
has2k1/plotnine
plotnine/coords/coord_flip.py
flip_labels
def flip_labels(obj): """ Rename fields x to y and y to x Parameters ---------- obj : dict_like Object with labels to rename """ def sub(a, b): """ Substitute all keys that start with a to b """ for label in list(obj.keys()): if label.startswith(a): new_label = b+label[1:] obj[new_label] = obj.pop(label) sub('x', 'z') sub('y', 'x') sub('z', 'y') return obj
python
def flip_labels(obj): """ Rename fields x to y and y to x Parameters ---------- obj : dict_like Object with labels to rename """ def sub(a, b): """ Substitute all keys that start with a to b """ for label in list(obj.keys()): if label.startswith(a): new_label = b+label[1:] obj[new_label] = obj.pop(label) sub('x', 'z') sub('y', 'x') sub('z', 'y') return obj
[ "def", "flip_labels", "(", "obj", ")", ":", "def", "sub", "(", "a", ",", "b", ")", ":", "\"\"\"\n Substitute all keys that start with a to b\n \"\"\"", "for", "label", "in", "list", "(", "obj", ".", "keys", "(", ")", ")", ":", "if", "label", "....
Rename fields x to y and y to x Parameters ---------- obj : dict_like Object with labels to rename
[ "Rename", "fields", "x", "to", "y", "and", "y", "to", "x" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/coords/coord_flip.py#L58-L79
train
214,413
has2k1/plotnine
plotnine/stats/stat_summary.py
bootstrap_statistics
def bootstrap_statistics(series, statistic, n_samples=1000, confidence_interval=0.95, random_state=None): """ Default parameters taken from R's Hmisc smean.cl.boot """ if random_state is None: random_state = np.random alpha = 1 - confidence_interval size = (n_samples, len(series)) inds = random_state.randint(0, len(series), size=size) samples = series.values[inds] means = np.sort(statistic(samples, axis=1)) return pd.DataFrame({'ymin': means[int((alpha/2)*n_samples)], 'ymax': means[int((1-alpha/2)*n_samples)], 'y': [statistic(series)]})
python
def bootstrap_statistics(series, statistic, n_samples=1000, confidence_interval=0.95, random_state=None): """ Default parameters taken from R's Hmisc smean.cl.boot """ if random_state is None: random_state = np.random alpha = 1 - confidence_interval size = (n_samples, len(series)) inds = random_state.randint(0, len(series), size=size) samples = series.values[inds] means = np.sort(statistic(samples, axis=1)) return pd.DataFrame({'ymin': means[int((alpha/2)*n_samples)], 'ymax': means[int((1-alpha/2)*n_samples)], 'y': [statistic(series)]})
[ "def", "bootstrap_statistics", "(", "series", ",", "statistic", ",", "n_samples", "=", "1000", ",", "confidence_interval", "=", "0.95", ",", "random_state", "=", "None", ")", ":", "if", "random_state", "is", "None", ":", "random_state", "=", "np", ".", "rand...
Default parameters taken from R's Hmisc smean.cl.boot
[ "Default", "parameters", "taken", "from", "R", "s", "Hmisc", "smean", ".", "cl", ".", "boot" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/stat_summary.py#L11-L27
train
214,414
has2k1/plotnine
plotnine/stats/stat_summary.py
mean_cl_boot
def mean_cl_boot(series, n_samples=1000, confidence_interval=0.95, random_state=None): """ Bootstrapped mean with confidence limits """ return bootstrap_statistics(series, np.mean, n_samples=n_samples, confidence_interval=confidence_interval, random_state=random_state)
python
def mean_cl_boot(series, n_samples=1000, confidence_interval=0.95, random_state=None): """ Bootstrapped mean with confidence limits """ return bootstrap_statistics(series, np.mean, n_samples=n_samples, confidence_interval=confidence_interval, random_state=random_state)
[ "def", "mean_cl_boot", "(", "series", ",", "n_samples", "=", "1000", ",", "confidence_interval", "=", "0.95", ",", "random_state", "=", "None", ")", ":", "return", "bootstrap_statistics", "(", "series", ",", "np", ".", "mean", ",", "n_samples", "=", "n_sampl...
Bootstrapped mean with confidence limits
[ "Bootstrapped", "mean", "with", "confidence", "limits" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/stat_summary.py#L30-L38
train
214,415
has2k1/plotnine
plotnine/stats/stat_summary.py
mean_sdl
def mean_sdl(series, mult=2): """ mean plus or minus a constant times the standard deviation """ m = series.mean() s = series.std() return pd.DataFrame({'y': [m], 'ymin': m-mult*s, 'ymax': m+mult*s})
python
def mean_sdl(series, mult=2): """ mean plus or minus a constant times the standard deviation """ m = series.mean() s = series.std() return pd.DataFrame({'y': [m], 'ymin': m-mult*s, 'ymax': m+mult*s})
[ "def", "mean_sdl", "(", "series", ",", "mult", "=", "2", ")", ":", "m", "=", "series", ".", "mean", "(", ")", "s", "=", "series", ".", "std", "(", ")", "return", "pd", ".", "DataFrame", "(", "{", "'y'", ":", "[", "m", "]", ",", "'ymin'", ":",...
mean plus or minus a constant times the standard deviation
[ "mean", "plus", "or", "minus", "a", "constant", "times", "the", "standard", "deviation" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/stat_summary.py#L54-L62
train
214,416
has2k1/plotnine
plotnine/stats/stat_summary.py
median_hilow
def median_hilow(series, confidence_interval=0.95): """ Median and a selected pair of outer quantiles having equal tail areas """ tail = (1 - confidence_interval) / 2 return pd.DataFrame({'y': [np.median(series)], 'ymin': np.percentile(series, 100 * tail), 'ymax': np.percentile(series, 100 * (1 - tail))})
python
def median_hilow(series, confidence_interval=0.95): """ Median and a selected pair of outer quantiles having equal tail areas """ tail = (1 - confidence_interval) / 2 return pd.DataFrame({'y': [np.median(series)], 'ymin': np.percentile(series, 100 * tail), 'ymax': np.percentile(series, 100 * (1 - tail))})
[ "def", "median_hilow", "(", "series", ",", "confidence_interval", "=", "0.95", ")", ":", "tail", "=", "(", "1", "-", "confidence_interval", ")", "/", "2", "return", "pd", ".", "DataFrame", "(", "{", "'y'", ":", "[", "np", ".", "median", "(", "series", ...
Median and a selected pair of outer quantiles having equal tail areas
[ "Median", "and", "a", "selected", "pair", "of", "outer", "quantiles", "having", "equal", "tail", "areas" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/stat_summary.py#L65-L72
train
214,417
has2k1/plotnine
plotnine/stats/stat_summary.py
mean_se
def mean_se(series, mult=1): """ Calculate mean and standard errors on either side """ m = np.mean(series) se = mult * np.sqrt(np.var(series) / len(series)) return pd.DataFrame({'y': [m], 'ymin': m-se, 'ymax': m+se})
python
def mean_se(series, mult=1): """ Calculate mean and standard errors on either side """ m = np.mean(series) se = mult * np.sqrt(np.var(series) / len(series)) return pd.DataFrame({'y': [m], 'ymin': m-se, 'ymax': m+se})
[ "def", "mean_se", "(", "series", ",", "mult", "=", "1", ")", ":", "m", "=", "np", ".", "mean", "(", "series", ")", "se", "=", "mult", "*", "np", ".", "sqrt", "(", "np", ".", "var", "(", "series", ")", "/", "len", "(", "series", ")", ")", "r...
Calculate mean and standard errors on either side
[ "Calculate", "mean", "and", "standard", "errors", "on", "either", "side" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/stat_summary.py#L75-L83
train
214,418
has2k1/plotnine
plotnine/facets/facet_null.py
facet_null.set_breaks_and_labels
def set_breaks_and_labels(self, ranges, layout_info, pidx): """ Add breaks and labels to the axes Parameters ---------- ranges : dict-like range information for the axes layout_info : dict-like facet layout information pidx : int Panel index """ ax = self.axs[pidx] facet.set_breaks_and_labels(self, ranges, layout_info, pidx) ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left')
python
def set_breaks_and_labels(self, ranges, layout_info, pidx): """ Add breaks and labels to the axes Parameters ---------- ranges : dict-like range information for the axes layout_info : dict-like facet layout information pidx : int Panel index """ ax = self.axs[pidx] facet.set_breaks_and_labels(self, ranges, layout_info, pidx) ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left')
[ "def", "set_breaks_and_labels", "(", "self", ",", "ranges", ",", "layout_info", ",", "pidx", ")", ":", "ax", "=", "self", ".", "axs", "[", "pidx", "]", "facet", ".", "set_breaks_and_labels", "(", "self", ",", "ranges", ",", "layout_info", ",", "pidx", ")...
Add breaks and labels to the axes Parameters ---------- ranges : dict-like range information for the axes layout_info : dict-like facet layout information pidx : int Panel index
[ "Add", "breaks", "and", "labels", "to", "the", "axes" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet_null.py#L27-L43
train
214,419
has2k1/plotnine
plotnine/facets/facet_null.py
facet_null.spaceout_and_resize_panels
def spaceout_and_resize_panels(self): """ Adjust the space between the panels """ # Only deal with the aspect ratio figure = self.figure theme = self.theme try: aspect_ratio = theme.themeables.property('aspect_ratio') except KeyError: aspect_ratio = self.coordinates.aspect( self.layout.panel_params[0]) if aspect_ratio is None: return left = figure.subplotpars.left right = figure.subplotpars.right top = figure.subplotpars.top bottom = figure.subplotpars.bottom W, H = figure.get_size_inches() w = (right-left)*W h = w*aspect_ratio H = h / (top-bottom) figure.set_figheight(H)
python
def spaceout_and_resize_panels(self): """ Adjust the space between the panels """ # Only deal with the aspect ratio figure = self.figure theme = self.theme try: aspect_ratio = theme.themeables.property('aspect_ratio') except KeyError: aspect_ratio = self.coordinates.aspect( self.layout.panel_params[0]) if aspect_ratio is None: return left = figure.subplotpars.left right = figure.subplotpars.right top = figure.subplotpars.top bottom = figure.subplotpars.bottom W, H = figure.get_size_inches() w = (right-left)*W h = w*aspect_ratio H = h / (top-bottom) figure.set_figheight(H)
[ "def", "spaceout_and_resize_panels", "(", "self", ")", ":", "# Only deal with the aspect ratio", "figure", "=", "self", ".", "figure", "theme", "=", "self", ".", "theme", "try", ":", "aspect_ratio", "=", "theme", ".", "themeables", ".", "property", "(", "'aspect...
Adjust the space between the panels
[ "Adjust", "the", "space", "between", "the", "panels" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet_null.py#L45-L72
train
214,420
has2k1/plotnine
plotnine/geoms/geom_path.py
_draw_segments
def _draw_segments(data, ax, **params): """ Draw independent line segments between all the points """ color = to_rgba(data['color'], data['alpha']) # All we do is line-up all the points in a group # into segments, all in a single list. # Along the way the other parameters are put in # sequences accordingly indices = [] # for attributes of starting point of each segment segments = [] for _, df in data.groupby('group'): idx = df.index indices.extend(idx[:-1]) # One line from two points x = data['x'].iloc[idx] y = data['y'].iloc[idx] segments.append(make_line_segments(x, y, ispath=True)) segments = np.vstack(segments) if color is None: edgecolor = color else: edgecolor = [color[i] for i in indices] linewidth = data.loc[indices, 'size'] linestyle = data.loc[indices, 'linetype'] coll = mcoll.LineCollection(segments, edgecolor=edgecolor, linewidth=linewidth, linestyle=linestyle, zorder=params['zorder']) ax.add_collection(coll)
python
def _draw_segments(data, ax, **params): """ Draw independent line segments between all the points """ color = to_rgba(data['color'], data['alpha']) # All we do is line-up all the points in a group # into segments, all in a single list. # Along the way the other parameters are put in # sequences accordingly indices = [] # for attributes of starting point of each segment segments = [] for _, df in data.groupby('group'): idx = df.index indices.extend(idx[:-1]) # One line from two points x = data['x'].iloc[idx] y = data['y'].iloc[idx] segments.append(make_line_segments(x, y, ispath=True)) segments = np.vstack(segments) if color is None: edgecolor = color else: edgecolor = [color[i] for i in indices] linewidth = data.loc[indices, 'size'] linestyle = data.loc[indices, 'linetype'] coll = mcoll.LineCollection(segments, edgecolor=edgecolor, linewidth=linewidth, linestyle=linestyle, zorder=params['zorder']) ax.add_collection(coll)
[ "def", "_draw_segments", "(", "data", ",", "ax", ",", "*", "*", "params", ")", ":", "color", "=", "to_rgba", "(", "data", "[", "'color'", "]", ",", "data", "[", "'alpha'", "]", ")", "# All we do is line-up all the points in a group", "# into segments, all in a s...
Draw independent line segments between all the points
[ "Draw", "independent", "line", "segments", "between", "all", "the", "points" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/geoms/geom_path.py#L341-L375
train
214,421
has2k1/plotnine
plotnine/geoms/geom_path.py
_draw_lines
def _draw_lines(data, ax, **params): """ Draw a path with the same characteristics from the first point to the last point """ color = to_rgba(data['color'].iloc[0], data['alpha'].iloc[0]) join_style = _get_joinstyle(data, params) lines = mlines.Line2D(data['x'], data['y'], color=color, linewidth=data['size'].iloc[0], linestyle=data['linetype'].iloc[0], zorder=params['zorder'], **join_style) ax.add_artist(lines)
python
def _draw_lines(data, ax, **params): """ Draw a path with the same characteristics from the first point to the last point """ color = to_rgba(data['color'].iloc[0], data['alpha'].iloc[0]) join_style = _get_joinstyle(data, params) lines = mlines.Line2D(data['x'], data['y'], color=color, linewidth=data['size'].iloc[0], linestyle=data['linetype'].iloc[0], zorder=params['zorder'], **join_style) ax.add_artist(lines)
[ "def", "_draw_lines", "(", "data", ",", "ax", ",", "*", "*", "params", ")", ":", "color", "=", "to_rgba", "(", "data", "[", "'color'", "]", ".", "iloc", "[", "0", "]", ",", "data", "[", "'alpha'", "]", ".", "iloc", "[", "0", "]", ")", "join_sty...
Draw a path with the same characteristics from the first point to the last point
[ "Draw", "a", "path", "with", "the", "same", "characteristics", "from", "the", "first", "point", "to", "the", "last", "point" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/geoms/geom_path.py#L378-L392
train
214,422
has2k1/plotnine
plotnine/geoms/geom_path.py
arrow.get_paths
def get_paths(self, x1, y1, x2, y2, panel_params, coord, ax): """ Compute paths that create the arrow heads Parameters ---------- x1, y1, x2, y2 : array_like List of points that define the tails of the arrows. The arrow heads will be at x1, y1. If you need them at x2, y2 reverse the input. Returns ------- out : list of Path Paths that create arrow heads """ Path = mpath.Path # Create reusable lists of vertices and codes # arrowhead path has 3 vertices (Nones), # plus dummy vertex for the STOP code verts = [None, None, None, (0, 0)] # codes list remains the same after initialization codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.STOP] # Slices into the vertices list slc = slice(0, 3) # We need the plot dimensions so that we can # compute scaling factors fig = ax.get_figure() width, height = fig.get_size_inches() ranges = coord.range(panel_params) width_ = np.ptp(ranges.x) height_ = np.ptp(ranges.y) # scaling factors to prevent skewed arrowheads lx = self.length * width_/width ly = self.length * height_/height # angle in radians a = self.angle * np.pi / 180 # direction of arrow head xdiff, ydiff = x2 - x1, y2 - y1 rotations = np.arctan2(ydiff/ly, xdiff/lx) # Arrow head vertices v1x = x1 + lx * np.cos(rotations + a) v1y = y1 + ly * np.sin(rotations + a) v2x = x1 + lx * np.cos(rotations - a) v2y = y1 + ly * np.sin(rotations - a) # create a path for each arrow head paths = [] for t in zip(v1x, v1y, x1, y1, v2x, v2y): verts[slc] = [t[:2], t[2:4], t[4:]] paths.append(Path(verts, codes)) return paths
python
def get_paths(self, x1, y1, x2, y2, panel_params, coord, ax): """ Compute paths that create the arrow heads Parameters ---------- x1, y1, x2, y2 : array_like List of points that define the tails of the arrows. The arrow heads will be at x1, y1. If you need them at x2, y2 reverse the input. Returns ------- out : list of Path Paths that create arrow heads """ Path = mpath.Path # Create reusable lists of vertices and codes # arrowhead path has 3 vertices (Nones), # plus dummy vertex for the STOP code verts = [None, None, None, (0, 0)] # codes list remains the same after initialization codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.STOP] # Slices into the vertices list slc = slice(0, 3) # We need the plot dimensions so that we can # compute scaling factors fig = ax.get_figure() width, height = fig.get_size_inches() ranges = coord.range(panel_params) width_ = np.ptp(ranges.x) height_ = np.ptp(ranges.y) # scaling factors to prevent skewed arrowheads lx = self.length * width_/width ly = self.length * height_/height # angle in radians a = self.angle * np.pi / 180 # direction of arrow head xdiff, ydiff = x2 - x1, y2 - y1 rotations = np.arctan2(ydiff/ly, xdiff/lx) # Arrow head vertices v1x = x1 + lx * np.cos(rotations + a) v1y = y1 + ly * np.sin(rotations + a) v2x = x1 + lx * np.cos(rotations - a) v2y = y1 + ly * np.sin(rotations - a) # create a path for each arrow head paths = [] for t in zip(v1x, v1y, x1, y1, v2x, v2y): verts[slc] = [t[:2], t[2:4], t[4:]] paths.append(Path(verts, codes)) return paths
[ "def", "get_paths", "(", "self", ",", "x1", ",", "y1", ",", "x2", ",", "y2", ",", "panel_params", ",", "coord", ",", "ax", ")", ":", "Path", "=", "mpath", ".", "Path", "# Create reusable lists of vertices and codes", "# arrowhead path has 3 vertices (Nones),", "...
Compute paths that create the arrow heads Parameters ---------- x1, y1, x2, y2 : array_like List of points that define the tails of the arrows. The arrow heads will be at x1, y1. If you need them at x2, y2 reverse the input. Returns ------- out : list of Path Paths that create arrow heads
[ "Compute", "paths", "that", "create", "the", "arrow", "heads" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/geoms/geom_path.py#L276-L338
train
214,423
has2k1/plotnine
plotnine/facets/facet.py
combine_vars
def combine_vars(data, environment=None, vars=None, drop=True): """ Base layout function that generates all combinations of data needed for facetting The first data frame in the list should be the default data for the plot. Other data frames in the list are ones that are added to the layers. """ if not vars: return pd.DataFrame() # For each layer, compute the facet values values = [eval_facet_vars(df, vars, environment) for df in data if df is not None] # Form the base data frame which contains all combinations # of facetting variables that appear in the data has_all = [x.shape[1] == len(vars) for x in values] if not any(has_all): raise PlotnineError( "At least one layer must contain all variables " + "used for facetting") base = pd.concat([x for i, x in enumerate(values) if has_all[i]], axis=0) base = base.drop_duplicates() if not drop: base = unique_combs(base) # sorts according to order of factor levels base = base.sort_values(list(base.columns)) # Systematically add on missing combinations for i, value in enumerate(values): if has_all[i] or len(value.columns) == 0: continue old = base.loc[:, base.columns - value.columns] new = value.loc[:, base.columns & value.columns].drop_duplicates() if not drop: new = unique_combs(new) base = base.append(cross_join(old, new), ignore_index=True) if len(base) == 0: raise PlotnineError( "Faceting variables must have at least one value") base = base.reset_index(drop=True) return base
python
def combine_vars(data, environment=None, vars=None, drop=True): """ Base layout function that generates all combinations of data needed for facetting The first data frame in the list should be the default data for the plot. Other data frames in the list are ones that are added to the layers. """ if not vars: return pd.DataFrame() # For each layer, compute the facet values values = [eval_facet_vars(df, vars, environment) for df in data if df is not None] # Form the base data frame which contains all combinations # of facetting variables that appear in the data has_all = [x.shape[1] == len(vars) for x in values] if not any(has_all): raise PlotnineError( "At least one layer must contain all variables " + "used for facetting") base = pd.concat([x for i, x in enumerate(values) if has_all[i]], axis=0) base = base.drop_duplicates() if not drop: base = unique_combs(base) # sorts according to order of factor levels base = base.sort_values(list(base.columns)) # Systematically add on missing combinations for i, value in enumerate(values): if has_all[i] or len(value.columns) == 0: continue old = base.loc[:, base.columns - value.columns] new = value.loc[:, base.columns & value.columns].drop_duplicates() if not drop: new = unique_combs(new) base = base.append(cross_join(old, new), ignore_index=True) if len(base) == 0: raise PlotnineError( "Faceting variables must have at least one value") base = base.reset_index(drop=True) return base
[ "def", "combine_vars", "(", "data", ",", "environment", "=", "None", ",", "vars", "=", "None", ",", "drop", "=", "True", ")", ":", "if", "not", "vars", ":", "return", "pd", ".", "DataFrame", "(", ")", "# For each layer, compute the facet values", "values", ...
Base layout function that generates all combinations of data needed for facetting The first data frame in the list should be the default data for the plot. Other data frames in the list are ones that are added to the layers.
[ "Base", "layout", "function", "that", "generates", "all", "combinations", "of", "data", "needed", "for", "facetting", "The", "first", "data", "frame", "in", "the", "list", "should", "be", "the", "default", "data", "for", "the", "plot", ".", "Other", "data", ...
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet.py#L556-L603
train
214,424
has2k1/plotnine
plotnine/facets/facet.py
unique_combs
def unique_combs(df): """ Return data frame with all possible combinations of the values in the columns """ # List of unique values from every column lst = (x.unique() for x in (df[c] for c in df)) rows = list(itertools.product(*lst)) _df = pd.DataFrame(rows, columns=df.columns) # preserve the column dtypes for col in df: _df[col] = _df[col].astype(df[col].dtype, copy=False) return _df
python
def unique_combs(df): """ Return data frame with all possible combinations of the values in the columns """ # List of unique values from every column lst = (x.unique() for x in (df[c] for c in df)) rows = list(itertools.product(*lst)) _df = pd.DataFrame(rows, columns=df.columns) # preserve the column dtypes for col in df: _df[col] = _df[col].astype(df[col].dtype, copy=False) return _df
[ "def", "unique_combs", "(", "df", ")", ":", "# List of unique values from every column", "lst", "=", "(", "x", ".", "unique", "(", ")", "for", "x", "in", "(", "df", "[", "c", "]", "for", "c", "in", "df", ")", ")", "rows", "=", "list", "(", "itertools...
Return data frame with all possible combinations of the values in the columns
[ "Return", "data", "frame", "with", "all", "possible", "combinations", "of", "the", "values", "in", "the", "columns" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet.py#L606-L619
train
214,425
has2k1/plotnine
plotnine/facets/facet.py
eval_facet_vars
def eval_facet_vars(data, vars, env): """ Evaluate facet variables Parameters ---------- data : DataFrame Factet dataframe vars : list Facet variables env : environment Plot environment Returns ------- facet_vals : DataFrame Facet values that correspond to the specified variables. """ # To allow expressions in facet formula def I(value): return value env = env.with_outer_namespace({'I': I}) facet_vals = pd.DataFrame(index=data.index) for name in vars: if name in data: # This is a limited solution. If a keyword is # part of an expression it will fail in the # else statement below res = data[name] elif str.isidentifier(name): # All other non-statements continue else: # Statements try: res = env.eval(name, inner_namespace=data) except NameError: continue facet_vals[name] = res return facet_vals
python
def eval_facet_vars(data, vars, env): """ Evaluate facet variables Parameters ---------- data : DataFrame Factet dataframe vars : list Facet variables env : environment Plot environment Returns ------- facet_vals : DataFrame Facet values that correspond to the specified variables. """ # To allow expressions in facet formula def I(value): return value env = env.with_outer_namespace({'I': I}) facet_vals = pd.DataFrame(index=data.index) for name in vars: if name in data: # This is a limited solution. If a keyword is # part of an expression it will fail in the # else statement below res = data[name] elif str.isidentifier(name): # All other non-statements continue else: # Statements try: res = env.eval(name, inner_namespace=data) except NameError: continue facet_vals[name] = res return facet_vals
[ "def", "eval_facet_vars", "(", "data", ",", "vars", ",", "env", ")", ":", "# To allow expressions in facet formula", "def", "I", "(", "value", ")", ":", "return", "value", "env", "=", "env", ".", "with_outer_namespace", "(", "{", "'I'", ":", "I", "}", ")",...
Evaluate facet variables Parameters ---------- data : DataFrame Factet dataframe vars : list Facet variables env : environment Plot environment Returns ------- facet_vals : DataFrame Facet values that correspond to the specified variables.
[ "Evaluate", "facet", "variables" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet.py#L654-L697
train
214,426
has2k1/plotnine
plotnine/facets/facet.py
facet.map
def map(self, data, layout): """ Assign a data points to panels Parameters ---------- data : DataFrame Data for a layer layout : DataFrame As returned by self.compute_layout Returns ------- data : DataFrame Data with all points mapped to the panels on which they will be plotted. """ msg = "{} should implement this method." raise NotImplementedError( msg.format(self.__class.__name__))
python
def map(self, data, layout): """ Assign a data points to panels Parameters ---------- data : DataFrame Data for a layer layout : DataFrame As returned by self.compute_layout Returns ------- data : DataFrame Data with all points mapped to the panels on which they will be plotted. """ msg = "{} should implement this method." raise NotImplementedError( msg.format(self.__class.__name__))
[ "def", "map", "(", "self", ",", "data", ",", "layout", ")", ":", "msg", "=", "\"{} should implement this method.\"", "raise", "NotImplementedError", "(", "msg", ".", "format", "(", "self", ".", "__class", ".", "__name__", ")", ")" ]
Assign a data points to panels Parameters ---------- data : DataFrame Data for a layer layout : DataFrame As returned by self.compute_layout Returns ------- data : DataFrame Data with all points mapped to the panels on which they will be plotted.
[ "Assign", "a", "data", "points", "to", "panels" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet.py#L160-L179
train
214,427
has2k1/plotnine
plotnine/facets/facet.py
facet.train_position_scales
def train_position_scales(self, layout, layers): """ Compute ranges for the x and y scales """ _layout = layout.layout panel_scales_x = layout.panel_scales_x panel_scales_y = layout.panel_scales_y # loop over each layer, training x and y scales in turn for layer in layers: data = layer.data match_id = match(data['PANEL'], _layout['PANEL']) if panel_scales_x: x_vars = list(set(panel_scales_x[0].aesthetics) & set(data.columns)) # the scale index for each data point SCALE_X = _layout['SCALE_X'].iloc[match_id].tolist() panel_scales_x.train(data, x_vars, SCALE_X) if panel_scales_y: y_vars = list(set(panel_scales_y[0].aesthetics) & set(data.columns)) # the scale index for each data point SCALE_Y = _layout['SCALE_Y'].iloc[match_id].tolist() panel_scales_y.train(data, y_vars, SCALE_Y) return self
python
def train_position_scales(self, layout, layers): """ Compute ranges for the x and y scales """ _layout = layout.layout panel_scales_x = layout.panel_scales_x panel_scales_y = layout.panel_scales_y # loop over each layer, training x and y scales in turn for layer in layers: data = layer.data match_id = match(data['PANEL'], _layout['PANEL']) if panel_scales_x: x_vars = list(set(panel_scales_x[0].aesthetics) & set(data.columns)) # the scale index for each data point SCALE_X = _layout['SCALE_X'].iloc[match_id].tolist() panel_scales_x.train(data, x_vars, SCALE_X) if panel_scales_y: y_vars = list(set(panel_scales_y[0].aesthetics) & set(data.columns)) # the scale index for each data point SCALE_Y = _layout['SCALE_Y'].iloc[match_id].tolist() panel_scales_y.train(data, y_vars, SCALE_Y) return self
[ "def", "train_position_scales", "(", "self", ",", "layout", ",", "layers", ")", ":", "_layout", "=", "layout", ".", "layout", "panel_scales_x", "=", "layout", ".", "panel_scales_x", "panel_scales_y", "=", "layout", ".", "panel_scales_y", "# loop over each layer, tra...
Compute ranges for the x and y scales
[ "Compute", "ranges", "for", "the", "x", "and", "y", "scales" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet.py#L210-L236
train
214,428
has2k1/plotnine
plotnine/facets/facet.py
facet._create_subplots
def _create_subplots(self, fig, layout): """ Create suplots and return axs """ num_panels = len(layout) axsarr = np.empty((self.nrow, self.ncol), dtype=object) # Create axes i = 1 for row in range(self.nrow): for col in range(self.ncol): axsarr[row, col] = fig.add_subplot(self.nrow, self.ncol, i) i += 1 # Rearrange axes # They are ordered to match the positions in the layout table if self.dir == 'h': order = 'C' if not self.as_table: axsarr = axsarr[::-1] elif self.dir == 'v': order = 'F' if not self.as_table: axsarr = np.array([row[::-1] for row in axsarr]) axs = axsarr.ravel(order) # Delete unused axes for ax in axs[num_panels:]: fig.delaxes(ax) axs = axs[:num_panels] return axs
python
def _create_subplots(self, fig, layout): """ Create suplots and return axs """ num_panels = len(layout) axsarr = np.empty((self.nrow, self.ncol), dtype=object) # Create axes i = 1 for row in range(self.nrow): for col in range(self.ncol): axsarr[row, col] = fig.add_subplot(self.nrow, self.ncol, i) i += 1 # Rearrange axes # They are ordered to match the positions in the layout table if self.dir == 'h': order = 'C' if not self.as_table: axsarr = axsarr[::-1] elif self.dir == 'v': order = 'F' if not self.as_table: axsarr = np.array([row[::-1] for row in axsarr]) axs = axsarr.ravel(order) # Delete unused axes for ax in axs[num_panels:]: fig.delaxes(ax) axs = axs[:num_panels] return axs
[ "def", "_create_subplots", "(", "self", ",", "fig", ",", "layout", ")", ":", "num_panels", "=", "len", "(", "layout", ")", "axsarr", "=", "np", ".", "empty", "(", "(", "self", ".", "nrow", ",", "self", ".", "ncol", ")", ",", "dtype", "=", "object",...
Create suplots and return axs
[ "Create", "suplots", "and", "return", "axs" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet.py#L319-L350
train
214,429
has2k1/plotnine
plotnine/facets/facet.py
facet.make_axes
def make_axes(self, figure, layout, coordinates): """ Create and return Matplotlib axes """ axs = self._create_subplots(figure, layout) # Used for labelling the x and y axes, the first and # last axes according to how MPL creates them. self.first_ax = figure.axes[0] self.last_ax = figure.axes[-1] self.figure = figure self.axs = axs return axs
python
def make_axes(self, figure, layout, coordinates): """ Create and return Matplotlib axes """ axs = self._create_subplots(figure, layout) # Used for labelling the x and y axes, the first and # last axes according to how MPL creates them. self.first_ax = figure.axes[0] self.last_ax = figure.axes[-1] self.figure = figure self.axs = axs return axs
[ "def", "make_axes", "(", "self", ",", "figure", ",", "layout", ",", "coordinates", ")", ":", "axs", "=", "self", ".", "_create_subplots", "(", "figure", ",", "layout", ")", "# Used for labelling the x and y axes, the first and", "# last axes according to how MPL creates...
Create and return Matplotlib axes
[ "Create", "and", "return", "Matplotlib", "axes" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet.py#L352-L364
train
214,430
has2k1/plotnine
plotnine/facets/facet.py
facet.strip_size
def strip_size(self, location='top', num_lines=None): """ Breadth of the strip background in inches Parameters ---------- location : str in ``['top', 'right']`` Location of the strip text num_lines : int Number of text lines """ dpi = 72 theme = self.theme get_property = theme.themeables.property if location == 'right': strip_name = 'strip_text_y' num_lines = num_lines or self.num_vars_y else: strip_name = 'strip_text_x' num_lines = num_lines or self.num_vars_x if not num_lines: return 0 # The facet labels are placed onto the figure using # transAxes dimensions. The line height and line # width are mapped to the same [0, 1] range # i.e (pts) * (inches / pts) * (1 / inches) try: fontsize = get_property(strip_name, 'size') except KeyError: fontsize = float(theme.rcParams.get('font.size', 10)) try: linespacing = get_property(strip_name, 'linespacing') except KeyError: linespacing = 1 # margins on either side of the strip text m1, m2 = self.inner_strip_margins(location) # Using figure.dpi value here does not workout well! breadth = (linespacing*fontsize) * num_lines / dpi breadth = breadth + (m1 + m2) / dpi return breadth
python
def strip_size(self, location='top', num_lines=None): """ Breadth of the strip background in inches Parameters ---------- location : str in ``['top', 'right']`` Location of the strip text num_lines : int Number of text lines """ dpi = 72 theme = self.theme get_property = theme.themeables.property if location == 'right': strip_name = 'strip_text_y' num_lines = num_lines or self.num_vars_y else: strip_name = 'strip_text_x' num_lines = num_lines or self.num_vars_x if not num_lines: return 0 # The facet labels are placed onto the figure using # transAxes dimensions. The line height and line # width are mapped to the same [0, 1] range # i.e (pts) * (inches / pts) * (1 / inches) try: fontsize = get_property(strip_name, 'size') except KeyError: fontsize = float(theme.rcParams.get('font.size', 10)) try: linespacing = get_property(strip_name, 'linespacing') except KeyError: linespacing = 1 # margins on either side of the strip text m1, m2 = self.inner_strip_margins(location) # Using figure.dpi value here does not workout well! breadth = (linespacing*fontsize) * num_lines / dpi breadth = breadth + (m1 + m2) / dpi return breadth
[ "def", "strip_size", "(", "self", ",", "location", "=", "'top'", ",", "num_lines", "=", "None", ")", ":", "dpi", "=", "72", "theme", "=", "self", ".", "theme", "get_property", "=", "theme", ".", "themeables", ".", "property", "if", "location", "==", "'...
Breadth of the strip background in inches Parameters ---------- location : str in ``['top', 'right']`` Location of the strip text num_lines : int Number of text lines
[ "Breadth", "of", "the", "strip", "background", "in", "inches" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet.py#L392-L436
train
214,431
has2k1/plotnine
plotnine/facets/facet.py
facet.strip_dimensions
def strip_dimensions(self, text_lines, location, pid): """ Calculate the dimension Returns ------- out : types.SimpleNamespace A structure with all the coordinates required to draw the strip text and the background box. """ dpi = 72 num_lines = len(text_lines) get_property = self.theme.themeables.property ax = self.axs[pid] bbox = ax.get_window_extent().transformed( self.figure.dpi_scale_trans.inverted()) ax_width, ax_height = bbox.width, bbox.height # in inches strip_size = self.strip_size(location, num_lines) m1, m2 = self.inner_strip_margins(location) m1, m2 = m1/dpi, m2/dpi margin = 0 # default if location == 'right': box_x = 1 box_y = 0 box_width = strip_size/ax_width box_height = 1 # y & height properties of the background slide and # shrink the strip vertically. The y margin slides # it horizontally. with suppress(KeyError): box_y = get_property('strip_background_y', 'y') with suppress(KeyError): box_height = get_property('strip_background_y', 'height') with suppress(KeyError): margin = get_property('strip_margin_y') x = 1 + (strip_size-m2+m1) / (2*ax_width) y = (2*box_y+box_height)/2 # margin adjustment hslide = 1 + margin*strip_size/ax_width x *= hslide box_x *= hslide else: box_x = 0 box_y = 1 box_width = 1 box_height = strip_size/ax_height # x & width properties of the background slide and # shrink the strip horizontally. The y margin slides # it vertically. with suppress(KeyError): box_x = get_property('strip_background_x', 'x') with suppress(KeyError): box_width = get_property('strip_background_x', 'width') with suppress(KeyError): margin = get_property('strip_margin_x') x = (2*box_x+box_width)/2 y = 1 + (strip_size-m1+m2)/(2*ax_height) # margin adjustment vslide = 1 + margin*strip_size/ax_height y *= vslide box_y *= vslide dimensions = types.SimpleNamespace( x=x, y=y, box_x=box_x, box_y=box_y, box_width=box_width, box_height=box_height) return dimensions
python
def strip_dimensions(self, text_lines, location, pid): """ Calculate the dimension Returns ------- out : types.SimpleNamespace A structure with all the coordinates required to draw the strip text and the background box. """ dpi = 72 num_lines = len(text_lines) get_property = self.theme.themeables.property ax = self.axs[pid] bbox = ax.get_window_extent().transformed( self.figure.dpi_scale_trans.inverted()) ax_width, ax_height = bbox.width, bbox.height # in inches strip_size = self.strip_size(location, num_lines) m1, m2 = self.inner_strip_margins(location) m1, m2 = m1/dpi, m2/dpi margin = 0 # default if location == 'right': box_x = 1 box_y = 0 box_width = strip_size/ax_width box_height = 1 # y & height properties of the background slide and # shrink the strip vertically. The y margin slides # it horizontally. with suppress(KeyError): box_y = get_property('strip_background_y', 'y') with suppress(KeyError): box_height = get_property('strip_background_y', 'height') with suppress(KeyError): margin = get_property('strip_margin_y') x = 1 + (strip_size-m2+m1) / (2*ax_width) y = (2*box_y+box_height)/2 # margin adjustment hslide = 1 + margin*strip_size/ax_width x *= hslide box_x *= hslide else: box_x = 0 box_y = 1 box_width = 1 box_height = strip_size/ax_height # x & width properties of the background slide and # shrink the strip horizontally. The y margin slides # it vertically. with suppress(KeyError): box_x = get_property('strip_background_x', 'x') with suppress(KeyError): box_width = get_property('strip_background_x', 'width') with suppress(KeyError): margin = get_property('strip_margin_x') x = (2*box_x+box_width)/2 y = 1 + (strip_size-m1+m2)/(2*ax_height) # margin adjustment vslide = 1 + margin*strip_size/ax_height y *= vslide box_y *= vslide dimensions = types.SimpleNamespace( x=x, y=y, box_x=box_x, box_y=box_y, box_width=box_width, box_height=box_height) return dimensions
[ "def", "strip_dimensions", "(", "self", ",", "text_lines", ",", "location", ",", "pid", ")", ":", "dpi", "=", "72", "num_lines", "=", "len", "(", "text_lines", ")", "get_property", "=", "self", ".", "theme", ".", "themeables", ".", "property", "ax", "=",...
Calculate the dimension Returns ------- out : types.SimpleNamespace A structure with all the coordinates required to draw the strip text and the background box.
[ "Calculate", "the", "dimension" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet.py#L438-L505
train
214,432
has2k1/plotnine
plotnine/facets/facet.py
facet.draw_strip_text
def draw_strip_text(self, text_lines, location, pid): """ Create a background patch and put a label on it """ ax = self.axs[pid] themeable = self.figure._themeable dim = self.strip_dimensions(text_lines, location, pid) if location == 'right': rotation = -90 label = '\n'.join(reversed(text_lines)) else: rotation = 0 label = '\n'.join(text_lines) rect = mpatch.FancyBboxPatch((dim.box_x, dim.box_y), width=dim.box_width, height=dim.box_height, facecolor='lightgrey', edgecolor='None', transform=ax.transAxes, zorder=2.2, # > ax line & boundary boxstyle='square, pad=0', clip_on=False) text = mtext.Text(dim.x, dim.y, label, rotation=rotation, verticalalignment='center', horizontalalignment='center', transform=ax.transAxes, zorder=3.3, # > rect clip_on=False) ax.add_artist(rect) ax.add_artist(text) for key in ('strip_text_x', 'strip_text_y', 'strip_background_x', 'strip_background_y'): if key not in themeable: themeable[key] = [] if location == 'right': themeable['strip_background_y'].append(rect) themeable['strip_text_y'].append(text) else: themeable['strip_background_x'].append(rect) themeable['strip_text_x'].append(text)
python
def draw_strip_text(self, text_lines, location, pid): """ Create a background patch and put a label on it """ ax = self.axs[pid] themeable = self.figure._themeable dim = self.strip_dimensions(text_lines, location, pid) if location == 'right': rotation = -90 label = '\n'.join(reversed(text_lines)) else: rotation = 0 label = '\n'.join(text_lines) rect = mpatch.FancyBboxPatch((dim.box_x, dim.box_y), width=dim.box_width, height=dim.box_height, facecolor='lightgrey', edgecolor='None', transform=ax.transAxes, zorder=2.2, # > ax line & boundary boxstyle='square, pad=0', clip_on=False) text = mtext.Text(dim.x, dim.y, label, rotation=rotation, verticalalignment='center', horizontalalignment='center', transform=ax.transAxes, zorder=3.3, # > rect clip_on=False) ax.add_artist(rect) ax.add_artist(text) for key in ('strip_text_x', 'strip_text_y', 'strip_background_x', 'strip_background_y'): if key not in themeable: themeable[key] = [] if location == 'right': themeable['strip_background_y'].append(rect) themeable['strip_text_y'].append(text) else: themeable['strip_background_x'].append(rect) themeable['strip_text_x'].append(text)
[ "def", "draw_strip_text", "(", "self", ",", "text_lines", ",", "location", ",", "pid", ")", ":", "ax", "=", "self", ".", "axs", "[", "pid", "]", "themeable", "=", "self", ".", "figure", ".", "_themeable", "dim", "=", "self", ".", "strip_dimensions", "(...
Create a background patch and put a label on it
[ "Create", "a", "background", "patch", "and", "put", "a", "label", "on", "it" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet.py#L507-L553
train
214,433
has2k1/plotnine
plotnine/facets/facet.py
MyFixedFormatter.format_data
def format_data(self, value): """ Return a formatted string representation of a number. """ s = locale.format_string('%1.10e', (value,)) return self.fix_minus(s)
python
def format_data(self, value): """ Return a formatted string representation of a number. """ s = locale.format_string('%1.10e', (value,)) return self.fix_minus(s)
[ "def", "format_data", "(", "self", ",", "value", ")", ":", "s", "=", "locale", ".", "format_string", "(", "'%1.10e'", ",", "(", "value", ",", ")", ")", "return", "self", ".", "fix_minus", "(", "s", ")" ]
Return a formatted string representation of a number.
[ "Return", "a", "formatted", "string", "representation", "of", "a", "number", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet.py#L701-L706
train
214,434
has2k1/plotnine
plotnine/guides/guide_colorbar.py
add_interpolated_colorbar
def add_interpolated_colorbar(da, colors, direction): """ Add 'rastered' colorbar to DrawingArea """ # Special case that arises due to not so useful # aesthetic mapping. if len(colors) == 1: colors = [colors[0], colors[0]] # Number of horizontal egdes(breaks) in the grid # No need to create more nbreak than colors, provided # no. of colors = no. of breaks = no. of cmap colors # the shading does a perfect interpolation nbreak = len(colors) if direction == 'vertical': mesh_width = 1 mesh_height = nbreak-1 linewidth = da.height/mesh_height # Construct rectangular meshgrid # The values(Z) at each vertex are just the # normalized (onto [0, 1]) vertical distance x = np.array([0, da.width]) y = np.arange(0, nbreak) * linewidth X, Y = np.meshgrid(x, y) Z = Y/y.max() else: mesh_width = nbreak-1 mesh_height = 1 linewidth = da.width/mesh_width x = np.arange(0, nbreak) * linewidth y = np.array([0, da.height]) X, Y = np.meshgrid(x, y) Z = X/x.max() # As a 2D coordinates array coordinates = np.zeros( ((mesh_width+1)*(mesh_height+1), 2), dtype=float) coordinates[:, 0] = X.ravel() coordinates[:, 1] = Y.ravel() cmap = ListedColormap(colors) coll = mcoll.QuadMesh(mesh_width, mesh_height, coordinates, antialiased=False, shading='gouraud', linewidth=0, cmap=cmap, array=Z.ravel()) da.add_artist(coll)
python
def add_interpolated_colorbar(da, colors, direction): """ Add 'rastered' colorbar to DrawingArea """ # Special case that arises due to not so useful # aesthetic mapping. if len(colors) == 1: colors = [colors[0], colors[0]] # Number of horizontal egdes(breaks) in the grid # No need to create more nbreak than colors, provided # no. of colors = no. of breaks = no. of cmap colors # the shading does a perfect interpolation nbreak = len(colors) if direction == 'vertical': mesh_width = 1 mesh_height = nbreak-1 linewidth = da.height/mesh_height # Construct rectangular meshgrid # The values(Z) at each vertex are just the # normalized (onto [0, 1]) vertical distance x = np.array([0, da.width]) y = np.arange(0, nbreak) * linewidth X, Y = np.meshgrid(x, y) Z = Y/y.max() else: mesh_width = nbreak-1 mesh_height = 1 linewidth = da.width/mesh_width x = np.arange(0, nbreak) * linewidth y = np.array([0, da.height]) X, Y = np.meshgrid(x, y) Z = X/x.max() # As a 2D coordinates array coordinates = np.zeros( ((mesh_width+1)*(mesh_height+1), 2), dtype=float) coordinates[:, 0] = X.ravel() coordinates[:, 1] = Y.ravel() cmap = ListedColormap(colors) coll = mcoll.QuadMesh(mesh_width, mesh_height, coordinates, antialiased=False, shading='gouraud', linewidth=0, cmap=cmap, array=Z.ravel()) da.add_artist(coll)
[ "def", "add_interpolated_colorbar", "(", "da", ",", "colors", ",", "direction", ")", ":", "# Special case that arises due to not so useful", "# aesthetic mapping.", "if", "len", "(", "colors", ")", "==", "1", ":", "colors", "=", "[", "colors", "[", "0", "]", ","...
Add 'rastered' colorbar to DrawingArea
[ "Add", "rastered", "colorbar", "to", "DrawingArea" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/guides/guide_colorbar.py#L242-L292
train
214,435
has2k1/plotnine
plotnine/guides/guide_colorbar.py
add_segmented_colorbar
def add_segmented_colorbar(da, colors, direction): """ Add 'non-rastered' colorbar to DrawingArea """ nbreak = len(colors) if direction == 'vertical': linewidth = da.height/nbreak verts = [None] * nbreak x1, x2 = 0, da.width for i, color in enumerate(colors): y1 = i * linewidth y2 = y1 + linewidth verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1)) else: linewidth = da.width/nbreak verts = [None] * nbreak y1, y2 = 0, da.height for i, color in enumerate(colors): x1 = i * linewidth x2 = x1 + linewidth verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1)) coll = mcoll.PolyCollection(verts, facecolors=colors, linewidth=0, antialiased=False) da.add_artist(coll)
python
def add_segmented_colorbar(da, colors, direction): """ Add 'non-rastered' colorbar to DrawingArea """ nbreak = len(colors) if direction == 'vertical': linewidth = da.height/nbreak verts = [None] * nbreak x1, x2 = 0, da.width for i, color in enumerate(colors): y1 = i * linewidth y2 = y1 + linewidth verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1)) else: linewidth = da.width/nbreak verts = [None] * nbreak y1, y2 = 0, da.height for i, color in enumerate(colors): x1 = i * linewidth x2 = x1 + linewidth verts[i] = ((x1, y1), (x1, y2), (x2, y2), (x2, y1)) coll = mcoll.PolyCollection(verts, facecolors=colors, linewidth=0, antialiased=False) da.add_artist(coll)
[ "def", "add_segmented_colorbar", "(", "da", ",", "colors", ",", "direction", ")", ":", "nbreak", "=", "len", "(", "colors", ")", "if", "direction", "==", "'vertical'", ":", "linewidth", "=", "da", ".", "height", "/", "nbreak", "verts", "=", "[", "None", ...
Add 'non-rastered' colorbar to DrawingArea
[ "Add", "non", "-", "rastered", "colorbar", "to", "DrawingArea" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/guides/guide_colorbar.py#L295-L321
train
214,436
has2k1/plotnine
plotnine/guides/guide_colorbar.py
create_labels
def create_labels(da, labels, locations, direction): """ Return an OffsetBox with label texts """ # The box dimensions are determined by the size of # the text objects. We put two dummy children at # either end to gaurantee that when center packed # the labels in the labels_box matchup with the ticks. fontsize = 9 aux_transform = mtransforms.IdentityTransform() labels_box = MyAuxTransformBox(aux_transform) xs, ys = [0]*len(labels), locations ha, va = 'left', 'center' x1, y1 = 0, 0 x2, y2 = 0, da.height if direction == 'horizontal': xs, ys = ys, xs ha, va = 'center', 'top' x2, y2 = da.width, 0 txt1 = mtext.Text(x1, y1, '', horizontalalignment=ha, verticalalignment=va) txt2 = mtext.Text(x2, y2, '', horizontalalignment=ha, verticalalignment=va) labels_box.add_artist(txt1) labels_box.add_artist(txt2) legend_text = [] for i, (x, y, text) in enumerate(zip(xs, ys, labels)): txt = mtext.Text(x, y, text, size=fontsize, horizontalalignment=ha, verticalalignment=va) labels_box.add_artist(txt) legend_text.append(txt) return labels_box, legend_text
python
def create_labels(da, labels, locations, direction): """ Return an OffsetBox with label texts """ # The box dimensions are determined by the size of # the text objects. We put two dummy children at # either end to gaurantee that when center packed # the labels in the labels_box matchup with the ticks. fontsize = 9 aux_transform = mtransforms.IdentityTransform() labels_box = MyAuxTransformBox(aux_transform) xs, ys = [0]*len(labels), locations ha, va = 'left', 'center' x1, y1 = 0, 0 x2, y2 = 0, da.height if direction == 'horizontal': xs, ys = ys, xs ha, va = 'center', 'top' x2, y2 = da.width, 0 txt1 = mtext.Text(x1, y1, '', horizontalalignment=ha, verticalalignment=va) txt2 = mtext.Text(x2, y2, '', horizontalalignment=ha, verticalalignment=va) labels_box.add_artist(txt1) labels_box.add_artist(txt2) legend_text = [] for i, (x, y, text) in enumerate(zip(xs, ys, labels)): txt = mtext.Text(x, y, text, size=fontsize, horizontalalignment=ha, verticalalignment=va) labels_box.add_artist(txt) legend_text.append(txt) return labels_box, legend_text
[ "def", "create_labels", "(", "da", ",", "labels", ",", "locations", ",", "direction", ")", ":", "# The box dimensions are determined by the size of", "# the text objects. We put two dummy children at", "# either end to gaurantee that when center packed", "# the labels in the labels_box...
Return an OffsetBox with label texts
[ "Return", "an", "OffsetBox", "with", "label", "texts" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/guides/guide_colorbar.py#L344-L381
train
214,437
has2k1/plotnine
plotnine/guides/guide_colorbar.py
guide_colorbar.create_geoms
def create_geoms(self, plot): """ This guide is not geom based Return self if colorbar will be drawn and None if not. """ for l in plot.layers: exclude = set() if isinstance(l.show_legend, dict): l.show_legend = rename_aesthetics(l.show_legend) exclude = {ae for ae, val in l.show_legend.items() if not val} elif l.show_legend not in (None, True): continue matched = self.legend_aesthetics(l, plot) # layer uses guide if set(matched) - exclude: break # no break, no layer uses this guide else: return None return self
python
def create_geoms(self, plot): """ This guide is not geom based Return self if colorbar will be drawn and None if not. """ for l in plot.layers: exclude = set() if isinstance(l.show_legend, dict): l.show_legend = rename_aesthetics(l.show_legend) exclude = {ae for ae, val in l.show_legend.items() if not val} elif l.show_legend not in (None, True): continue matched = self.legend_aesthetics(l, plot) # layer uses guide if set(matched) - exclude: break # no break, no layer uses this guide else: return None return self
[ "def", "create_geoms", "(", "self", ",", "plot", ")", ":", "for", "l", "in", "plot", ".", "layers", ":", "exclude", "=", "set", "(", ")", "if", "isinstance", "(", "l", ".", "show_legend", ",", "dict", ")", ":", "l", ".", "show_legend", "=", "rename...
This guide is not geom based Return self if colorbar will be drawn and None if not.
[ "This", "guide", "is", "not", "geom", "based" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/guides/guide_colorbar.py#L108-L132
train
214,438
has2k1/plotnine
plotnine/guides/guide_colorbar.py
MyAuxTransformBox.draw
def draw(self, renderer): """ Draw the children """ dpi_cor = renderer.points_to_pixels(1.) self.dpi_transform.clear() self.dpi_transform.scale(dpi_cor, dpi_cor) for c in self._children: c.draw(renderer) self.stale = False
python
def draw(self, renderer): """ Draw the children """ dpi_cor = renderer.points_to_pixels(1.) self.dpi_transform.clear() self.dpi_transform.scale(dpi_cor, dpi_cor) for c in self._children: c.draw(renderer) self.stale = False
[ "def", "draw", "(", "self", ",", "renderer", ")", ":", "dpi_cor", "=", "renderer", ".", "points_to_pixels", "(", "1.", ")", "self", ".", "dpi_transform", ".", "clear", "(", ")", "self", ".", "dpi_transform", ".", "scale", "(", "dpi_cor", ",", "dpi_cor", ...
Draw the children
[ "Draw", "the", "children" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/guides/guide_colorbar.py#L404-L415
train
214,439
has2k1/plotnine
plotnine/themes/themeable.py
themeable.from_class_name
def from_class_name(name, theme_element): """ Create an themeable by name Parameters ---------- name : str Class name theme_element : element object One of :class:`element_line`, :class:`element_rect`, :class:`element_text` or :class:`element_blank` Returns ------- out : Themeable """ msg = "No such themeable element {}".format(name) try: klass = themeable._registry[name] except KeyError: raise PlotnineError(msg) if not issubclass(klass, themeable): raise PlotnineError(msg) return klass(theme_element)
python
def from_class_name(name, theme_element): """ Create an themeable by name Parameters ---------- name : str Class name theme_element : element object One of :class:`element_line`, :class:`element_rect`, :class:`element_text` or :class:`element_blank` Returns ------- out : Themeable """ msg = "No such themeable element {}".format(name) try: klass = themeable._registry[name] except KeyError: raise PlotnineError(msg) if not issubclass(klass, themeable): raise PlotnineError(msg) return klass(theme_element)
[ "def", "from_class_name", "(", "name", ",", "theme_element", ")", ":", "msg", "=", "\"No such themeable element {}\"", ".", "format", "(", "name", ")", "try", ":", "klass", "=", "themeable", ".", "_registry", "[", "name", "]", "except", "KeyError", ":", "rai...
Create an themeable by name Parameters ---------- name : str Class name theme_element : element object One of :class:`element_line`, :class:`element_rect`, :class:`element_text` or :class:`element_blank` Returns ------- out : Themeable
[ "Create", "an", "themeable", "by", "name" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/themes/themeable.py#L96-L121
train
214,440
has2k1/plotnine
plotnine/themes/themeable.py
themeable.merge
def merge(self, other): """ Merge properties of other into self Raises ValueError if any them are a blank """ if self.is_blank() or other.is_blank(): raise ValueError('Cannot merge if there is a blank.') else: self.properties.update(other.properties)
python
def merge(self, other): """ Merge properties of other into self Raises ValueError if any them are a blank """ if self.is_blank() or other.is_blank(): raise ValueError('Cannot merge if there is a blank.') else: self.properties.update(other.properties)
[ "def", "merge", "(", "self", ",", "other", ")", ":", "if", "self", ".", "is_blank", "(", ")", "or", "other", ".", "is_blank", "(", ")", ":", "raise", "ValueError", "(", "'Cannot merge if there is a blank.'", ")", "else", ":", "self", ".", "properties", "...
Merge properties of other into self Raises ValueError if any them are a blank
[ "Merge", "properties", "of", "other", "into", "self" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/themes/themeable.py#L129-L138
train
214,441
has2k1/plotnine
plotnine/themes/themeable.py
Themeables.update
def update(self, other): """ Update themeables with those from `other` This method takes care of inserting the `themeable` into the underlying dictionary. Before doing the insertion, any existing themeables that will be affected by a new from `other` will either be merged or removed. This makes sure that a general themeable of type :class:`text` can be added to override an existing specific one of type :class:`axis_text_x`. """ for new in other.values(): new_key = new.__class__.__name__ # 1st in the mro is self, the # last 2 are (themeable, object) for child in new.__class__.mro()[1:-2]: child_key = child.__name__ try: self[child_key].merge(new) except KeyError: pass except ValueError: # Blank child is will be overridden del self[child_key] try: self[new_key].merge(new) except (KeyError, ValueError): # Themeable type is new or # could not merge blank element. self[new_key] = new
python
def update(self, other): """ Update themeables with those from `other` This method takes care of inserting the `themeable` into the underlying dictionary. Before doing the insertion, any existing themeables that will be affected by a new from `other` will either be merged or removed. This makes sure that a general themeable of type :class:`text` can be added to override an existing specific one of type :class:`axis_text_x`. """ for new in other.values(): new_key = new.__class__.__name__ # 1st in the mro is self, the # last 2 are (themeable, object) for child in new.__class__.mro()[1:-2]: child_key = child.__name__ try: self[child_key].merge(new) except KeyError: pass except ValueError: # Blank child is will be overridden del self[child_key] try: self[new_key].merge(new) except (KeyError, ValueError): # Themeable type is new or # could not merge blank element. self[new_key] = new
[ "def", "update", "(", "self", ",", "other", ")", ":", "for", "new", "in", "other", ".", "values", "(", ")", ":", "new_key", "=", "new", ".", "__class__", ".", "__name__", "# 1st in the mro is self, the", "# last 2 are (themeable, object)", "for", "child", "in"...
Update themeables with those from `other` This method takes care of inserting the `themeable` into the underlying dictionary. Before doing the insertion, any existing themeables that will be affected by a new from `other` will either be merged or removed. This makes sure that a general themeable of type :class:`text` can be added to override an existing specific one of type :class:`axis_text_x`.
[ "Update", "themeables", "with", "those", "from", "other" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/themes/themeable.py#L225-L256
train
214,442
has2k1/plotnine
plotnine/themes/themeable.py
Themeables.values
def values(self): """ Return a list themeables sorted in reverse based on the their depth in the inheritance hierarchy. The sorting is key applying and merging the themeables so that they do not clash i.e :class:`axis_line` applied before :class:`axis_line_x`. """ def key(th): return len(th.__class__.__mro__) return sorted(dict.values(self), key=key, reverse=True)
python
def values(self): """ Return a list themeables sorted in reverse based on the their depth in the inheritance hierarchy. The sorting is key applying and merging the themeables so that they do not clash i.e :class:`axis_line` applied before :class:`axis_line_x`. """ def key(th): return len(th.__class__.__mro__) return sorted(dict.values(self), key=key, reverse=True)
[ "def", "values", "(", "self", ")", ":", "def", "key", "(", "th", ")", ":", "return", "len", "(", "th", ".", "__class__", ".", "__mro__", ")", "return", "sorted", "(", "dict", ".", "values", "(", "self", ")", ",", "key", "=", "key", ",", "reverse"...
Return a list themeables sorted in reverse based on the their depth in the inheritance hierarchy. The sorting is key applying and merging the themeables so that they do not clash i.e :class:`axis_line` applied before :class:`axis_line_x`.
[ "Return", "a", "list", "themeables", "sorted", "in", "reverse", "based", "on", "the", "their", "depth", "in", "the", "inheritance", "hierarchy", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/themes/themeable.py#L258-L270
train
214,443
has2k1/plotnine
plotnine/positions/position.py
position.setup_data
def setup_data(self, data, params): """ Verify & return data """ check_required_aesthetics( self.REQUIRED_AES, data.columns, self.__class__.__name__) return data
python
def setup_data(self, data, params): """ Verify & return data """ check_required_aesthetics( self.REQUIRED_AES, data.columns, self.__class__.__name__) return data
[ "def", "setup_data", "(", "self", ",", "data", ",", "params", ")", ":", "check_required_aesthetics", "(", "self", ".", "REQUIRED_AES", ",", "data", ".", "columns", ",", "self", ".", "__class__", ".", "__name__", ")", "return", "data" ]
Verify & return data
[ "Verify", "&", "return", "data" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/positions/position.py#L24-L32
train
214,444
has2k1/plotnine
plotnine/positions/position.py
position.compute_layer
def compute_layer(cls, data, params, layout): """ Compute position for the layer in all panels Positions can override this function instead of `compute_panel` if the position computations are independent of the panel. i.e when not colliding """ def fn(pdata): """ Helper compute function """ # Given data belonging to a specific panel, grab # the corresponding scales and call the method # that does the real computation if len(pdata) == 0: return pdata scales = layout.get_scales(pdata['PANEL'].iat[0]) return cls.compute_panel(pdata, scales, params) return groupby_apply(data, 'PANEL', fn)
python
def compute_layer(cls, data, params, layout): """ Compute position for the layer in all panels Positions can override this function instead of `compute_panel` if the position computations are independent of the panel. i.e when not colliding """ def fn(pdata): """ Helper compute function """ # Given data belonging to a specific panel, grab # the corresponding scales and call the method # that does the real computation if len(pdata) == 0: return pdata scales = layout.get_scales(pdata['PANEL'].iat[0]) return cls.compute_panel(pdata, scales, params) return groupby_apply(data, 'PANEL', fn)
[ "def", "compute_layer", "(", "cls", ",", "data", ",", "params", ",", "layout", ")", ":", "def", "fn", "(", "pdata", ")", ":", "\"\"\"\n Helper compute function\n \"\"\"", "# Given data belonging to a specific panel, grab", "# the corresponding scales and...
Compute position for the layer in all panels Positions can override this function instead of `compute_panel` if the position computations are independent of the panel. i.e when not colliding
[ "Compute", "position", "for", "the", "layer", "in", "all", "panels" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/positions/position.py#L35-L55
train
214,445
has2k1/plotnine
plotnine/positions/position.py
position.compute_panel
def compute_panel(cls, data, scales, params): """ Positions must override this function Notes ----- Make necessary adjustments to the columns in the dataframe. Create the position transformation functions and use self.transform_position() do the rest. See Also -------- position_jitter.compute_panel """ msg = '{} needs to implement this method' raise NotImplementedError(msg.format(cls.__name__))
python
def compute_panel(cls, data, scales, params): """ Positions must override this function Notes ----- Make necessary adjustments to the columns in the dataframe. Create the position transformation functions and use self.transform_position() do the rest. See Also -------- position_jitter.compute_panel """ msg = '{} needs to implement this method' raise NotImplementedError(msg.format(cls.__name__))
[ "def", "compute_panel", "(", "cls", ",", "data", ",", "scales", ",", "params", ")", ":", "msg", "=", "'{} needs to implement this method'", "raise", "NotImplementedError", "(", "msg", ".", "format", "(", "cls", ".", "__name__", ")", ")" ]
Positions must override this function Notes ----- Make necessary adjustments to the columns in the dataframe. Create the position transformation functions and use self.transform_position() do the rest. See Also -------- position_jitter.compute_panel
[ "Positions", "must", "override", "this", "function" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/positions/position.py#L58-L74
train
214,446
has2k1/plotnine
plotnine/positions/position.py
position.transform_position
def transform_position(data, trans_x=None, trans_y=None): """ Transform all the variables that map onto the x and y scales. Parameters ---------- data : dataframe trans_x : function Transforms x scale mappings Takes one argument, either a scalar or an array-type trans_y : function Transforms y scale mappings Takes one argument, either a scalar or an array-type """ # Aesthetics that map onto the x and y scales X = {'x', 'xmin', 'xmax', 'xend', 'xintercept'} Y = {'y', 'ymin', 'ymax', 'yend', 'yintercept'} if trans_x: xs = [name for name in data.columns if name in X] data[xs] = data[xs].apply(trans_x) if trans_y: ys = [name for name in data.columns if name in Y] data[ys] = data[ys].apply(trans_y) return data
python
def transform_position(data, trans_x=None, trans_y=None): """ Transform all the variables that map onto the x and y scales. Parameters ---------- data : dataframe trans_x : function Transforms x scale mappings Takes one argument, either a scalar or an array-type trans_y : function Transforms y scale mappings Takes one argument, either a scalar or an array-type """ # Aesthetics that map onto the x and y scales X = {'x', 'xmin', 'xmax', 'xend', 'xintercept'} Y = {'y', 'ymin', 'ymax', 'yend', 'yintercept'} if trans_x: xs = [name for name in data.columns if name in X] data[xs] = data[xs].apply(trans_x) if trans_y: ys = [name for name in data.columns if name in Y] data[ys] = data[ys].apply(trans_y) return data
[ "def", "transform_position", "(", "data", ",", "trans_x", "=", "None", ",", "trans_y", "=", "None", ")", ":", "# Aesthetics that map onto the x and y scales", "X", "=", "{", "'x'", ",", "'xmin'", ",", "'xmax'", ",", "'xend'", ",", "'xintercept'", "}", "Y", "...
Transform all the variables that map onto the x and y scales. Parameters ---------- data : dataframe trans_x : function Transforms x scale mappings Takes one argument, either a scalar or an array-type trans_y : function Transforms y scale mappings Takes one argument, either a scalar or an array-type
[ "Transform", "all", "the", "variables", "that", "map", "onto", "the", "x", "and", "y", "scales", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/positions/position.py#L77-L103
train
214,447
has2k1/plotnine
plotnine/positions/position.py
position.from_geom
def from_geom(geom): """ Create and return a position object for the geom Parameters ---------- geom : geom An instantiated geom object. Returns ------- out : position A position object Raises :class:`PlotnineError` if unable to create a `position`. """ name = geom.params['position'] if issubclass(type(name), position): return name if isinstance(name, type) and issubclass(name, position): klass = name elif is_string(name): if not name.startswith('position_'): name = 'position_{}'.format(name) klass = Registry[name] else: raise PlotnineError( 'Unknown position of type {}'.format(type(name))) return klass()
python
def from_geom(geom): """ Create and return a position object for the geom Parameters ---------- geom : geom An instantiated geom object. Returns ------- out : position A position object Raises :class:`PlotnineError` if unable to create a `position`. """ name = geom.params['position'] if issubclass(type(name), position): return name if isinstance(name, type) and issubclass(name, position): klass = name elif is_string(name): if not name.startswith('position_'): name = 'position_{}'.format(name) klass = Registry[name] else: raise PlotnineError( 'Unknown position of type {}'.format(type(name))) return klass()
[ "def", "from_geom", "(", "geom", ")", ":", "name", "=", "geom", ".", "params", "[", "'position'", "]", "if", "issubclass", "(", "type", "(", "name", ")", ",", "position", ")", ":", "return", "name", "if", "isinstance", "(", "name", ",", "type", ")", ...
Create and return a position object for the geom Parameters ---------- geom : geom An instantiated geom object. Returns ------- out : position A position object Raises :class:`PlotnineError` if unable to create a `position`.
[ "Create", "and", "return", "a", "position", "object", "for", "the", "geom" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/positions/position.py#L106-L136
train
214,448
has2k1/plotnine
plotnine/scales/scales.py
make_scale
def make_scale(ae, series, *args, **kwargs): """ Return a proper scale object for the series The scale is for the aesthetic ae, and args & kwargs are passed on to the scale creating class """ stype = scale_type(series) # filter parameters by scale type if stype == 'discrete': with suppress(KeyError): del kwargs['trans'] scale_name = 'scale_{}_{}'.format(ae, stype) scale_klass = Registry[scale_name] return scale_klass(*args, **kwargs)
python
def make_scale(ae, series, *args, **kwargs): """ Return a proper scale object for the series The scale is for the aesthetic ae, and args & kwargs are passed on to the scale creating class """ stype = scale_type(series) # filter parameters by scale type if stype == 'discrete': with suppress(KeyError): del kwargs['trans'] scale_name = 'scale_{}_{}'.format(ae, stype) scale_klass = Registry[scale_name] return scale_klass(*args, **kwargs)
[ "def", "make_scale", "(", "ae", ",", "series", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "stype", "=", "scale_type", "(", "series", ")", "# filter parameters by scale type", "if", "stype", "==", "'discrete'", ":", "with", "suppress", "(", "KeyEr...
Return a proper scale object for the series The scale is for the aesthetic ae, and args & kwargs are passed on to the scale creating class
[ "Return", "a", "proper", "scale", "object", "for", "the", "series" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/scales/scales.py#L293-L309
train
214,449
has2k1/plotnine
plotnine/scales/scales.py
Scales.append
def append(self, sc): """ Add scale 'sc' and remove any previous scales that cover the same aesthetics """ ae = sc.aesthetics[0] cover_ae = self.find(ae) if any(cover_ae): warn(_TPL_DUPLICATE_SCALE.format(ae), PlotnineWarning) idx = cover_ae.index(True) self.pop(idx) # super() does not work well with reloads list.append(self, sc)
python
def append(self, sc): """ Add scale 'sc' and remove any previous scales that cover the same aesthetics """ ae = sc.aesthetics[0] cover_ae = self.find(ae) if any(cover_ae): warn(_TPL_DUPLICATE_SCALE.format(ae), PlotnineWarning) idx = cover_ae.index(True) self.pop(idx) # super() does not work well with reloads list.append(self, sc)
[ "def", "append", "(", "self", ",", "sc", ")", ":", "ae", "=", "sc", ".", "aesthetics", "[", "0", "]", "cover_ae", "=", "self", ".", "find", "(", "ae", ")", "if", "any", "(", "cover_ae", ")", ":", "warn", "(", "_TPL_DUPLICATE_SCALE", ".", "format", ...
Add scale 'sc' and remove any previous scales that cover the same aesthetics
[ "Add", "scale", "sc", "and", "remove", "any", "previous", "scales", "that", "cover", "the", "same", "aesthetics" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/scales/scales.py#L20-L32
train
214,450
has2k1/plotnine
plotnine/scales/scales.py
Scales.input
def input(self): """ Return a list of all the aesthetics covered by the scales. """ lst = [s.aesthetics for s in self] return list(itertools.chain(*lst))
python
def input(self): """ Return a list of all the aesthetics covered by the scales. """ lst = [s.aesthetics for s in self] return list(itertools.chain(*lst))
[ "def", "input", "(", "self", ")", ":", "lst", "=", "[", "s", ".", "aesthetics", "for", "s", "in", "self", "]", "return", "list", "(", "itertools", ".", "chain", "(", "*", "lst", ")", ")" ]
Return a list of all the aesthetics covered by the scales.
[ "Return", "a", "list", "of", "all", "the", "aesthetics", "covered", "by", "the", "scales", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/scales/scales.py#L41-L47
train
214,451
has2k1/plotnine
plotnine/scales/scales.py
Scales.get_scales
def get_scales(self, aesthetic): """ Return the scale for the aesthetic or None if there isn't one. These are the scales specified by the user e.g `ggplot() + scale_x_continuous()` or those added by default during the plot building process """ bool_lst = self.find(aesthetic) try: idx = bool_lst.index(True) return self[idx] except ValueError: return None
python
def get_scales(self, aesthetic): """ Return the scale for the aesthetic or None if there isn't one. These are the scales specified by the user e.g `ggplot() + scale_x_continuous()` or those added by default during the plot building process """ bool_lst = self.find(aesthetic) try: idx = bool_lst.index(True) return self[idx] except ValueError: return None
[ "def", "get_scales", "(", "self", ",", "aesthetic", ")", ":", "bool_lst", "=", "self", ".", "find", "(", "aesthetic", ")", "try", ":", "idx", "=", "bool_lst", ".", "index", "(", "True", ")", "return", "self", "[", "idx", "]", "except", "ValueError", ...
Return the scale for the aesthetic or None if there isn't one. These are the scales specified by the user e.g `ggplot() + scale_x_continuous()` or those added by default during the plot building process
[ "Return", "the", "scale", "for", "the", "aesthetic", "or", "None", "if", "there", "isn", "t", "one", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/scales/scales.py#L49-L64
train
214,452
has2k1/plotnine
plotnine/scales/scales.py
Scales.non_position_scales
def non_position_scales(self): """ Return a list of the non-position scales that are present """ l = [s for s in self if not ('x' in s.aesthetics) and not ('y' in s.aesthetics)] return Scales(l)
python
def non_position_scales(self): """ Return a list of the non-position scales that are present """ l = [s for s in self if not ('x' in s.aesthetics) and not ('y' in s.aesthetics)] return Scales(l)
[ "def", "non_position_scales", "(", "self", ")", ":", "l", "=", "[", "s", "for", "s", "in", "self", "if", "not", "(", "'x'", "in", "s", ".", "aesthetics", ")", "and", "not", "(", "'y'", "in", "s", ".", "aesthetics", ")", "]", "return", "Scales", "...
Return a list of the non-position scales that are present
[ "Return", "a", "list", "of", "the", "non", "-", "position", "scales", "that", "are", "present" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/scales/scales.py#L80-L87
train
214,453
has2k1/plotnine
plotnine/scales/scales.py
Scales.position_scales
def position_scales(self): """ Return a list of the position scales that are present """ l = [s for s in self if ('x' in s.aesthetics) or ('y' in s.aesthetics)] return Scales(l)
python
def position_scales(self): """ Return a list of the position scales that are present """ l = [s for s in self if ('x' in s.aesthetics) or ('y' in s.aesthetics)] return Scales(l)
[ "def", "position_scales", "(", "self", ")", ":", "l", "=", "[", "s", "for", "s", "in", "self", "if", "(", "'x'", "in", "s", ".", "aesthetics", ")", "or", "(", "'y'", "in", "s", ".", "aesthetics", ")", "]", "return", "Scales", "(", "l", ")" ]
Return a list of the position scales that are present
[ "Return", "a", "list", "of", "the", "position", "scales", "that", "are", "present" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/scales/scales.py#L89-L95
train
214,454
has2k1/plotnine
plotnine/scales/scales.py
Scales.train_df
def train_df(self, df, drop=False): """ Train scales from a dataframe """ if (len(df) == 0) or (len(self) == 0): return df # Each scale trains the columns it understands for sc in self: sc.train_df(df) return df
python
def train_df(self, df, drop=False): """ Train scales from a dataframe """ if (len(df) == 0) or (len(self) == 0): return df # Each scale trains the columns it understands for sc in self: sc.train_df(df) return df
[ "def", "train_df", "(", "self", ",", "df", ",", "drop", "=", "False", ")", ":", "if", "(", "len", "(", "df", ")", "==", "0", ")", "or", "(", "len", "(", "self", ")", "==", "0", ")", ":", "return", "df", "# Each scale trains the columns it understands...
Train scales from a dataframe
[ "Train", "scales", "from", "a", "dataframe" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/scales/scales.py#L173-L183
train
214,455
has2k1/plotnine
plotnine/scales/scales.py
Scales.map_df
def map_df(self, df): """ Map values from a dataframe. Returns dataframe """ if (len(df) == 0) or (len(self) == 0): return df # Each scale maps the columns it understands for sc in self: df = sc.map_df(df) return df
python
def map_df(self, df): """ Map values from a dataframe. Returns dataframe """ if (len(df) == 0) or (len(self) == 0): return df # Each scale maps the columns it understands for sc in self: df = sc.map_df(df) return df
[ "def", "map_df", "(", "self", ",", "df", ")", ":", "if", "(", "len", "(", "df", ")", "==", "0", ")", "or", "(", "len", "(", "self", ")", "==", "0", ")", ":", "return", "df", "# Each scale maps the columns it understands", "for", "sc", "in", "self", ...
Map values from a dataframe. Returns dataframe
[ "Map", "values", "from", "a", "dataframe", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/scales/scales.py#L185-L197
train
214,456
has2k1/plotnine
plotnine/scales/scales.py
Scales.transform_df
def transform_df(self, df): """ Transform values in a dataframe. Returns dataframe """ if (len(df) == 0) or (len(self) == 0): return df # Each scale transforms the columns it understands for sc in self: df = sc.transform_df(df) return df
python
def transform_df(self, df): """ Transform values in a dataframe. Returns dataframe """ if (len(df) == 0) or (len(self) == 0): return df # Each scale transforms the columns it understands for sc in self: df = sc.transform_df(df) return df
[ "def", "transform_df", "(", "self", ",", "df", ")", ":", "if", "(", "len", "(", "df", ")", "==", "0", ")", "or", "(", "len", "(", "self", ")", "==", "0", ")", ":", "return", "df", "# Each scale transforms the columns it understands", "for", "sc", "in",...
Transform values in a dataframe. Returns dataframe
[ "Transform", "values", "in", "a", "dataframe", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/scales/scales.py#L199-L211
train
214,457
has2k1/plotnine
plotnine/scales/scales.py
Scales.add_defaults
def add_defaults(self, data, aesthetics): """ Add default scales for the aesthetics if none are present Scales are added only if the aesthetic is mapped to a column in the dataframe. This function may have to be called separately after evaluating the aesthetics. """ if not aesthetics: return # aesthetics with scales aws = set() if self: for s in (set(sc.aesthetics) for sc in self): aws.update(s) # aesthetics that do not have scales present # We preserve the order of the aesthetics new_aesthetics = [x for x in aesthetics.keys() if x not in aws] if not new_aesthetics: return # If a new aesthetic corresponds to a column in the data # frame, find a default scale for the type of data in that # column seen = set() for ae in new_aesthetics: col = aesthetics[ae] if col not in data: col = ae scale_var = aes_to_scale(ae) if self.get_scales(scale_var): continue seen.add(scale_var) try: sc = make_scale(scale_var, data[col]) except PlotnineError: # Skip aesthetics with no scales (e.g. group, order, etc) continue self.append(sc)
python
def add_defaults(self, data, aesthetics): """ Add default scales for the aesthetics if none are present Scales are added only if the aesthetic is mapped to a column in the dataframe. This function may have to be called separately after evaluating the aesthetics. """ if not aesthetics: return # aesthetics with scales aws = set() if self: for s in (set(sc.aesthetics) for sc in self): aws.update(s) # aesthetics that do not have scales present # We preserve the order of the aesthetics new_aesthetics = [x for x in aesthetics.keys() if x not in aws] if not new_aesthetics: return # If a new aesthetic corresponds to a column in the data # frame, find a default scale for the type of data in that # column seen = set() for ae in new_aesthetics: col = aesthetics[ae] if col not in data: col = ae scale_var = aes_to_scale(ae) if self.get_scales(scale_var): continue seen.add(scale_var) try: sc = make_scale(scale_var, data[col]) except PlotnineError: # Skip aesthetics with no scales (e.g. group, order, etc) continue self.append(sc)
[ "def", "add_defaults", "(", "self", ",", "data", ",", "aesthetics", ")", ":", "if", "not", "aesthetics", ":", "return", "# aesthetics with scales", "aws", "=", "set", "(", ")", "if", "self", ":", "for", "s", "in", "(", "set", "(", "sc", ".", "aesthetic...
Add default scales for the aesthetics if none are present Scales are added only if the aesthetic is mapped to a column in the dataframe. This function may have to be called separately after evaluating the aesthetics.
[ "Add", "default", "scales", "for", "the", "aesthetics", "if", "none", "are", "present" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/scales/scales.py#L213-L256
train
214,458
has2k1/plotnine
plotnine/scales/scales.py
Scales.add_missing
def add_missing(self, aesthetics): """ Add missing but required scales. Parameters ---------- aesthetics : list | tuple Aesthetic names. Typically, ('x', 'y'). """ # Keep only aesthetics that don't have scales aesthetics = set(aesthetics) - set(self.input()) for ae in aesthetics: scale_name = 'scale_{}_continuous'.format(ae) scale_f = Registry[scale_name] self.append(scale_f())
python
def add_missing(self, aesthetics): """ Add missing but required scales. Parameters ---------- aesthetics : list | tuple Aesthetic names. Typically, ('x', 'y'). """ # Keep only aesthetics that don't have scales aesthetics = set(aesthetics) - set(self.input()) for ae in aesthetics: scale_name = 'scale_{}_continuous'.format(ae) scale_f = Registry[scale_name] self.append(scale_f())
[ "def", "add_missing", "(", "self", ",", "aesthetics", ")", ":", "# Keep only aesthetics that don't have scales", "aesthetics", "=", "set", "(", "aesthetics", ")", "-", "set", "(", "self", ".", "input", "(", ")", ")", "for", "ae", "in", "aesthetics", ":", "sc...
Add missing but required scales. Parameters ---------- aesthetics : list | tuple Aesthetic names. Typically, ('x', 'y').
[ "Add", "missing", "but", "required", "scales", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/scales/scales.py#L258-L273
train
214,459
has2k1/plotnine
plotnine/facets/labelling.py
label_value
def label_value(label_info, multi_line=True): """ Convert series values to str and maybe concatenate them Parameters ---------- label_info : series Series whose values will be returned multi_line : bool Whether to place each variable on a separate line Returns ------- out : series Label text strings """ label_info = label_info.astype(str) if not multi_line: label_info = collapse_label_lines(label_info) return label_info
python
def label_value(label_info, multi_line=True): """ Convert series values to str and maybe concatenate them Parameters ---------- label_info : series Series whose values will be returned multi_line : bool Whether to place each variable on a separate line Returns ------- out : series Label text strings """ label_info = label_info.astype(str) if not multi_line: label_info = collapse_label_lines(label_info) return label_info
[ "def", "label_value", "(", "label_info", ",", "multi_line", "=", "True", ")", ":", "label_info", "=", "label_info", ".", "astype", "(", "str", ")", "if", "not", "multi_line", ":", "label_info", "=", "collapse_label_lines", "(", "label_info", ")", "return", "...
Convert series values to str and maybe concatenate them Parameters ---------- label_info : series Series whose values will be returned multi_line : bool Whether to place each variable on a separate line Returns ------- out : series Label text strings
[ "Convert", "series", "values", "to", "str", "and", "maybe", "concatenate", "them" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/labelling.py#L14-L34
train
214,460
has2k1/plotnine
plotnine/facets/labelling.py
label_both
def label_both(label_info, multi_line=True, sep=': '): """ Concatenate the index and the value of the series. Parameters ---------- label_info : series Series whose values will be returned. It must have an index made of variable names. multi_line : bool Whether to place each variable on a separate line sep : str Separation between variable name and value Returns ------- out : series Label text strings """ label_info = label_info.astype(str) for var in label_info.index: label_info[var] = '{0}{1}{2}'.format(var, sep, label_info[var]) if not multi_line: label_info = collapse_label_lines(label_info) return label_info
python
def label_both(label_info, multi_line=True, sep=': '): """ Concatenate the index and the value of the series. Parameters ---------- label_info : series Series whose values will be returned. It must have an index made of variable names. multi_line : bool Whether to place each variable on a separate line sep : str Separation between variable name and value Returns ------- out : series Label text strings """ label_info = label_info.astype(str) for var in label_info.index: label_info[var] = '{0}{1}{2}'.format(var, sep, label_info[var]) if not multi_line: label_info = collapse_label_lines(label_info) return label_info
[ "def", "label_both", "(", "label_info", ",", "multi_line", "=", "True", ",", "sep", "=", "': '", ")", ":", "label_info", "=", "label_info", ".", "astype", "(", "str", ")", "for", "var", "in", "label_info", ".", "index", ":", "label_info", "[", "var", "...
Concatenate the index and the value of the series. Parameters ---------- label_info : series Series whose values will be returned. It must have an index made of variable names. multi_line : bool Whether to place each variable on a separate line sep : str Separation between variable name and value Returns ------- out : series Label text strings
[ "Concatenate", "the", "index", "and", "the", "value", "of", "the", "series", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/labelling.py#L37-L63
train
214,461
has2k1/plotnine
plotnine/facets/labelling.py
label_context
def label_context(label_info, multi_line=True, sep=': '): """ Create an unabiguous label string If facetting over a single variable, `label_value` is used, if two or more variables then `label_both` is used. Parameters ---------- label_info : series Series whose values will be returned. It must have an index made of variable names multi_line : bool Whether to place each variable on a separate line sep : str Separation between variable name and value Returns ------- out : str Contatenated label values (or pairs of variable names & values) """ if len(label_info) == 1: return label_value(label_info, multi_line) else: return label_both(label_info, multi_line, sep)
python
def label_context(label_info, multi_line=True, sep=': '): """ Create an unabiguous label string If facetting over a single variable, `label_value` is used, if two or more variables then `label_both` is used. Parameters ---------- label_info : series Series whose values will be returned. It must have an index made of variable names multi_line : bool Whether to place each variable on a separate line sep : str Separation between variable name and value Returns ------- out : str Contatenated label values (or pairs of variable names & values) """ if len(label_info) == 1: return label_value(label_info, multi_line) else: return label_both(label_info, multi_line, sep)
[ "def", "label_context", "(", "label_info", ",", "multi_line", "=", "True", ",", "sep", "=", "': '", ")", ":", "if", "len", "(", "label_info", ")", "==", "1", ":", "return", "label_value", "(", "label_info", ",", "multi_line", ")", "else", ":", "return", ...
Create an unabiguous label string If facetting over a single variable, `label_value` is used, if two or more variables then `label_both` is used. Parameters ---------- label_info : series Series whose values will be returned. It must have an index made of variable names multi_line : bool Whether to place each variable on a separate line sep : str Separation between variable name and value Returns ------- out : str Contatenated label values (or pairs of variable names & values)
[ "Create", "an", "unabiguous", "label", "string" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/labelling.py#L66-L92
train
214,462
has2k1/plotnine
plotnine/facets/labelling.py
as_labeller
def as_labeller(x, default=label_value, multi_line=True): """ Coerse to labeller function Parameters ---------- x : function | dict Object to coerce default : function | str Default labeller. If it is a string, it should be the name of one the labelling functions provided by plotnine. multi_line : bool Whether to place each variable on a separate line Returns ------- out : function Labelling function """ if x is None: x = default # One of the labelling functions as string with suppress(KeyError, TypeError): x = LABELLERS[x] # x is a labeller with suppress(AttributeError): if x.__name__ == '_labeller': return x def _labeller(label_info): label_info = pd.Series(label_info).astype(str) if callable(x) and x.__name__ in LABELLERS: # labellers in this module return x(label_info) elif hasattr(x, '__contains__'): # dictionary lookup for var in label_info.index: if label_info[var] in x: label_info[var] = x[label_info[var]] return label_info elif callable(x): # generic function for var in label_info.index: label_info[var] = x(label_info[var]) return label_info else: msg = "Could not use '{0}' for labelling." raise PlotnineError(msg.format(x)) return _labeller
python
def as_labeller(x, default=label_value, multi_line=True): """ Coerse to labeller function Parameters ---------- x : function | dict Object to coerce default : function | str Default labeller. If it is a string, it should be the name of one the labelling functions provided by plotnine. multi_line : bool Whether to place each variable on a separate line Returns ------- out : function Labelling function """ if x is None: x = default # One of the labelling functions as string with suppress(KeyError, TypeError): x = LABELLERS[x] # x is a labeller with suppress(AttributeError): if x.__name__ == '_labeller': return x def _labeller(label_info): label_info = pd.Series(label_info).astype(str) if callable(x) and x.__name__ in LABELLERS: # labellers in this module return x(label_info) elif hasattr(x, '__contains__'): # dictionary lookup for var in label_info.index: if label_info[var] in x: label_info[var] = x[label_info[var]] return label_info elif callable(x): # generic function for var in label_info.index: label_info[var] = x(label_info[var]) return label_info else: msg = "Could not use '{0}' for labelling." raise PlotnineError(msg.format(x)) return _labeller
[ "def", "as_labeller", "(", "x", ",", "default", "=", "label_value", ",", "multi_line", "=", "True", ")", ":", "if", "x", "is", "None", ":", "x", "=", "default", "# One of the labelling functions as string", "with", "suppress", "(", "KeyError", ",", "TypeError"...
Coerse to labeller function Parameters ---------- x : function | dict Object to coerce default : function | str Default labeller. If it is a string, it should be the name of one the labelling functions provided by plotnine. multi_line : bool Whether to place each variable on a separate line Returns ------- out : function Labelling function
[ "Coerse", "to", "labeller", "function" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/labelling.py#L101-L154
train
214,463
has2k1/plotnine
plotnine/facets/labelling.py
labeller
def labeller(rows=None, cols=None, multi_line=True, default=label_value, **kwargs): """ Return a labeller function Parameters ---------- rows : str | function | None How to label the rows cols : str | function | None How to label the columns multi_line : bool Whether to place each variable on a separate line default : function | str Fallback labelling function. If it is a string, it should be the name of one the labelling functions provided by plotnine. kwargs : dict {variable name : function | string} pairs for renaming variables. A function to rename the variable or a string name. Returns ------- out : function Function to do the labelling """ # Sort out the labellers along each dimension rows_labeller = as_labeller(rows, default, multi_line) cols_labeller = as_labeller(cols, default, multi_line) def _labeller(label_info): # When there is no variable specific labeller, # use that of the dimension if label_info._meta['dimension'] == 'rows': margin_labeller = rows_labeller else: margin_labeller = cols_labeller # Labelling functions expect string values label_info = label_info.astype(str) # Each facetting variable is labelled independently for name, value in label_info.iteritems(): func = as_labeller(kwargs.get(name), margin_labeller) new_info = func(label_info[[name]]) label_info[name] = new_info[name] if not multi_line: label_info = collapse_label_lines(label_info) return label_info return _labeller
python
def labeller(rows=None, cols=None, multi_line=True, default=label_value, **kwargs): """ Return a labeller function Parameters ---------- rows : str | function | None How to label the rows cols : str | function | None How to label the columns multi_line : bool Whether to place each variable on a separate line default : function | str Fallback labelling function. If it is a string, it should be the name of one the labelling functions provided by plotnine. kwargs : dict {variable name : function | string} pairs for renaming variables. A function to rename the variable or a string name. Returns ------- out : function Function to do the labelling """ # Sort out the labellers along each dimension rows_labeller = as_labeller(rows, default, multi_line) cols_labeller = as_labeller(cols, default, multi_line) def _labeller(label_info): # When there is no variable specific labeller, # use that of the dimension if label_info._meta['dimension'] == 'rows': margin_labeller = rows_labeller else: margin_labeller = cols_labeller # Labelling functions expect string values label_info = label_info.astype(str) # Each facetting variable is labelled independently for name, value in label_info.iteritems(): func = as_labeller(kwargs.get(name), margin_labeller) new_info = func(label_info[[name]]) label_info[name] = new_info[name] if not multi_line: label_info = collapse_label_lines(label_info) return label_info return _labeller
[ "def", "labeller", "(", "rows", "=", "None", ",", "cols", "=", "None", ",", "multi_line", "=", "True", ",", "default", "=", "label_value", ",", "*", "*", "kwargs", ")", ":", "# Sort out the labellers along each dimension", "rows_labeller", "=", "as_labeller", ...
Return a labeller function Parameters ---------- rows : str | function | None How to label the rows cols : str | function | None How to label the columns multi_line : bool Whether to place each variable on a separate line default : function | str Fallback labelling function. If it is a string, it should be the name of one the labelling functions provided by plotnine. kwargs : dict {variable name : function | string} pairs for renaming variables. A function to rename the variable or a string name. Returns ------- out : function Function to do the labelling
[ "Return", "a", "labeller", "function" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/labelling.py#L157-L210
train
214,464
has2k1/plotnine
plotnine/geoms/geom_violin.py
make_quantile_df
def make_quantile_df(data, draw_quantiles): """ Return a dataframe with info needed to draw quantile segments """ dens = data['density'].cumsum() / data['density'].sum() ecdf = interp1d(dens, data['y'], assume_sorted=True) ys = ecdf(draw_quantiles) # Get the violin bounds for the requested quantiles violin_xminvs = interp1d(data['y'], data['xminv'])(ys) violin_xmaxvs = interp1d(data['y'], data['xmaxv'])(ys) data = pd.DataFrame({ 'x': interleave(violin_xminvs, violin_xmaxvs), 'y': np.repeat(ys, 2), 'group': np.repeat(np.arange(1, len(ys)+1), 2)}) return data
python
def make_quantile_df(data, draw_quantiles): """ Return a dataframe with info needed to draw quantile segments """ dens = data['density'].cumsum() / data['density'].sum() ecdf = interp1d(dens, data['y'], assume_sorted=True) ys = ecdf(draw_quantiles) # Get the violin bounds for the requested quantiles violin_xminvs = interp1d(data['y'], data['xminv'])(ys) violin_xmaxvs = interp1d(data['y'], data['xmaxv'])(ys) data = pd.DataFrame({ 'x': interleave(violin_xminvs, violin_xmaxvs), 'y': np.repeat(ys, 2), 'group': np.repeat(np.arange(1, len(ys)+1), 2)}) return data
[ "def", "make_quantile_df", "(", "data", ",", "draw_quantiles", ")", ":", "dens", "=", "data", "[", "'density'", "]", ".", "cumsum", "(", ")", "/", "data", "[", "'density'", "]", ".", "sum", "(", ")", "ecdf", "=", "interp1d", "(", "dens", ",", "data",...
Return a dataframe with info needed to draw quantile segments
[ "Return", "a", "dataframe", "with", "info", "needed", "to", "draw", "quantile", "segments" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/geoms/geom_violin.py#L97-L114
train
214,465
has2k1/plotnine
plotnine/stats/stat.py
stat.from_geom
def from_geom(geom): """ Return an instantiated stat object stats should not override this method. Parameters ---------- geom : geom `geom` Returns ------- out : stat A stat object Raises ------ :class:`PlotnineError` if unable to create a `stat`. """ name = geom.params['stat'] kwargs = geom._kwargs # More stable when reloading modules than # using issubclass if (not isinstance(name, type) and hasattr(name, 'compute_layer')): return name if isinstance(name, stat): return name elif isinstance(name, type) and issubclass(name, stat): klass = name elif is_string(name): if not name.startswith('stat_'): name = 'stat_{}'.format(name) klass = Registry[name] else: raise PlotnineError( 'Unknown stat of type {}'.format(type(name))) valid_kwargs = ( (klass.aesthetics() | klass.DEFAULT_PARAMS.keys()) & kwargs.keys()) params = {k: kwargs[k] for k in valid_kwargs} return klass(geom=geom, **params)
python
def from_geom(geom): """ Return an instantiated stat object stats should not override this method. Parameters ---------- geom : geom `geom` Returns ------- out : stat A stat object Raises ------ :class:`PlotnineError` if unable to create a `stat`. """ name = geom.params['stat'] kwargs = geom._kwargs # More stable when reloading modules than # using issubclass if (not isinstance(name, type) and hasattr(name, 'compute_layer')): return name if isinstance(name, stat): return name elif isinstance(name, type) and issubclass(name, stat): klass = name elif is_string(name): if not name.startswith('stat_'): name = 'stat_{}'.format(name) klass = Registry[name] else: raise PlotnineError( 'Unknown stat of type {}'.format(type(name))) valid_kwargs = ( (klass.aesthetics() | klass.DEFAULT_PARAMS.keys()) & kwargs.keys()) params = {k: kwargs[k] for k in valid_kwargs} return klass(geom=geom, **params)
[ "def", "from_geom", "(", "geom", ")", ":", "name", "=", "geom", ".", "params", "[", "'stat'", "]", "kwargs", "=", "geom", ".", "_kwargs", "# More stable when reloading modules than", "# using issubclass", "if", "(", "not", "isinstance", "(", "name", ",", "type...
Return an instantiated stat object stats should not override this method. Parameters ---------- geom : geom `geom` Returns ------- out : stat A stat object Raises ------ :class:`PlotnineError` if unable to create a `stat`.
[ "Return", "an", "instantiated", "stat", "object" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/stat.py#L49-L95
train
214,466
has2k1/plotnine
plotnine/stats/stat.py
stat.aesthetics
def aesthetics(cls): """ Return a set of all non-computed aesthetics for this stat. stats should not override this method. """ aesthetics = cls.REQUIRED_AES.copy() calculated = get_calculated_aes(cls.DEFAULT_AES) for ae in set(cls.DEFAULT_AES) - set(calculated): aesthetics.add(ae) return aesthetics
python
def aesthetics(cls): """ Return a set of all non-computed aesthetics for this stat. stats should not override this method. """ aesthetics = cls.REQUIRED_AES.copy() calculated = get_calculated_aes(cls.DEFAULT_AES) for ae in set(cls.DEFAULT_AES) - set(calculated): aesthetics.add(ae) return aesthetics
[ "def", "aesthetics", "(", "cls", ")", ":", "aesthetics", "=", "cls", ".", "REQUIRED_AES", ".", "copy", "(", ")", "calculated", "=", "get_calculated_aes", "(", "cls", ".", "DEFAULT_AES", ")", "for", "ae", "in", "set", "(", "cls", ".", "DEFAULT_AES", ")", ...
Return a set of all non-computed aesthetics for this stat. stats should not override this method.
[ "Return", "a", "set", "of", "all", "non", "-", "computed", "aesthetics", "for", "this", "stat", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/stat.py#L118-L128
train
214,467
has2k1/plotnine
plotnine/stats/stat.py
stat.compute_layer
def compute_layer(cls, data, params, layout): """ Calculate statistics for this layers This is the top-most computation method for the stat. It does not do any computations, but it knows how to verify the data, partition it call the next computation method and merge results. stats should not override this method. Parameters ---------- data : panda.DataFrame Data points for all objects in a layer. params : dict Stat parameters layout : plotnine.layout.Layout Panel layout information """ check_required_aesthetics( cls.REQUIRED_AES, list(data.columns) + list(params.keys()), cls.__name__) data = remove_missing( data, na_rm=params.get('na_rm', False), vars=list(cls.REQUIRED_AES | cls.NON_MISSING_AES), name=cls.__name__, finite=True) def fn(pdata): """ Helper compute function """ # Given data belonging to a specific panel, grab # the corresponding scales and call the method # that does the real computation if len(pdata) == 0: return pdata pscales = layout.get_scales(pdata['PANEL'].iat[0]) return cls.compute_panel(pdata, pscales, **params) return groupby_apply(data, 'PANEL', fn)
python
def compute_layer(cls, data, params, layout): """ Calculate statistics for this layers This is the top-most computation method for the stat. It does not do any computations, but it knows how to verify the data, partition it call the next computation method and merge results. stats should not override this method. Parameters ---------- data : panda.DataFrame Data points for all objects in a layer. params : dict Stat parameters layout : plotnine.layout.Layout Panel layout information """ check_required_aesthetics( cls.REQUIRED_AES, list(data.columns) + list(params.keys()), cls.__name__) data = remove_missing( data, na_rm=params.get('na_rm', False), vars=list(cls.REQUIRED_AES | cls.NON_MISSING_AES), name=cls.__name__, finite=True) def fn(pdata): """ Helper compute function """ # Given data belonging to a specific panel, grab # the corresponding scales and call the method # that does the real computation if len(pdata) == 0: return pdata pscales = layout.get_scales(pdata['PANEL'].iat[0]) return cls.compute_panel(pdata, pscales, **params) return groupby_apply(data, 'PANEL', fn)
[ "def", "compute_layer", "(", "cls", ",", "data", ",", "params", ",", "layout", ")", ":", "check_required_aesthetics", "(", "cls", ".", "REQUIRED_AES", ",", "list", "(", "data", ".", "columns", ")", "+", "list", "(", "params", ".", "keys", "(", ")", ")"...
Calculate statistics for this layers This is the top-most computation method for the stat. It does not do any computations, but it knows how to verify the data, partition it call the next computation method and merge results. stats should not override this method. Parameters ---------- data : panda.DataFrame Data points for all objects in a layer. params : dict Stat parameters layout : plotnine.layout.Layout Panel layout information
[ "Calculate", "statistics", "for", "this", "layers" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/stat.py#L223-L267
train
214,468
has2k1/plotnine
plotnine/stats/stat.py
stat.compute_panel
def compute_panel(cls, data, scales, **params): """ Calculate the stats of all the groups and return the results in a single dataframe. This is a default function that can be overriden by individual stats Parameters ---------- data : dataframe data for the computing scales : types.SimpleNamespace x (``scales.x``) and y (``scales.y``) scale objects. The most likely reason to use scale information is to find out the physical size of a scale. e.g:: range_x = scales.x.dimension() params : dict The parameters for the stat. It includes default values if user did not set a particular parameter. """ if not len(data): return type(data)() stats = [] for _, old in data.groupby('group'): new = cls.compute_group(old, scales, **params) unique = uniquecols(old) missing = unique.columns.difference(new.columns) u = unique.loc[[0]*len(new), missing].reset_index(drop=True) # concat can have problems with empty dataframes that # have an index if u.empty and len(u): u = type(data)() df = pd.concat([new, u], axis=1) stats.append(df) stats = pd.concat(stats, axis=0, ignore_index=True) # Note: If the data coming in has columns with non-unique # values with-in group(s), this implementation loses the # columns. Individual stats may want to do some preparation # before then fall back on this implementation or override # it completely. return stats
python
def compute_panel(cls, data, scales, **params): """ Calculate the stats of all the groups and return the results in a single dataframe. This is a default function that can be overriden by individual stats Parameters ---------- data : dataframe data for the computing scales : types.SimpleNamespace x (``scales.x``) and y (``scales.y``) scale objects. The most likely reason to use scale information is to find out the physical size of a scale. e.g:: range_x = scales.x.dimension() params : dict The parameters for the stat. It includes default values if user did not set a particular parameter. """ if not len(data): return type(data)() stats = [] for _, old in data.groupby('group'): new = cls.compute_group(old, scales, **params) unique = uniquecols(old) missing = unique.columns.difference(new.columns) u = unique.loc[[0]*len(new), missing].reset_index(drop=True) # concat can have problems with empty dataframes that # have an index if u.empty and len(u): u = type(data)() df = pd.concat([new, u], axis=1) stats.append(df) stats = pd.concat(stats, axis=0, ignore_index=True) # Note: If the data coming in has columns with non-unique # values with-in group(s), this implementation loses the # columns. Individual stats may want to do some preparation # before then fall back on this implementation or override # it completely. return stats
[ "def", "compute_panel", "(", "cls", ",", "data", ",", "scales", ",", "*", "*", "params", ")", ":", "if", "not", "len", "(", "data", ")", ":", "return", "type", "(", "data", ")", "(", ")", "stats", "=", "[", "]", "for", "_", ",", "old", "in", ...
Calculate the stats of all the groups and return the results in a single dataframe. This is a default function that can be overriden by individual stats Parameters ---------- data : dataframe data for the computing scales : types.SimpleNamespace x (``scales.x``) and y (``scales.y``) scale objects. The most likely reason to use scale information is to find out the physical size of a scale. e.g:: range_x = scales.x.dimension() params : dict The parameters for the stat. It includes default values if user did not set a particular parameter.
[ "Calculate", "the", "stats", "of", "all", "the", "groups", "and", "return", "the", "results", "in", "a", "single", "dataframe", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/stat.py#L270-L317
train
214,469
has2k1/plotnine
plotnine/stats/density.py
kde_scipy
def kde_scipy(data, grid, **kwargs): """ Kernel Density Estimation with Scipy Parameters ---------- data : numpy.array Data points used to compute a density estimator. It has `n x p` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x p` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions """ kde = gaussian_kde(data.T, **kwargs) return kde.evaluate(grid.T)
python
def kde_scipy(data, grid, **kwargs): """ Kernel Density Estimation with Scipy Parameters ---------- data : numpy.array Data points used to compute a density estimator. It has `n x p` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x p` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions """ kde = gaussian_kde(data.T, **kwargs) return kde.evaluate(grid.T)
[ "def", "kde_scipy", "(", "data", ",", "grid", ",", "*", "*", "kwargs", ")", ":", "kde", "=", "gaussian_kde", "(", "data", ".", "T", ",", "*", "*", "kwargs", ")", "return", "kde", ".", "evaluate", "(", "grid", ".", "T", ")" ]
Kernel Density Estimation with Scipy Parameters ---------- data : numpy.array Data points used to compute a density estimator. It has `n x p` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x p` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions
[ "Kernel", "Density", "Estimation", "with", "Scipy" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/density.py#L23-L44
train
214,470
has2k1/plotnine
plotnine/stats/density.py
kde_statsmodels_u
def kde_statsmodels_u(data, grid, **kwargs): """ Univariate Kernel Density Estimation with Statsmodels Parameters ---------- data : numpy.array Data points used to compute a density estimator. It has `n x 1` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x 1` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions """ kde = KDEUnivariate(data) kde.fit(**kwargs) return kde.evaluate(grid)
python
def kde_statsmodels_u(data, grid, **kwargs): """ Univariate Kernel Density Estimation with Statsmodels Parameters ---------- data : numpy.array Data points used to compute a density estimator. It has `n x 1` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x 1` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions """ kde = KDEUnivariate(data) kde.fit(**kwargs) return kde.evaluate(grid)
[ "def", "kde_statsmodels_u", "(", "data", ",", "grid", ",", "*", "*", "kwargs", ")", ":", "kde", "=", "KDEUnivariate", "(", "data", ")", "kde", ".", "fit", "(", "*", "*", "kwargs", ")", "return", "kde", ".", "evaluate", "(", "grid", ")" ]
Univariate Kernel Density Estimation with Statsmodels Parameters ---------- data : numpy.array Data points used to compute a density estimator. It has `n x 1` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x 1` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions
[ "Univariate", "Kernel", "Density", "Estimation", "with", "Statsmodels" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/density.py#L47-L69
train
214,471
has2k1/plotnine
plotnine/stats/density.py
kde_statsmodels_m
def kde_statsmodels_m(data, grid, **kwargs): """ Multivariate Kernel Density Estimation with Statsmodels Parameters ---------- data : numpy.array Data points used to compute a density estimator. It has `n x p` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x p` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions """ kde = KDEMultivariate(data, **kwargs) return kde.pdf(grid)
python
def kde_statsmodels_m(data, grid, **kwargs): """ Multivariate Kernel Density Estimation with Statsmodels Parameters ---------- data : numpy.array Data points used to compute a density estimator. It has `n x p` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x p` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions """ kde = KDEMultivariate(data, **kwargs) return kde.pdf(grid)
[ "def", "kde_statsmodels_m", "(", "data", ",", "grid", ",", "*", "*", "kwargs", ")", ":", "kde", "=", "KDEMultivariate", "(", "data", ",", "*", "*", "kwargs", ")", "return", "kde", ".", "pdf", "(", "grid", ")" ]
Multivariate Kernel Density Estimation with Statsmodels Parameters ---------- data : numpy.array Data points used to compute a density estimator. It has `n x p` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x p` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions
[ "Multivariate", "Kernel", "Density", "Estimation", "with", "Statsmodels" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/density.py#L72-L93
train
214,472
has2k1/plotnine
plotnine/stats/density.py
kde_sklearn
def kde_sklearn(data, grid, **kwargs): """ Kernel Density Estimation with Scikit-learn Parameters ---------- data : numpy.array Data points used to compute a density estimator. It has `n x p` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x p` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions """ kde_skl = KernelDensity(**kwargs) kde_skl.fit(data) # score_samples() returns the log-likelihood of the samples log_pdf = kde_skl.score_samples(grid) return np.exp(log_pdf)
python
def kde_sklearn(data, grid, **kwargs): """ Kernel Density Estimation with Scikit-learn Parameters ---------- data : numpy.array Data points used to compute a density estimator. It has `n x p` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x p` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions """ kde_skl = KernelDensity(**kwargs) kde_skl.fit(data) # score_samples() returns the log-likelihood of the samples log_pdf = kde_skl.score_samples(grid) return np.exp(log_pdf)
[ "def", "kde_sklearn", "(", "data", ",", "grid", ",", "*", "*", "kwargs", ")", ":", "kde_skl", "=", "KernelDensity", "(", "*", "*", "kwargs", ")", "kde_skl", ".", "fit", "(", "data", ")", "# score_samples() returns the log-likelihood of the samples", "log_pdf", ...
Kernel Density Estimation with Scikit-learn Parameters ---------- data : numpy.array Data points used to compute a density estimator. It has `n x p` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x p` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions
[ "Kernel", "Density", "Estimation", "with", "Scikit", "-", "learn" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/density.py#L96-L120
train
214,473
has2k1/plotnine
plotnine/stats/density.py
kde
def kde(data, grid, package, **kwargs): """ Kernel Density Estimation Parameters ---------- package : str Package whose kernel density estimation to use. Should be one of `['statsmodels-u', 'statsmodels-m', 'scipy', 'sklearn']`. data : numpy.array Data points used to compute a density estimator. It has `n x p` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x p` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions """ if package == 'statsmodels': package = 'statsmodels-m' func = KDE_FUNCS[package] return func(data, grid, **kwargs)
python
def kde(data, grid, package, **kwargs): """ Kernel Density Estimation Parameters ---------- package : str Package whose kernel density estimation to use. Should be one of `['statsmodels-u', 'statsmodels-m', 'scipy', 'sklearn']`. data : numpy.array Data points used to compute a density estimator. It has `n x p` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x p` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions """ if package == 'statsmodels': package = 'statsmodels-m' func = KDE_FUNCS[package] return func(data, grid, **kwargs)
[ "def", "kde", "(", "data", ",", "grid", ",", "package", ",", "*", "*", "kwargs", ")", ":", "if", "package", "==", "'statsmodels'", ":", "package", "=", "'statsmodels-m'", "func", "=", "KDE_FUNCS", "[", "package", "]", "return", "func", "(", "data", ","...
Kernel Density Estimation Parameters ---------- package : str Package whose kernel density estimation to use. Should be one of `['statsmodels-u', 'statsmodels-m', 'scipy', 'sklearn']`. data : numpy.array Data points used to compute a density estimator. It has `n x p` dimensions, representing n points and p variables. grid : numpy.array Data points at which the desity will be estimated. It has `m x p` dimensions, representing m points and p variables. Returns ------- out : numpy.array Density estimate. Has `m x 1` dimensions
[ "Kernel", "Density", "Estimation" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/density.py#L132-L159
train
214,474
has2k1/plotnine
plotnine/positions/position_dodge.py
position_dodge.strategy
def strategy(data, params): """ Dodge overlapping interval Assumes that each set has the same horizontal position. """ width = params['width'] with suppress(TypeError): iter(width) width = np.asarray(width) width = width[data.index] udata_group = data['group'].drop_duplicates() n = params.get('n', None) if n is None: n = len(udata_group) if n == 1: return data if not all([col in data.columns for col in ['xmin', 'xmax']]): data['xmin'] = data['x'] data['xmax'] = data['x'] d_width = np.max(data['xmax'] - data['xmin']) # Have a new group index from 1 to number of groups. # This might be needed if the group numbers in this set don't # include all of 1:n udata_group = udata_group.sort_values() groupidx = match(data['group'], udata_group) groupidx = np.asarray(groupidx) + 1 # Find the center for each group, then use that to # calculate xmin and xmax data['x'] = data['x'] + width * ((groupidx - 0.5) / n - 0.5) data['xmin'] = data['x'] - (d_width / n) / 2 data['xmax'] = data['x'] + (d_width / n) / 2 return data
python
def strategy(data, params): """ Dodge overlapping interval Assumes that each set has the same horizontal position. """ width = params['width'] with suppress(TypeError): iter(width) width = np.asarray(width) width = width[data.index] udata_group = data['group'].drop_duplicates() n = params.get('n', None) if n is None: n = len(udata_group) if n == 1: return data if not all([col in data.columns for col in ['xmin', 'xmax']]): data['xmin'] = data['x'] data['xmax'] = data['x'] d_width = np.max(data['xmax'] - data['xmin']) # Have a new group index from 1 to number of groups. # This might be needed if the group numbers in this set don't # include all of 1:n udata_group = udata_group.sort_values() groupidx = match(data['group'], udata_group) groupidx = np.asarray(groupidx) + 1 # Find the center for each group, then use that to # calculate xmin and xmax data['x'] = data['x'] + width * ((groupidx - 0.5) / n - 0.5) data['xmin'] = data['x'] - (d_width / n) / 2 data['xmax'] = data['x'] + (d_width / n) / 2 return data
[ "def", "strategy", "(", "data", ",", "params", ")", ":", "width", "=", "params", "[", "'width'", "]", "with", "suppress", "(", "TypeError", ")", ":", "iter", "(", "width", ")", "width", "=", "np", ".", "asarray", "(", "width", ")", "width", "=", "w...
Dodge overlapping interval Assumes that each set has the same horizontal position.
[ "Dodge", "overlapping", "interval" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/positions/position_dodge.py#L59-L98
train
214,475
has2k1/plotnine
plotnine/facets/facet_grid.py
parse_grid_facets
def parse_grid_facets(facets): """ Return two lists of facetting variables, for the rows & columns """ valid_seqs = ["('var1', '.')", "('var1', 'var2')", "('.', 'var1')", "((var1, var2), (var3, var4))"] error_msg_s = ("Valid sequences for specifying 'facets' look like" " {}".format(valid_seqs)) valid_forms = ['var1 ~ .', 'var1 ~ var2', '. ~ var1', 'var1 + var2 ~ var3 + var4', '. ~ func(var1) + func(var2)', '. ~ func(var1+var3) + func(var2)' ] + valid_seqs error_msg_f = ("Valid formula for 'facet_grid' look like" " {}".format(valid_forms)) if isinstance(facets, (tuple, list)): if len(facets) != 2: raise PlotnineError(error_msg_s) rows, cols = facets if isinstance(rows, str): rows = [] if rows == '.' else [rows] if isinstance(cols, str): cols = [] if cols == '.' else [cols] return rows, cols if not isinstance(facets, str): raise PlotnineError(error_msg_f) # Example of allowed formulae # 'c ~ a + b' # '. ~ func(a) + func(b)' # 'func(c) ~ func(a+1) + func(b+2)' try: lhs, rhs = facets.split('~') except ValueError: raise PlotnineError(error_msg_s) else: lhs = lhs.strip() rhs = rhs.strip() lhs = ensure_var_or_dot(lhs) rhs = ensure_var_or_dot(rhs) lsplitter = ' + ' if ' + ' in lhs else '+' rsplitter = ' + ' if ' + ' in rhs else '+' if lhs == '.': rows = [] else: rows = [var.strip() for var in lhs.split(lsplitter)] if rhs == '.': cols = [] else: cols = [var.strip() for var in rhs.split(rsplitter)] return rows, cols
python
def parse_grid_facets(facets): """ Return two lists of facetting variables, for the rows & columns """ valid_seqs = ["('var1', '.')", "('var1', 'var2')", "('.', 'var1')", "((var1, var2), (var3, var4))"] error_msg_s = ("Valid sequences for specifying 'facets' look like" " {}".format(valid_seqs)) valid_forms = ['var1 ~ .', 'var1 ~ var2', '. ~ var1', 'var1 + var2 ~ var3 + var4', '. ~ func(var1) + func(var2)', '. ~ func(var1+var3) + func(var2)' ] + valid_seqs error_msg_f = ("Valid formula for 'facet_grid' look like" " {}".format(valid_forms)) if isinstance(facets, (tuple, list)): if len(facets) != 2: raise PlotnineError(error_msg_s) rows, cols = facets if isinstance(rows, str): rows = [] if rows == '.' else [rows] if isinstance(cols, str): cols = [] if cols == '.' else [cols] return rows, cols if not isinstance(facets, str): raise PlotnineError(error_msg_f) # Example of allowed formulae # 'c ~ a + b' # '. ~ func(a) + func(b)' # 'func(c) ~ func(a+1) + func(b+2)' try: lhs, rhs = facets.split('~') except ValueError: raise PlotnineError(error_msg_s) else: lhs = lhs.strip() rhs = rhs.strip() lhs = ensure_var_or_dot(lhs) rhs = ensure_var_or_dot(rhs) lsplitter = ' + ' if ' + ' in lhs else '+' rsplitter = ' + ' if ' + ' in rhs else '+' if lhs == '.': rows = [] else: rows = [var.strip() for var in lhs.split(lsplitter)] if rhs == '.': cols = [] else: cols = [var.strip() for var in rhs.split(rsplitter)] return rows, cols
[ "def", "parse_grid_facets", "(", "facets", ")", ":", "valid_seqs", "=", "[", "\"('var1', '.')\"", ",", "\"('var1', 'var2')\"", ",", "\"('.', 'var1')\"", ",", "\"((var1, var2), (var3, var4))\"", "]", "error_msg_s", "=", "(", "\"Valid sequences for specifying 'facets' look like...
Return two lists of facetting variables, for the rows & columns
[ "Return", "two", "lists", "of", "facetting", "variables", "for", "the", "rows", "&", "columns" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet_grid.py#L254-L315
train
214,476
has2k1/plotnine
plotnine/coords/coord.py
coord.expand_default
def expand_default(self, scale, discrete=(0, 0.6, 0, 0.6), continuous=(0.05, 0, 0.05, 0)): """ Expand a single scale """ if is_waive(scale.expand): if isinstance(scale, scale_discrete): return discrete elif isinstance(scale, scale_continuous): return continuous else: name = scale.__class__.__name__ msg = "Failed to expand scale '{}'".format(name) raise PlotnineError(msg) else: return scale.expand
python
def expand_default(self, scale, discrete=(0, 0.6, 0, 0.6), continuous=(0.05, 0, 0.05, 0)): """ Expand a single scale """ if is_waive(scale.expand): if isinstance(scale, scale_discrete): return discrete elif isinstance(scale, scale_continuous): return continuous else: name = scale.__class__.__name__ msg = "Failed to expand scale '{}'".format(name) raise PlotnineError(msg) else: return scale.expand
[ "def", "expand_default", "(", "self", ",", "scale", ",", "discrete", "=", "(", "0", ",", "0.6", ",", "0", ",", "0.6", ")", ",", "continuous", "=", "(", "0.05", ",", "0", ",", "0.05", ",", "0", ")", ")", ":", "if", "is_waive", "(", "scale", ".",...
Expand a single scale
[ "Expand", "a", "single", "scale" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/coords/coord.py#L113-L128
train
214,477
has2k1/plotnine
plotnine/doctools.py
dict_to_table
def dict_to_table(header, contents): """ Convert dict to table Parameters ---------- header : tuple Table header. Should have a length of 2. contents : dict The key becomes column 1 of table and the value becomes column 2 of table. """ def to_text(row): name, value = row m = max_col1_size + 1 - len(name) spacing = ' ' * m return ''.join([name, spacing, value]) thead = tuple(str(col) for col in header) rows = [] for name, value in contents.items(): # code highlighting if value != '': if isinstance(value, str): value = "'{}'".format(value) value = ':py:`{}`'.format(value) rows.append((name, value)) n = np.max([len(header[0])] + [len(col1) for col1, _ in rows]) hborder = tuple('='*n for col in header) rows = [hborder, thead, hborder] + rows + [hborder] max_col1_size = np.max([len(col1) for col1, _ in rows]) table = '\n'.join([to_text(row) for row in rows]) return table
python
def dict_to_table(header, contents): """ Convert dict to table Parameters ---------- header : tuple Table header. Should have a length of 2. contents : dict The key becomes column 1 of table and the value becomes column 2 of table. """ def to_text(row): name, value = row m = max_col1_size + 1 - len(name) spacing = ' ' * m return ''.join([name, spacing, value]) thead = tuple(str(col) for col in header) rows = [] for name, value in contents.items(): # code highlighting if value != '': if isinstance(value, str): value = "'{}'".format(value) value = ':py:`{}`'.format(value) rows.append((name, value)) n = np.max([len(header[0])] + [len(col1) for col1, _ in rows]) hborder = tuple('='*n for col in header) rows = [hborder, thead, hborder] + rows + [hborder] max_col1_size = np.max([len(col1) for col1, _ in rows]) table = '\n'.join([to_text(row) for row in rows]) return table
[ "def", "dict_to_table", "(", "header", ",", "contents", ")", ":", "def", "to_text", "(", "row", ")", ":", "name", ",", "value", "=", "row", "m", "=", "max_col1_size", "+", "1", "-", "len", "(", "name", ")", "spacing", "=", "' '", "*", "m", "return"...
Convert dict to table Parameters ---------- header : tuple Table header. Should have a length of 2. contents : dict The key becomes column 1 of table and the value becomes column 2 of table.
[ "Convert", "dict", "to", "table" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/doctools.py#L128-L163
train
214,478
has2k1/plotnine
plotnine/doctools.py
make_signature
def make_signature(name, params, common_params, common_param_values): """ Create a signature for a geom or stat Gets the DEFAULT_PARAMS (params) and creates are comma separated list of the `name=value` pairs. The common_params come first in the list, and they get take their values from either the params-dict or the common_geom_param_values-dict. """ tokens = [] seen = set() def tokens_append(key, value): if isinstance(value, str): value = "'{}'".format(value) tokens.append('{}={}'.format(key, value)) # preferred params come first for key in common_params: seen.add(key) try: value = params[key] except KeyError: value = common_param_values[key] tokens_append(key, value) # other params (these are the geom/stat specific parameters for key in (set(params) - seen): tokens_append(key, params[key]) # name, 1 opening bracket, 4 spaces in SIGNATURE_TPL s1 = name + '(' s2 = ', '.join(tokens) + ', **kwargs)' line_width = 78 - len(s1) indent_spaces = ' ' * (len(s1) + 4) newline_and_space = '\n' + indent_spaces s2_lines = wrap(s2, width=line_width) return s1 + newline_and_space.join(s2_lines)
python
def make_signature(name, params, common_params, common_param_values): """ Create a signature for a geom or stat Gets the DEFAULT_PARAMS (params) and creates are comma separated list of the `name=value` pairs. The common_params come first in the list, and they get take their values from either the params-dict or the common_geom_param_values-dict. """ tokens = [] seen = set() def tokens_append(key, value): if isinstance(value, str): value = "'{}'".format(value) tokens.append('{}={}'.format(key, value)) # preferred params come first for key in common_params: seen.add(key) try: value = params[key] except KeyError: value = common_param_values[key] tokens_append(key, value) # other params (these are the geom/stat specific parameters for key in (set(params) - seen): tokens_append(key, params[key]) # name, 1 opening bracket, 4 spaces in SIGNATURE_TPL s1 = name + '(' s2 = ', '.join(tokens) + ', **kwargs)' line_width = 78 - len(s1) indent_spaces = ' ' * (len(s1) + 4) newline_and_space = '\n' + indent_spaces s2_lines = wrap(s2, width=line_width) return s1 + newline_and_space.join(s2_lines)
[ "def", "make_signature", "(", "name", ",", "params", ",", "common_params", ",", "common_param_values", ")", ":", "tokens", "=", "[", "]", "seen", "=", "set", "(", ")", "def", "tokens_append", "(", "key", ",", "value", ")", ":", "if", "isinstance", "(", ...
Create a signature for a geom or stat Gets the DEFAULT_PARAMS (params) and creates are comma separated list of the `name=value` pairs. The common_params come first in the list, and they get take their values from either the params-dict or the common_geom_param_values-dict.
[ "Create", "a", "signature", "for", "a", "geom", "or", "stat" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/doctools.py#L166-L203
train
214,479
has2k1/plotnine
plotnine/doctools.py
docstring_section_lines
def docstring_section_lines(docstring, section_name): """ Return a section of a numpydoc string Paramters --------- docstring : str Docstring section_name : str Name of section to return Returns ------- section : str Section minus the header """ lines = [] inside_section = False underline = '-' * len(section_name) expect_underline = False for line in docstring.splitlines(): _line = line.strip().lower() if expect_underline: expect_underline = False if _line == underline: inside_section = True continue if _line == section_name: expect_underline = True elif _line in DOCSTRING_SECTIONS: # next section break elif inside_section: lines.append(line) return '\n'.join(lines)
python
def docstring_section_lines(docstring, section_name): """ Return a section of a numpydoc string Paramters --------- docstring : str Docstring section_name : str Name of section to return Returns ------- section : str Section minus the header """ lines = [] inside_section = False underline = '-' * len(section_name) expect_underline = False for line in docstring.splitlines(): _line = line.strip().lower() if expect_underline: expect_underline = False if _line == underline: inside_section = True continue if _line == section_name: expect_underline = True elif _line in DOCSTRING_SECTIONS: # next section break elif inside_section: lines.append(line) return '\n'.join(lines)
[ "def", "docstring_section_lines", "(", "docstring", ",", "section_name", ")", ":", "lines", "=", "[", "]", "inside_section", "=", "False", "underline", "=", "'-'", "*", "len", "(", "section_name", ")", "expect_underline", "=", "False", "for", "line", "in", "...
Return a section of a numpydoc string Paramters --------- docstring : str Docstring section_name : str Name of section to return Returns ------- section : str Section minus the header
[ "Return", "a", "section", "of", "a", "numpydoc", "string" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/doctools.py#L207-L243
train
214,480
has2k1/plotnine
plotnine/doctools.py
parameters_str_to_dict
def parameters_str_to_dict(param_section): """ Convert a param section to a dict Parameters ---------- param_section : str Text in the parameter section Returns ------- d : OrderedDict Dictionary of the parameters in the order that they are described in the parameters section. The dict is of the form ``{param: all_parameter_text}``. You can reconstruct the ``param_section`` from the keys of the dictionary. See Also -------- :func:`parameters_dict_to_str` """ d = OrderedDict() previous_param = None param_desc = None for line in param_section.split('\n'): param = param_spec(line) if param: if previous_param: d[previous_param] = '\n'.join(param_desc) param_desc = [line] previous_param = param elif param_desc: param_desc.append(line) if previous_param: d[previous_param] = '\n'.join(param_desc) return d
python
def parameters_str_to_dict(param_section): """ Convert a param section to a dict Parameters ---------- param_section : str Text in the parameter section Returns ------- d : OrderedDict Dictionary of the parameters in the order that they are described in the parameters section. The dict is of the form ``{param: all_parameter_text}``. You can reconstruct the ``param_section`` from the keys of the dictionary. See Also -------- :func:`parameters_dict_to_str` """ d = OrderedDict() previous_param = None param_desc = None for line in param_section.split('\n'): param = param_spec(line) if param: if previous_param: d[previous_param] = '\n'.join(param_desc) param_desc = [line] previous_param = param elif param_desc: param_desc.append(line) if previous_param: d[previous_param] = '\n'.join(param_desc) return d
[ "def", "parameters_str_to_dict", "(", "param_section", ")", ":", "d", "=", "OrderedDict", "(", ")", "previous_param", "=", "None", "param_desc", "=", "None", "for", "line", "in", "param_section", ".", "split", "(", "'\\n'", ")", ":", "param", "=", "param_spe...
Convert a param section to a dict Parameters ---------- param_section : str Text in the parameter section Returns ------- d : OrderedDict Dictionary of the parameters in the order that they are described in the parameters section. The dict is of the form ``{param: all_parameter_text}``. You can reconstruct the ``param_section`` from the keys of the dictionary. See Also -------- :func:`parameters_dict_to_str`
[ "Convert", "a", "param", "section", "to", "a", "dict" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/doctools.py#L279-L317
train
214,481
has2k1/plotnine
plotnine/doctools.py
document_geom
def document_geom(geom): """ Create a structured documentation for the geom It replaces `{usage}`, `{common_parameters}` and `{aesthetics}` with generated documentation. """ # Dedented so that it lineups (in sphinx) with the part # generated parts when put together docstring = dedent(geom.__doc__) # usage signature = make_signature(geom.__name__, geom.DEFAULT_PARAMS, common_geom_params, common_geom_param_values) usage = GEOM_SIGNATURE_TPL.format(signature=signature) # aesthetics contents = OrderedDict(('**{}**'.format(ae), '') for ae in sorted(geom.REQUIRED_AES)) if geom.DEFAULT_AES: d = geom.DEFAULT_AES.copy() d['group'] = '' # All geoms understand the group aesthetic contents.update(sorted(d.items())) table = dict_to_table(('Aesthetic', 'Default value'), contents) aesthetics_table = AESTHETICS_TABLE_TPL.format(table=table) tpl = dedent(geom._aesthetics_doc.lstrip('\n')) aesthetics_doc = tpl.format(aesthetics_table=aesthetics_table) aesthetics_doc = indent(aesthetics_doc, ' '*4) # common_parameters d = geom.DEFAULT_PARAMS common_parameters = GEOM_PARAMS_TPL.format( default_stat=d['stat'], default_position=d['position'], default_na_rm=d['na_rm'], default_inherit_aes=d.get('inherit_aes', True), _aesthetics_doc=aesthetics_doc, **common_params_doc) docstring = docstring.replace('{usage}', usage) docstring = docstring.replace('{common_parameters}', common_parameters) geom.__doc__ = docstring return geom
python
def document_geom(geom): """ Create a structured documentation for the geom It replaces `{usage}`, `{common_parameters}` and `{aesthetics}` with generated documentation. """ # Dedented so that it lineups (in sphinx) with the part # generated parts when put together docstring = dedent(geom.__doc__) # usage signature = make_signature(geom.__name__, geom.DEFAULT_PARAMS, common_geom_params, common_geom_param_values) usage = GEOM_SIGNATURE_TPL.format(signature=signature) # aesthetics contents = OrderedDict(('**{}**'.format(ae), '') for ae in sorted(geom.REQUIRED_AES)) if geom.DEFAULT_AES: d = geom.DEFAULT_AES.copy() d['group'] = '' # All geoms understand the group aesthetic contents.update(sorted(d.items())) table = dict_to_table(('Aesthetic', 'Default value'), contents) aesthetics_table = AESTHETICS_TABLE_TPL.format(table=table) tpl = dedent(geom._aesthetics_doc.lstrip('\n')) aesthetics_doc = tpl.format(aesthetics_table=aesthetics_table) aesthetics_doc = indent(aesthetics_doc, ' '*4) # common_parameters d = geom.DEFAULT_PARAMS common_parameters = GEOM_PARAMS_TPL.format( default_stat=d['stat'], default_position=d['position'], default_na_rm=d['na_rm'], default_inherit_aes=d.get('inherit_aes', True), _aesthetics_doc=aesthetics_doc, **common_params_doc) docstring = docstring.replace('{usage}', usage) docstring = docstring.replace('{common_parameters}', common_parameters) geom.__doc__ = docstring return geom
[ "def", "document_geom", "(", "geom", ")", ":", "# Dedented so that it lineups (in sphinx) with the part", "# generated parts when put together", "docstring", "=", "dedent", "(", "geom", ".", "__doc__", ")", "# usage", "signature", "=", "make_signature", "(", "geom", ".", ...
Create a structured documentation for the geom It replaces `{usage}`, `{common_parameters}` and `{aesthetics}` with generated documentation.
[ "Create", "a", "structured", "documentation", "for", "the", "geom" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/doctools.py#L341-L387
train
214,482
has2k1/plotnine
plotnine/doctools.py
document_stat
def document_stat(stat): """ Create a structured documentation for the stat It replaces `{usage}`, `{common_parameters}` and `{aesthetics}` with generated documentation. """ # Dedented so that it lineups (in sphinx) with the part # generated parts when put together docstring = dedent(stat.__doc__) # usage: signature = make_signature(stat.__name__, stat.DEFAULT_PARAMS, common_stat_params, common_stat_param_values) usage = STAT_SIGNATURE_TPL.format(signature=signature) # aesthetics contents = OrderedDict(('**{}**'.format(ae), '') for ae in sorted(stat.REQUIRED_AES)) contents.update(sorted(stat.DEFAULT_AES.items())) table = dict_to_table(('Aesthetic', 'Default value'), contents) aesthetics_table = AESTHETICS_TABLE_TPL.format(table=table) tpl = dedent(stat._aesthetics_doc.lstrip('\n')) aesthetics_doc = tpl.format(aesthetics_table=aesthetics_table) aesthetics_doc = indent(aesthetics_doc, ' '*4) # common_parameters d = stat.DEFAULT_PARAMS common_parameters = STAT_PARAMS_TPL.format( default_geom=d['geom'], default_position=d['position'], default_na_rm=d['na_rm'], _aesthetics_doc=aesthetics_doc, **common_params_doc) docstring = docstring.replace('{usage}', usage) docstring = docstring.replace('{common_parameters}', common_parameters) stat.__doc__ = docstring return stat
python
def document_stat(stat): """ Create a structured documentation for the stat It replaces `{usage}`, `{common_parameters}` and `{aesthetics}` with generated documentation. """ # Dedented so that it lineups (in sphinx) with the part # generated parts when put together docstring = dedent(stat.__doc__) # usage: signature = make_signature(stat.__name__, stat.DEFAULT_PARAMS, common_stat_params, common_stat_param_values) usage = STAT_SIGNATURE_TPL.format(signature=signature) # aesthetics contents = OrderedDict(('**{}**'.format(ae), '') for ae in sorted(stat.REQUIRED_AES)) contents.update(sorted(stat.DEFAULT_AES.items())) table = dict_to_table(('Aesthetic', 'Default value'), contents) aesthetics_table = AESTHETICS_TABLE_TPL.format(table=table) tpl = dedent(stat._aesthetics_doc.lstrip('\n')) aesthetics_doc = tpl.format(aesthetics_table=aesthetics_table) aesthetics_doc = indent(aesthetics_doc, ' '*4) # common_parameters d = stat.DEFAULT_PARAMS common_parameters = STAT_PARAMS_TPL.format( default_geom=d['geom'], default_position=d['position'], default_na_rm=d['na_rm'], _aesthetics_doc=aesthetics_doc, **common_params_doc) docstring = docstring.replace('{usage}', usage) docstring = docstring.replace('{common_parameters}', common_parameters) stat.__doc__ = docstring return stat
[ "def", "document_stat", "(", "stat", ")", ":", "# Dedented so that it lineups (in sphinx) with the part", "# generated parts when put together", "docstring", "=", "dedent", "(", "stat", ".", "__doc__", ")", "# usage:", "signature", "=", "make_signature", "(", "stat", ".",...
Create a structured documentation for the stat It replaces `{usage}`, `{common_parameters}` and `{aesthetics}` with generated documentation.
[ "Create", "a", "structured", "documentation", "for", "the", "stat" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/doctools.py#L390-L431
train
214,483
has2k1/plotnine
plotnine/doctools.py
document_scale
def document_scale(cls): """ Create a documentation for a scale Import the superclass parameters It replaces `{superclass_parameters}` with the documentation of the parameters from the superclass. Parameters ---------- cls : type A scale class Returns ------- cls : type The scale class with a modified docstring. """ params_list = [] # Get set of cls params cls_param_string = docstring_parameters_section(cls) cls_param_dict = parameters_str_to_dict(cls_param_string) cls_params = set(cls_param_dict.keys()) for i, base in enumerate(cls.__bases__): # Get set of base class params base_param_string = param_string = docstring_parameters_section(base) base_param_dict = parameters_str_to_dict(base_param_string) base_params = set(base_param_dict.keys()) # Remove duplicate params from the base class duplicate_params = base_params & cls_params for param in duplicate_params: del base_param_dict[param] if duplicate_params: param_string = parameters_dict_to_str(base_param_dict) # Accumulate params of base case if i == 0: # Compensate for the indentation of the # {superclass_parameters} string param_string = param_string.strip() params_list.append(param_string) # Prevent the next base classes from bringing in the # same parameters. cls_params |= base_params # Fill in the processed superclass parameters superclass_parameters = '\n'.join(params_list) cls.__doc__ = cls.__doc__.format( superclass_parameters=superclass_parameters) return cls
python
def document_scale(cls): """ Create a documentation for a scale Import the superclass parameters It replaces `{superclass_parameters}` with the documentation of the parameters from the superclass. Parameters ---------- cls : type A scale class Returns ------- cls : type The scale class with a modified docstring. """ params_list = [] # Get set of cls params cls_param_string = docstring_parameters_section(cls) cls_param_dict = parameters_str_to_dict(cls_param_string) cls_params = set(cls_param_dict.keys()) for i, base in enumerate(cls.__bases__): # Get set of base class params base_param_string = param_string = docstring_parameters_section(base) base_param_dict = parameters_str_to_dict(base_param_string) base_params = set(base_param_dict.keys()) # Remove duplicate params from the base class duplicate_params = base_params & cls_params for param in duplicate_params: del base_param_dict[param] if duplicate_params: param_string = parameters_dict_to_str(base_param_dict) # Accumulate params of base case if i == 0: # Compensate for the indentation of the # {superclass_parameters} string param_string = param_string.strip() params_list.append(param_string) # Prevent the next base classes from bringing in the # same parameters. cls_params |= base_params # Fill in the processed superclass parameters superclass_parameters = '\n'.join(params_list) cls.__doc__ = cls.__doc__.format( superclass_parameters=superclass_parameters) return cls
[ "def", "document_scale", "(", "cls", ")", ":", "params_list", "=", "[", "]", "# Get set of cls params", "cls_param_string", "=", "docstring_parameters_section", "(", "cls", ")", "cls_param_dict", "=", "parameters_str_to_dict", "(", "cls_param_string", ")", "cls_params",...
Create a documentation for a scale Import the superclass parameters It replaces `{superclass_parameters}` with the documentation of the parameters from the superclass. Parameters ---------- cls : type A scale class Returns ------- cls : type The scale class with a modified docstring.
[ "Create", "a", "documentation", "for", "a", "scale" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/doctools.py#L434-L488
train
214,484
has2k1/plotnine
plotnine/doctools.py
document
def document(cls): """ Decorator to document a class """ if cls.__doc__ is None: return cls baseclass_name = cls.mro()[-2].__name__ try: return DOC_FUNCTIONS[baseclass_name](cls) except KeyError: return cls
python
def document(cls): """ Decorator to document a class """ if cls.__doc__ is None: return cls baseclass_name = cls.mro()[-2].__name__ try: return DOC_FUNCTIONS[baseclass_name](cls) except KeyError: return cls
[ "def", "document", "(", "cls", ")", ":", "if", "cls", ".", "__doc__", "is", "None", ":", "return", "cls", "baseclass_name", "=", "cls", ".", "mro", "(", ")", "[", "-", "2", "]", ".", "__name__", "try", ":", "return", "DOC_FUNCTIONS", "[", "baseclass_...
Decorator to document a class
[ "Decorator", "to", "document", "a", "class" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/doctools.py#L498-L510
train
214,485
has2k1/plotnine
plotnine/ggplot.py
ggplot._draw_using_figure
def _draw_using_figure(self, figure, axs): """ Draw onto already created figure and axes This is can be used to draw animation frames, or inset plots. It is intended to be used after the key plot has been drawn. Parameters ---------- figure : ~matplotlib.figure.Figure Matplotlib figure axs : array_like Array of Axes onto which to draw the plots """ self = deepcopy(self) self._build() self.theme = self.theme or theme_get() self.figure = figure self.axs = axs try: with mpl.rc_context(): self.theme.apply_rcparams() self._setup_parameters() self._draw_layers() self._draw_facet_labels() self._draw_legend() self._apply_theme() except Exception as err: if self.figure is not None: plt.close(self.figure) raise err return self
python
def _draw_using_figure(self, figure, axs): """ Draw onto already created figure and axes This is can be used to draw animation frames, or inset plots. It is intended to be used after the key plot has been drawn. Parameters ---------- figure : ~matplotlib.figure.Figure Matplotlib figure axs : array_like Array of Axes onto which to draw the plots """ self = deepcopy(self) self._build() self.theme = self.theme or theme_get() self.figure = figure self.axs = axs try: with mpl.rc_context(): self.theme.apply_rcparams() self._setup_parameters() self._draw_layers() self._draw_facet_labels() self._draw_legend() self._apply_theme() except Exception as err: if self.figure is not None: plt.close(self.figure) raise err return self
[ "def", "_draw_using_figure", "(", "self", ",", "figure", ",", "axs", ")", ":", "self", "=", "deepcopy", "(", "self", ")", "self", ".", "_build", "(", ")", "self", ".", "theme", "=", "self", ".", "theme", "or", "theme_get", "(", ")", "self", ".", "f...
Draw onto already created figure and axes This is can be used to draw animation frames, or inset plots. It is intended to be used after the key plot has been drawn. Parameters ---------- figure : ~matplotlib.figure.Figure Matplotlib figure axs : array_like Array of Axes onto which to draw the plots
[ "Draw", "onto", "already", "created", "figure", "and", "axes" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/ggplot.py#L228-L263
train
214,486
has2k1/plotnine
plotnine/ggplot.py
ggplot._build
def _build(self): """ Build ggplot for rendering. Notes ----- This method modifies the ggplot object. The caller is responsible for making a copy and using that to make the method call. """ if not self.layers: self += geom_blank() self.layout = Layout() layers = self.layers scales = self.scales layout = self.layout # Give each layer a copy of the data that it will need layers.generate_data(self.data) # Initialise panels, add extra data for margins & missing # facetting variables, and add on a PANEL variable to data layout.setup(layers, self) # Compute aesthetics to produce data with generalised # variable names layers.compute_aesthetics(self) # Transform data using all scales layers.transform(scales) # Map and train positions so that statistics have access # to ranges and all positions are numeric layout.train_position(layers, scales.x, scales.y) layout.map_position(layers) # Apply and map statistics layers.compute_statistic(layout) layers.map_statistic(self) # Make sure missing (but required) aesthetics are added scales.add_missing(('x', 'y')) # Prepare data in geoms # e.g. from y and width to ymin and ymax layers.setup_data() # Apply position adjustments layers.compute_position(layout) # Reset position scales, then re-train and map. This # ensures that facets have control over the range of # a plot. layout.reset_position_scales() layout.train_position(layers, scales.x, scales.y) layout.map_position(layers) # Train and map non-position scales npscales = scales.non_position_scales() if len(npscales): layers.train(npscales) layers.map(npscales) # Train coordinate system layout.setup_panel_params(self.coordinates) # fill in the defaults layers.use_defaults() # Allow stats to modify the layer data layers.finish_statistics() # Allow layout to modify data before rendering layout.finish_data(layers)
python
def _build(self): """ Build ggplot for rendering. Notes ----- This method modifies the ggplot object. The caller is responsible for making a copy and using that to make the method call. """ if not self.layers: self += geom_blank() self.layout = Layout() layers = self.layers scales = self.scales layout = self.layout # Give each layer a copy of the data that it will need layers.generate_data(self.data) # Initialise panels, add extra data for margins & missing # facetting variables, and add on a PANEL variable to data layout.setup(layers, self) # Compute aesthetics to produce data with generalised # variable names layers.compute_aesthetics(self) # Transform data using all scales layers.transform(scales) # Map and train positions so that statistics have access # to ranges and all positions are numeric layout.train_position(layers, scales.x, scales.y) layout.map_position(layers) # Apply and map statistics layers.compute_statistic(layout) layers.map_statistic(self) # Make sure missing (but required) aesthetics are added scales.add_missing(('x', 'y')) # Prepare data in geoms # e.g. from y and width to ymin and ymax layers.setup_data() # Apply position adjustments layers.compute_position(layout) # Reset position scales, then re-train and map. This # ensures that facets have control over the range of # a plot. layout.reset_position_scales() layout.train_position(layers, scales.x, scales.y) layout.map_position(layers) # Train and map non-position scales npscales = scales.non_position_scales() if len(npscales): layers.train(npscales) layers.map(npscales) # Train coordinate system layout.setup_panel_params(self.coordinates) # fill in the defaults layers.use_defaults() # Allow stats to modify the layer data layers.finish_statistics() # Allow layout to modify data before rendering layout.finish_data(layers)
[ "def", "_build", "(", "self", ")", ":", "if", "not", "self", ".", "layers", ":", "self", "+=", "geom_blank", "(", ")", "self", ".", "layout", "=", "Layout", "(", ")", "layers", "=", "self", ".", "layers", "scales", "=", "self", ".", "scales", "layo...
Build ggplot for rendering. Notes ----- This method modifies the ggplot object. The caller is responsible for making a copy and using that to make the method call.
[ "Build", "ggplot", "for", "rendering", "." ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/ggplot.py#L265-L339
train
214,487
has2k1/plotnine
plotnine/ggplot.py
ggplot._setup_parameters
def _setup_parameters(self): """ Set facet properties """ # facet self.facet.set( layout=self.layout, theme=self.theme, coordinates=self.coordinates, figure=self.figure, axs=self.axs ) # layout self.layout.axs = self.axs # theme self.theme.figure = self.figure
python
def _setup_parameters(self): """ Set facet properties """ # facet self.facet.set( layout=self.layout, theme=self.theme, coordinates=self.coordinates, figure=self.figure, axs=self.axs ) # layout self.layout.axs = self.axs # theme self.theme.figure = self.figure
[ "def", "_setup_parameters", "(", "self", ")", ":", "# facet", "self", ".", "facet", ".", "set", "(", "layout", "=", "self", ".", "layout", ",", "theme", "=", "self", ".", "theme", ",", "coordinates", "=", "self", ".", "coordinates", ",", "figure", "=",...
Set facet properties
[ "Set", "facet", "properties" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/ggplot.py#L341-L357
train
214,488
has2k1/plotnine
plotnine/ggplot.py
ggplot._create_figure
def _create_figure(self): """ Create Matplotlib figure and axes """ # Good for development if get_option('close_all_figures'): plt.close('all') figure = plt.figure() axs = self.facet.make_axes( figure, self.layout.layout, self.coordinates) # Dictionary to collect matplotlib objects that will # be targeted for theming by the themeables figure._themeable = {} self.figure = figure self.axs = axs return figure, axs
python
def _create_figure(self): """ Create Matplotlib figure and axes """ # Good for development if get_option('close_all_figures'): plt.close('all') figure = plt.figure() axs = self.facet.make_axes( figure, self.layout.layout, self.coordinates) # Dictionary to collect matplotlib objects that will # be targeted for theming by the themeables figure._themeable = {} self.figure = figure self.axs = axs return figure, axs
[ "def", "_create_figure", "(", "self", ")", ":", "# Good for development", "if", "get_option", "(", "'close_all_figures'", ")", ":", "plt", ".", "close", "(", "'all'", ")", "figure", "=", "plt", ".", "figure", "(", ")", "axs", "=", "self", ".", "facet", "...
Create Matplotlib figure and axes
[ "Create", "Matplotlib", "figure", "and", "axes" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/ggplot.py#L359-L379
train
214,489
has2k1/plotnine
plotnine/ggplot.py
ggplot._draw_facet_labels
def _draw_facet_labels(self): """ Draw facet labels a.k.a strip texts """ # Decorate the axes # - xaxis & yaxis breaks, labels, limits, ... # - facet labels # # pidx is the panel index (location left to right, top to bottom) for pidx, layout_info in self.layout.layout.iterrows(): panel_params = self.layout.panel_params[pidx] self.facet.set_breaks_and_labels( panel_params, layout_info, pidx) self.facet.draw_label(layout_info, pidx)
python
def _draw_facet_labels(self): """ Draw facet labels a.k.a strip texts """ # Decorate the axes # - xaxis & yaxis breaks, labels, limits, ... # - facet labels # # pidx is the panel index (location left to right, top to bottom) for pidx, layout_info in self.layout.layout.iterrows(): panel_params = self.layout.panel_params[pidx] self.facet.set_breaks_and_labels( panel_params, layout_info, pidx) self.facet.draw_label(layout_info, pidx)
[ "def", "_draw_facet_labels", "(", "self", ")", ":", "# Decorate the axes", "# - xaxis & yaxis breaks, labels, limits, ...", "# - facet labels", "#", "# pidx is the panel index (location left to right, top to bottom)", "for", "pidx", ",", "layout_info", "in", "self", ".", "lay...
Draw facet labels a.k.a strip texts
[ "Draw", "facet", "labels", "a", ".", "k", ".", "a", "strip", "texts" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/ggplot.py#L395-L408
train
214,490
has2k1/plotnine
plotnine/ggplot.py
ggplot._draw_legend
def _draw_legend(self): """ Draw legend onto the figure """ legend_box = self.guides.build(self) if not legend_box: return figure = self.figure left = figure.subplotpars.left right = figure.subplotpars.right top = figure.subplotpars.top bottom = figure.subplotpars.bottom W, H = figure.get_size_inches() position = self.guides.position get_property = self.theme.themeables.property # defaults spacing = 0.1 strip_margin_x = 0 strip_margin_y = 0 with suppress(KeyError): spacing = get_property('legend_box_spacing') with suppress(KeyError): strip_margin_x = get_property('strip_margin_x') with suppress(KeyError): strip_margin_y = get_property('strip_margin_y') right_strip_width = self.facet.strip_size('right') top_strip_height = self.facet.strip_size('top') # Other than when the legend is on the right the rest of # the computed x, y locations are not gauranteed not to # overlap with the axes or the labels. The user must then # use the legend_margin theme parameter to adjust the # location. This should get fixed when MPL has a better # layout manager. if position == 'right': loc = 6 pad = right_strip_width*(1+strip_margin_x) + spacing x = right + pad/W y = 0.5 elif position == 'left': loc = 7 x = left - spacing/W y = 0.5 elif position == 'top': loc = 8 x = 0.5 pad = top_strip_height*(1+strip_margin_y) + spacing y = top + pad/H elif position == 'bottom': loc = 9 x = 0.5 y = bottom - spacing/H else: loc = 10 x, y = position anchored_box = AnchoredOffsetbox( loc=loc, child=legend_box, pad=0., frameon=False, bbox_to_anchor=(x, y), bbox_transform=figure.transFigure, borderpad=0.) anchored_box.set_zorder(90.1) self.figure._themeable['legend_background'] = anchored_box ax = self.axs[0] ax.add_artist(anchored_box)
python
def _draw_legend(self): """ Draw legend onto the figure """ legend_box = self.guides.build(self) if not legend_box: return figure = self.figure left = figure.subplotpars.left right = figure.subplotpars.right top = figure.subplotpars.top bottom = figure.subplotpars.bottom W, H = figure.get_size_inches() position = self.guides.position get_property = self.theme.themeables.property # defaults spacing = 0.1 strip_margin_x = 0 strip_margin_y = 0 with suppress(KeyError): spacing = get_property('legend_box_spacing') with suppress(KeyError): strip_margin_x = get_property('strip_margin_x') with suppress(KeyError): strip_margin_y = get_property('strip_margin_y') right_strip_width = self.facet.strip_size('right') top_strip_height = self.facet.strip_size('top') # Other than when the legend is on the right the rest of # the computed x, y locations are not gauranteed not to # overlap with the axes or the labels. The user must then # use the legend_margin theme parameter to adjust the # location. This should get fixed when MPL has a better # layout manager. if position == 'right': loc = 6 pad = right_strip_width*(1+strip_margin_x) + spacing x = right + pad/W y = 0.5 elif position == 'left': loc = 7 x = left - spacing/W y = 0.5 elif position == 'top': loc = 8 x = 0.5 pad = top_strip_height*(1+strip_margin_y) + spacing y = top + pad/H elif position == 'bottom': loc = 9 x = 0.5 y = bottom - spacing/H else: loc = 10 x, y = position anchored_box = AnchoredOffsetbox( loc=loc, child=legend_box, pad=0., frameon=False, bbox_to_anchor=(x, y), bbox_transform=figure.transFigure, borderpad=0.) anchored_box.set_zorder(90.1) self.figure._themeable['legend_background'] = anchored_box ax = self.axs[0] ax.add_artist(anchored_box)
[ "def", "_draw_legend", "(", "self", ")", ":", "legend_box", "=", "self", ".", "guides", ".", "build", "(", "self", ")", "if", "not", "legend_box", ":", "return", "figure", "=", "self", ".", "figure", "left", "=", "figure", ".", "subplotpars", ".", "lef...
Draw legend onto the figure
[ "Draw", "legend", "onto", "the", "figure" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/ggplot.py#L410-L481
train
214,491
has2k1/plotnine
plotnine/ggplot.py
ggplot._draw_labels
def _draw_labels(self): """ Draw x and y labels onto the figure """ # This is very laboured. Should be changed when MPL # finally has a constraint based layout manager. figure = self.figure get_property = self.theme.themeables.property try: margin = get_property('axis_title_x', 'margin') except KeyError: pad_x = 5 else: pad_x = margin.get_as('t', 'pt') try: margin = get_property('axis_title_y', 'margin') except KeyError: pad_y = 5 else: pad_y = margin.get_as('r', 'pt') # Get the axis labels (default or specified by user) # and let the coordinate modify them e.g. flip labels = self.coordinates.labels({ 'x': self.layout.xlabel(self.labels), 'y': self.layout.ylabel(self.labels) }) # The first axes object is on left, and the last axes object # is at the bottom. We change the transform so that the relevant # coordinate is in figure coordinates. This way we take # advantage of how MPL adjusts the label position so that they # do not overlap with the tick text. This works well for # facetting with scales='fixed' and also when not facetting. # first_ax = self.axs[0] # last_ax = self.axs[-1] xlabel = self.facet.last_ax.set_xlabel( labels['x'], labelpad=pad_x) ylabel = self.facet.first_ax.set_ylabel( labels['y'], labelpad=pad_y) xlabel.set_transform(mtransforms.blended_transform_factory( figure.transFigure, mtransforms.IdentityTransform())) ylabel.set_transform(mtransforms.blended_transform_factory( mtransforms.IdentityTransform(), figure.transFigure)) figure._themeable['axis_title_x'] = xlabel figure._themeable['axis_title_y'] = ylabel
python
def _draw_labels(self): """ Draw x and y labels onto the figure """ # This is very laboured. Should be changed when MPL # finally has a constraint based layout manager. figure = self.figure get_property = self.theme.themeables.property try: margin = get_property('axis_title_x', 'margin') except KeyError: pad_x = 5 else: pad_x = margin.get_as('t', 'pt') try: margin = get_property('axis_title_y', 'margin') except KeyError: pad_y = 5 else: pad_y = margin.get_as('r', 'pt') # Get the axis labels (default or specified by user) # and let the coordinate modify them e.g. flip labels = self.coordinates.labels({ 'x': self.layout.xlabel(self.labels), 'y': self.layout.ylabel(self.labels) }) # The first axes object is on left, and the last axes object # is at the bottom. We change the transform so that the relevant # coordinate is in figure coordinates. This way we take # advantage of how MPL adjusts the label position so that they # do not overlap with the tick text. This works well for # facetting with scales='fixed' and also when not facetting. # first_ax = self.axs[0] # last_ax = self.axs[-1] xlabel = self.facet.last_ax.set_xlabel( labels['x'], labelpad=pad_x) ylabel = self.facet.first_ax.set_ylabel( labels['y'], labelpad=pad_y) xlabel.set_transform(mtransforms.blended_transform_factory( figure.transFigure, mtransforms.IdentityTransform())) ylabel.set_transform(mtransforms.blended_transform_factory( mtransforms.IdentityTransform(), figure.transFigure)) figure._themeable['axis_title_x'] = xlabel figure._themeable['axis_title_y'] = ylabel
[ "def", "_draw_labels", "(", "self", ")", ":", "# This is very laboured. Should be changed when MPL", "# finally has a constraint based layout manager.", "figure", "=", "self", ".", "figure", "get_property", "=", "self", ".", "theme", ".", "themeables", ".", "property", "t...
Draw x and y labels onto the figure
[ "Draw", "x", "and", "y", "labels", "onto", "the", "figure" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/ggplot.py#L483-L533
train
214,492
has2k1/plotnine
plotnine/ggplot.py
ggplot._draw_title
def _draw_title(self): """ Draw title onto the figure """ # This is very laboured. Should be changed when MPL # finally has a constraint based layout manager. figure = self.figure title = self.labels.get('title', '') rcParams = self.theme.rcParams get_property = self.theme.themeables.property # Pick suitable values in inches and convert them to # transFigure dimension. This gives fixed spacing # margins which work for oblong plots. top = figure.subplotpars.top W, H = figure.get_size_inches() # Adjust the title to avoid overlap with the facet # labels on the top row # pad/H is inches in transFigure coordinates. A fixed # margin value in inches prevents oblong plots from # getting unpredictably large spaces. try: fontsize = get_property('plot_title', 'size') except KeyError: fontsize = float(rcParams.get('font.size', 12)) try: linespacing = get_property('plot_title', 'linespacing') except KeyError: linespacing = 1.2 try: margin = get_property('plot_title', 'margin') except KeyError: pad = 0.09 else: pad = margin.get_as('b', 'in') try: strip_margin_x = get_property('strip_margin_x') except KeyError: strip_margin_x = 0 line_size = fontsize / 72.27 num_lines = len(title.split('\n')) title_size = line_size * linespacing * num_lines strip_height = self.facet.strip_size('top') # vertical adjustment strip_height *= (1 + strip_margin_x) x = 0.5 y = top + (strip_height+title_size/2+pad)/H text = figure.text(x, y, title, ha='center', va='center') figure._themeable['plot_title'] = text
python
def _draw_title(self): """ Draw title onto the figure """ # This is very laboured. Should be changed when MPL # finally has a constraint based layout manager. figure = self.figure title = self.labels.get('title', '') rcParams = self.theme.rcParams get_property = self.theme.themeables.property # Pick suitable values in inches and convert them to # transFigure dimension. This gives fixed spacing # margins which work for oblong plots. top = figure.subplotpars.top W, H = figure.get_size_inches() # Adjust the title to avoid overlap with the facet # labels on the top row # pad/H is inches in transFigure coordinates. A fixed # margin value in inches prevents oblong plots from # getting unpredictably large spaces. try: fontsize = get_property('plot_title', 'size') except KeyError: fontsize = float(rcParams.get('font.size', 12)) try: linespacing = get_property('plot_title', 'linespacing') except KeyError: linespacing = 1.2 try: margin = get_property('plot_title', 'margin') except KeyError: pad = 0.09 else: pad = margin.get_as('b', 'in') try: strip_margin_x = get_property('strip_margin_x') except KeyError: strip_margin_x = 0 line_size = fontsize / 72.27 num_lines = len(title.split('\n')) title_size = line_size * linespacing * num_lines strip_height = self.facet.strip_size('top') # vertical adjustment strip_height *= (1 + strip_margin_x) x = 0.5 y = top + (strip_height+title_size/2+pad)/H text = figure.text(x, y, title, ha='center', va='center') figure._themeable['plot_title'] = text
[ "def", "_draw_title", "(", "self", ")", ":", "# This is very laboured. Should be changed when MPL", "# finally has a constraint based layout manager.", "figure", "=", "self", ".", "figure", "title", "=", "self", ".", "labels", ".", "get", "(", "'title'", ",", "''", ")...
Draw title onto the figure
[ "Draw", "title", "onto", "the", "figure" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/ggplot.py#L535-L590
train
214,493
has2k1/plotnine
plotnine/ggplot.py
ggplot._apply_theme
def _apply_theme(self): """ Apply theme attributes to Matplotlib objects """ self.theme.apply_axs(self.axs) self.theme.apply_figure(self.figure)
python
def _apply_theme(self): """ Apply theme attributes to Matplotlib objects """ self.theme.apply_axs(self.axs) self.theme.apply_figure(self.figure)
[ "def", "_apply_theme", "(", "self", ")", ":", "self", ".", "theme", ".", "apply_axs", "(", "self", ".", "axs", ")", "self", ".", "theme", ".", "apply_figure", "(", "self", ".", "figure", ")" ]
Apply theme attributes to Matplotlib objects
[ "Apply", "theme", "attributes", "to", "Matplotlib", "objects" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/ggplot.py#L599-L604
train
214,494
has2k1/plotnine
plotnine/ggplot.py
ggplot._save_filename
def _save_filename(self, ext): """ Default filename used by the save method Parameters ---------- ext : str Extension e.g. png, pdf, ... """ hash_token = abs(self.__hash__()) return 'plotnine-save-{}.{}'.format(hash_token, ext)
python
def _save_filename(self, ext): """ Default filename used by the save method Parameters ---------- ext : str Extension e.g. png, pdf, ... """ hash_token = abs(self.__hash__()) return 'plotnine-save-{}.{}'.format(hash_token, ext)
[ "def", "_save_filename", "(", "self", ",", "ext", ")", ":", "hash_token", "=", "abs", "(", "self", ".", "__hash__", "(", ")", ")", "return", "'plotnine-save-{}.{}'", ".", "format", "(", "hash_token", ",", "ext", ")" ]
Default filename used by the save method Parameters ---------- ext : str Extension e.g. png, pdf, ...
[ "Default", "filename", "used", "by", "the", "save", "method" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/ggplot.py#L606-L616
train
214,495
has2k1/plotnine
plotnine/ggplot.py
ggplot.save
def save(self, filename=None, format=None, path=None, width=None, height=None, units='in', dpi=None, limitsize=True, verbose=True, **kwargs): """ Save a ggplot object as an image file Parameters ---------- filename : str, optional File name to write the plot to. If not specified, a name like “plotnine-save-<hash>.<format>” is used. format : str Image format to use, automatically extract from file name extension. path : str Path to save plot to (if you just want to set path and not filename). width : number, optional Width (defaults to value set by the theme). If specified the `height` must also be given. height : number, optional Height (defaults to value set by the theme). If specified the `width` must also be given. units : str Units for width and height when either one is explicitly specified (in, cm, or mm). dpi : float DPI to use for raster graphics. If None, defaults to using the `dpi` of theme, if none is set then a `dpi` of 100. limitsize : bool If ``True`` (the default), ggsave will not save images larger than 50x50 inches, to prevent the common error of specifying dimensions in pixels. verbose : bool If ``True``, print the saving information. kwargs : dict Additional arguments to pass to matplotlib `savefig()`. 
""" fig_kwargs = {'bbox_inches': 'tight', # 'tight' is a good default 'format': format} fig_kwargs.update(kwargs) figure = [None] # nonlocal # filename, depends on the object if filename is None: ext = format if format else 'pdf' filename = self._save_filename(ext) if path: filename = os.path.join(path, filename) # Preserve the users object self = deepcopy(self) # theme self.theme = self.theme or theme_get() # The figure size should be known by the theme if width is not None and height is not None: width = to_inches(width, units) height = to_inches(height, units) self += theme(figure_size=(width, height)) elif (width is None and height is not None or width is not None and height is None): raise PlotnineError( "You must specify both width and height") width, height = self.theme.themeables.property('figure_size') if limitsize and (width > 25 or height > 25): raise PlotnineError( "Dimensions (width={}, height={}) exceed 25 inches " "(height and width are specified in inches/cm/mm, " "not pixels). If you are sure you want these " "dimensions, use 'limitsize=False'.".format(width, height)) if dpi is None: try: self.theme.themeables.property('dpi') except KeyError: self.theme = self.theme + theme(dpi=100) else: self.theme = self.theme + theme(dpi=dpi) if verbose: warn("Saving {0} x {1} {2} image.".format( from_inches(width, units), from_inches(height, units), units), PlotnineWarning) warn('Filename: {}'.format(filename), PlotnineWarning) # Helper function so that we can clean up when it fails def _save(): fig = figure[0] = self.draw() # savefig ignores the figure face & edge colors facecolor = fig.get_facecolor() edgecolor = fig.get_edgecolor() if edgecolor: fig_kwargs['facecolor'] = facecolor if edgecolor: fig_kwargs['edgecolor'] = edgecolor fig_kwargs['frameon'] = True fig.savefig(filename, **fig_kwargs) try: _save() except Exception as err: figure[0] and plt.close(figure[0]) raise err else: figure[0] and plt.close(figure[0])
python
def save(self, filename=None, format=None, path=None, width=None, height=None, units='in', dpi=None, limitsize=True, verbose=True, **kwargs): """ Save a ggplot object as an image file Parameters ---------- filename : str, optional File name to write the plot to. If not specified, a name like “plotnine-save-<hash>.<format>” is used. format : str Image format to use, automatically extract from file name extension. path : str Path to save plot to (if you just want to set path and not filename). width : number, optional Width (defaults to value set by the theme). If specified the `height` must also be given. height : number, optional Height (defaults to value set by the theme). If specified the `width` must also be given. units : str Units for width and height when either one is explicitly specified (in, cm, or mm). dpi : float DPI to use for raster graphics. If None, defaults to using the `dpi` of theme, if none is set then a `dpi` of 100. limitsize : bool If ``True`` (the default), ggsave will not save images larger than 50x50 inches, to prevent the common error of specifying dimensions in pixels. verbose : bool If ``True``, print the saving information. kwargs : dict Additional arguments to pass to matplotlib `savefig()`. 
""" fig_kwargs = {'bbox_inches': 'tight', # 'tight' is a good default 'format': format} fig_kwargs.update(kwargs) figure = [None] # nonlocal # filename, depends on the object if filename is None: ext = format if format else 'pdf' filename = self._save_filename(ext) if path: filename = os.path.join(path, filename) # Preserve the users object self = deepcopy(self) # theme self.theme = self.theme or theme_get() # The figure size should be known by the theme if width is not None and height is not None: width = to_inches(width, units) height = to_inches(height, units) self += theme(figure_size=(width, height)) elif (width is None and height is not None or width is not None and height is None): raise PlotnineError( "You must specify both width and height") width, height = self.theme.themeables.property('figure_size') if limitsize and (width > 25 or height > 25): raise PlotnineError( "Dimensions (width={}, height={}) exceed 25 inches " "(height and width are specified in inches/cm/mm, " "not pixels). If you are sure you want these " "dimensions, use 'limitsize=False'.".format(width, height)) if dpi is None: try: self.theme.themeables.property('dpi') except KeyError: self.theme = self.theme + theme(dpi=100) else: self.theme = self.theme + theme(dpi=dpi) if verbose: warn("Saving {0} x {1} {2} image.".format( from_inches(width, units), from_inches(height, units), units), PlotnineWarning) warn('Filename: {}'.format(filename), PlotnineWarning) # Helper function so that we can clean up when it fails def _save(): fig = figure[0] = self.draw() # savefig ignores the figure face & edge colors facecolor = fig.get_facecolor() edgecolor = fig.get_edgecolor() if edgecolor: fig_kwargs['facecolor'] = facecolor if edgecolor: fig_kwargs['edgecolor'] = edgecolor fig_kwargs['frameon'] = True fig.savefig(filename, **fig_kwargs) try: _save() except Exception as err: figure[0] and plt.close(figure[0]) raise err else: figure[0] and plt.close(figure[0])
[ "def", "save", "(", "self", ",", "filename", "=", "None", ",", "format", "=", "None", ",", "path", "=", "None", ",", "width", "=", "None", ",", "height", "=", "None", ",", "units", "=", "'in'", ",", "dpi", "=", "None", ",", "limitsize", "=", "Tru...
Save a ggplot object as an image file Parameters ---------- filename : str, optional File name to write the plot to. If not specified, a name like “plotnine-save-<hash>.<format>” is used. format : str Image format to use, automatically extract from file name extension. path : str Path to save plot to (if you just want to set path and not filename). width : number, optional Width (defaults to value set by the theme). If specified the `height` must also be given. height : number, optional Height (defaults to value set by the theme). If specified the `width` must also be given. units : str Units for width and height when either one is explicitly specified (in, cm, or mm). dpi : float DPI to use for raster graphics. If None, defaults to using the `dpi` of theme, if none is set then a `dpi` of 100. limitsize : bool If ``True`` (the default), ggsave will not save images larger than 50x50 inches, to prevent the common error of specifying dimensions in pixels. verbose : bool If ``True``, print the saving information. kwargs : dict Additional arguments to pass to matplotlib `savefig()`.
[ "Save", "a", "ggplot", "object", "as", "an", "image", "file" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/ggplot.py#L618-L730
train
214,496
has2k1/plotnine
plotnine/stats/smoothers.py
mavg
def mavg(data, xseq, **params): """ Fit moving average """ window = params['method_args']['window'] # The first average comes after the full window size # has been swept over rolling = data['y'].rolling(**params['method_args']) y = rolling.mean()[window:] n = len(data) stderr = rolling.std()[window:] x = data['x'][window:] data = pd.DataFrame({'x': x, 'y': y}) data.reset_index(inplace=True, drop=True) if params['se']: df = n - window # Original - Used data['ymin'], data['ymax'] = tdist_ci( y, df, stderr, params['level']) data['se'] = stderr return data
python
def mavg(data, xseq, **params): """ Fit moving average """ window = params['method_args']['window'] # The first average comes after the full window size # has been swept over rolling = data['y'].rolling(**params['method_args']) y = rolling.mean()[window:] n = len(data) stderr = rolling.std()[window:] x = data['x'][window:] data = pd.DataFrame({'x': x, 'y': y}) data.reset_index(inplace=True, drop=True) if params['se']: df = n - window # Original - Used data['ymin'], data['ymax'] = tdist_ci( y, df, stderr, params['level']) data['se'] = stderr return data
[ "def", "mavg", "(", "data", ",", "xseq", ",", "*", "*", "params", ")", ":", "window", "=", "params", "[", "'method_args'", "]", "[", "'window'", "]", "# The first average comes after the full window size", "# has been swept over", "rolling", "=", "data", "[", "'...
Fit moving average
[ "Fit", "moving", "average" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/smoothers.py#L221-L243
train
214,497
has2k1/plotnine
plotnine/stats/smoothers.py
gpr
def gpr(data, xseq, **params): """ Fit gaussian process """ try: from sklearn import gaussian_process except ImportError: raise PlotnineError( "To use gaussian process smoothing, " "You need to install scikit-learn.") kwargs = params['method_args'] if not kwargs: warnings.warn( "See sklearn.gaussian_process.GaussianProcessRegressor " "for parameters to pass in as 'method_args'", PlotnineWarning) regressor = gaussian_process.GaussianProcessRegressor(**kwargs) X = np.atleast_2d(data['x']).T n = len(data) Xseq = np.atleast_2d(xseq).T regressor.fit(X, data['y']) data = pd.DataFrame({'x': xseq}) if params['se']: y, stderr = regressor.predict(Xseq, return_std=True) data['y'] = y data['se'] = stderr data['ymin'], data['ymax'] = tdist_ci( y, n-1, stderr, params['level']) else: data['y'] = regressor.predict(Xseq, return_std=True) return data
python
def gpr(data, xseq, **params): """ Fit gaussian process """ try: from sklearn import gaussian_process except ImportError: raise PlotnineError( "To use gaussian process smoothing, " "You need to install scikit-learn.") kwargs = params['method_args'] if not kwargs: warnings.warn( "See sklearn.gaussian_process.GaussianProcessRegressor " "for parameters to pass in as 'method_args'", PlotnineWarning) regressor = gaussian_process.GaussianProcessRegressor(**kwargs) X = np.atleast_2d(data['x']).T n = len(data) Xseq = np.atleast_2d(xseq).T regressor.fit(X, data['y']) data = pd.DataFrame({'x': xseq}) if params['se']: y, stderr = regressor.predict(Xseq, return_std=True) data['y'] = y data['se'] = stderr data['ymin'], data['ymax'] = tdist_ci( y, n-1, stderr, params['level']) else: data['y'] = regressor.predict(Xseq, return_std=True) return data
[ "def", "gpr", "(", "data", ",", "xseq", ",", "*", "*", "params", ")", ":", "try", ":", "from", "sklearn", "import", "gaussian_process", "except", "ImportError", ":", "raise", "PlotnineError", "(", "\"To use gaussian process smoothing, \"", "\"You need to install sci...
Fit gaussian process
[ "Fit", "gaussian", "process" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/smoothers.py#L246-L279
train
214,498
has2k1/plotnine
plotnine/stats/smoothers.py
tdist_ci
def tdist_ci(x, df, stderr, level): """ Confidence Intervals using the t-distribution """ q = (1 + level)/2 delta = stats.t.ppf(q, df) * stderr return x - delta, x + delta
python
def tdist_ci(x, df, stderr, level): """ Confidence Intervals using the t-distribution """ q = (1 + level)/2 delta = stats.t.ppf(q, df) * stderr return x - delta, x + delta
[ "def", "tdist_ci", "(", "x", ",", "df", ",", "stderr", ",", "level", ")", ":", "q", "=", "(", "1", "+", "level", ")", "/", "2", "delta", "=", "stats", ".", "t", ".", "ppf", "(", "q", ",", "df", ")", "*", "stderr", "return", "x", "-", "delta...
Confidence Intervals using the t-distribution
[ "Confidence", "Intervals", "using", "the", "t", "-", "distribution" ]
566e579af705367e584fb27a74e6c5199624ca89
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/stats/smoothers.py#L282-L288
train
214,499