_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q271900 | CursesMenu.draw | test | def draw(self):
"""
Redraws the menu and refreshes the screen. Should be called whenever something changes that needs to be redrawn.
"""
self.screen.border(0)
if self.title is not None:
self.screen.addstr(2, 2, self.title, curses.A_STANDOUT)
if self.subtitle is not None:
self.screen.addstr(4, 2, self.subtitle, curses.A_BOLD)
for index, item in enumerate(self.items):
if self.current_option == index:
text_style = self.highlight
else:
text_style = self.normal
self.screen.addstr(5 + index, 4, item.show(index), text_style)
screen_rows, screen_cols = CursesMenu.stdscr.getmaxyx()
top_row = 0
if 6 + len(self.items) > screen_rows:
if screen_rows + self.current_option < 6 + len(self.items):
top_row = self.current_option
else:
top_row = 6 + len(self.items) - screen_rows
self.screen.refresh(top_row, 0, 0, 0, screen_rows - 1, screen_cols - 1) | python | {
"resource": ""
} |
q271901 | CursesMenu.process_user_input | test | def process_user_input(self):
"""
Gets the next single character and decides what to do with it
"""
user_input = self.get_input()
go_to_max = ord("9") if len(self.items) >= 9 else ord(str(len(self.items)))
if ord('1') <= user_input <= go_to_max:
self.go_to(user_input - ord('0') - 1)
elif user_input == curses.KEY_DOWN:
self.go_down()
elif user_input == curses.KEY_UP:
self.go_up()
elif user_input == ord("\n"):
self.select()
return user_input | python | {
"resource": ""
} |
q271902 | CursesMenu.select | test | def select(self):
"""
Select the current item and run it
"""
self.selected_option = self.current_option
self.selected_item.set_up()
self.selected_item.action()
self.selected_item.clean_up()
self.returned_value = self.selected_item.get_return()
self.should_exit = self.selected_item.should_exit
if not self.should_exit:
self.draw() | python | {
"resource": ""
} |
q271903 | parse_old_menu | test | def parse_old_menu(menu_data):
"""
Take an old-style menuData dictionary and return a CursesMenu
:param dict menu_data:
:return: A new CursesMenu
:rtype: CursesMenu
"""
menu_title = menu_data['title']
menu = CursesMenu(menu_title)
for item in menu_data["options"]:
item_type = item["type"]
item_title = item["title"]
if item_type == menuItem.COMMAND:
item_command = item["command"]
menu.append_item(CommandItem(item_title, item_command, menu))
elif item_type == menuItem.FUNCTION:
item_function = item["function"]
menu.append_item(FunctionItem(item_title, item_function, menu))
elif item_type == menuItem.EXITMENU:
menu.append_item(ExitItem(item_title, menu))
elif item_type == menuItem.NUMBER:
menu.append_item(SelectionItem(item_title, menu))
elif item_type == menuItem.MENU:
new_menu = parse_old_menu(item)
menu.append_item(SubmenuItem(item_title, menu, new_menu))
return menu | python | {
"resource": ""
} |
q271904 | top | test | def top(
df,
value: str,
limit: int,
order: str = 'asc',
group: Union[str, List[str]] = None
):
"""
Get the top or flop N results based on a column value for each specified group columns
---
### Parameters
*mandatory :*
- `value` (*str*): column name on which you will rank the results
- `limit` (*int*): Number to specify the N results you want to retrieve.
Use a positive number x to retrieve the first x results.
Use a negative number -x to retrieve the last x results.
*optional :*
- `order` (*str*): `"asc"` or `"desc"` to sort by ascending ou descending order. By default : `"asc"`.
- `group` (*str*, *list of str*): name(s) of columns on which you want to perform the group operation.
---
### Example
**Input**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| lili | 1 | 50 |
| lili | 1 | 20 |
| toto | 1 | 100 |
| toto | 1 | 200 |
| toto | 1 | 300 |
| lala | 1 | 100 |
| lala | 1 | 150 |
| lala | 1 | 250 |
| lala | 2 | 350 |
| lala | 2 | 450 |
```cson
top:
value: 'value'
limit: 4
order: 'asc'
```
**Output**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| lala | 1 | 250 |
| toto | 1 | 300 |
| lala | 2 | 350 |
| lala | 2 | 450 |
"""
ascending = order != 'desc'
limit = int(limit)
filter_func = 'nlargest' if (limit > 0) ^ ascending else 'nsmallest'
def _top(df):
return getattr(df, filter_func)(abs(limit), value).sort_values(by=value,
ascending=ascending)
if group is None:
df = _top(df)
else:
df = df.groupby(group).apply(_top)
return df | python | {
"resource": ""
} |
q271905 | top_group | test | def top_group(
df,
aggregate_by: List[str],
value: str,
limit: int,
order: str = 'asc',
function: str = 'sum',
group: Union[str, List[str]] = None
):
"""
Get the top or flop N results based on a function and a column value that agregates the input.
The result is composed by all the original lines including only lines corresponding
to the top groups
---
### Parameters
*mandatory :*
- `value` (*str*): Name of the column name on which you will rank the results.
- `limit` (*int*): Number to specify the N results you want to retrieve from the sorted values.
- Use a positive number x to retrieve the first x results.
- Use a negative number -x to retrieve the last x results.
- `aggregate_by` (*list of str*)): name(s) of columns you want to aggregate
*optional :*
- `order` (*str*): `"asc"` or `"desc"` to sort by ascending ou descending order. By default : `"asc"`.
- `group` (*str*, *list of str*): name(s) of columns on which you want to perform the group operation.
- `function` : Function to use to group over the group column
---
### Example
**Input**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| lili | 1 | 50 |
| lili | 1 | 20 |
| toto | 1 | 100 |
| toto | 1 | 200 |
| toto | 1 | 300 |
| lala | 1 | 100 |
| lala | 1 | 150 |
| lala | 1 | 250 |
| lala | 2 | 350 |
| lala | 2 | 450 |
```cson
top_group:
group: ["Category"]
value: 'value'
aggregate_by: ["variable"]
limit: 2
order: "desc"
```
**Output**
| variable | Category | value |
|:--------:|:--------:|:-----:|
| toto | 1 | 100 |
| toto | 1 | 200 |
| toto | 1 | 300 |
| lala | 1 | 100 |
| lala | 1 | 150 |
| lala | 1 | 250 |
| lala | 2 | 350 |
| lala | 2 | 450 |
"""
aggregate_by = aggregate_by or []
group_top = group or []
df2 = df.groupby(group_top + aggregate_by).agg(function).reset_index()
df2 = top(df2, group=group, value=value, limit=limit, order=order).reset_index(drop=True)
df2 = df2[group_top + aggregate_by]
df = df2.merge(df, on=group_top + aggregate_by)
return df | python | {
"resource": ""
} |
q271906 | convert_str_to_datetime | test | def convert_str_to_datetime(df, *, column: str, format: str):
"""
Convert string column into datetime column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to format
- `format` (*str*): current format of the values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
"""
df[column] = pd.to_datetime(df[column], format=format)
return df | python | {
"resource": ""
} |
q271907 | convert_datetime_to_str | test | def convert_datetime_to_str(df, *, column: str, format: str, new_column: str = None):
"""
Convert datetime column into string column
---
### Parameters
*mandatory :*
- column (*str*): name of the column to format
- format (*str*): format of the result values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
*optional :*
- new_column (*str*): name of the output column. By default `column` is overwritten.
"""
new_column = new_column or column
df[new_column] = df[column].dt.strftime(format)
return df | python | {
"resource": ""
} |
q271908 | change_date_format | test | def change_date_format(
df, *,
column: str,
output_format: str,
input_format: str = None,
new_column: str = None,
new_time_zone=None
):
"""
Convert the format of a date
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to change the format
- `output_format` (*str*): format of the output values (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior))
*optional :*
- `input_format` (*str*): format of the input values (by default let the parser detect it)
- `new_column` (*str*): name of the output column (by default overwrite `column`)
- `new_time_zone` (*str*): name of new time zone (by default no time zone conversion is done)
---
### Example
**Input**
label | date
:------:|:----:
France | 2017-03-22
Europe | 2016-03-22
```cson
change_date_format:
column: 'date'
input_format: '%Y-%m-%d'
output_format: '%Y-%m'
```
Output :
label | date
:------:|:----:
France | 2017-03
Europe | 2016-03
"""
new_column = new_column or column
df[new_column] = (pd.to_datetime(df[column], format=input_format, utc=True)
.dt.tz_convert(new_time_zone)
.dt.strftime(output_format))
return df | python | {
"resource": ""
} |
q271909 | cast | test | def cast(df, column: str, type: str, new_column=None):
"""
Convert column's type into type
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to convert
- `type` (*str*): output type. It can be :
- `"int"` : integer type
- `"float"` : general number type
- `"str"` : text type
*optional :*
- `new_column` (*str*): name of the output column.
By default the `column` arguments is modified.
---
### Example
**Input**
| Column 1 | Column 2 | Column 3 |
|:-------:|:--------:|:--------:|
| 'one' | '2014' | 30.0 |
| 'two' | 2015.0 | '1' |
| 3.1 | 2016 | 450 |
```cson
postprocess: [
cast:
column: 'Column 1'
type: 'str'
cast:
column: 'Column 2'
type: 'int'
cast:
column: 'Column 3'
type: 'float'
]
```
**Output**
| Column 1 | Column 2 | Column 3 |
|:-------:|:------:|:--------:|
| 'one' | 2014 | 30.0 |
| 'two' | 2015 | 1.0 |
| '3.1' | 2016 | 450.0 |
"""
new_column = new_column or column
df[new_column] = df[column].astype(type)
return df | python | {
"resource": ""
} |
q271910 | rank | test | def rank(
df,
value_cols: Union[str, List[str]],
group_cols: List[str] = None,
rank_cols_names: List[str] = None,
method='min',
ascending: bool = True
):
"""
This function creates rank columns based on numeric values to be ranked.
---
### Parameters
*mandatory :*
- `value_cols` (*list*): name(s) of the columns used
*optional :*
- `group_cols` (*list*): name(s) of the column(s) used to
create each group inside which independent ranking needs to be applied
- `rank_cols_names` (*list*): the names of the added ranking columns.
If not filled, the ranking will be named after the value_cols with a '_rank' suffix
- `method` (*str*): method to use when encountering equal values:
- `'min'` (default): lowest rank in group
- `'max'`: highest rank in group
- `'average'`: average rank of group
- `'first'`: ranks assigned in order the values appear in the series
- `'dense'`: like 'min', but rank always increases by 1 between groups
- `ascending` (*boolean*): whether the rank should be determined based on
ascending (default) or descending order
---
### Example
**Input**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
| :---: | :---: | :---: | :---: |
| A | 2017 | 10 | 3 |
| A | 2017 | 20 | 1 |
| A | 2018 | 10 | 5 |
| A | 2018 | 30 | 4 |
| B | 2017 | 60 | 4 |
| B | 2017 | 40 | 3 |
| B | 2018 | 50 | 7 |
| B | 2018 | 50 | 6 |
```cson
rank :
value_cols: 'VALUE_1'
```
**Output**
| ENTITY | YEAR | VALUE_1 | VALUE_2 | VALUE_1_rank
| :---: | :---: | :---: | :---: | :---: |
| A | 2017 | 10 | 3 | 1 |
| A | 2017 | 20 | 1 | 3 |
| A | 2018 | 10 | 5 | 1 |
| A | 2018 | 30 | 4 | 4 |
| B | 2017 | 60 | 4 | 8 |
| B | 2017 | 40 | 3 | 5 |
| B | 2018 | 50 | 7 | 6 |
| B | 2018 | 50 | 6 | 6 |
"""
value_cols = [value_cols] if not isinstance(value_cols, list) else value_cols
for col in value_cols:
if not np.issubdtype(df[col].dtype, np.number):
raise TypeError(col + " specified in value_cols must be of numeric type")
if rank_cols_names is None:
rank_cols_names = [x + '_rank' for x in value_cols]
if group_cols is None:
df[rank_cols_names] = df[value_cols].rank(method=method, ascending=ascending)
else:
df[rank_cols_names] = (df.groupby(group_cols)[value_cols]
.rank(method=method, ascending=ascending))
if method != 'average':
df[rank_cols_names] = df[rank_cols_names].astype('int')
return df | python | {
"resource": ""
} |
q271911 | waterfall | test | def waterfall(
df,
date: str,
value: str,
start: Dict[str, str],
end: Dict[str, str],
upperGroup: Dict[str, str],
insideGroup: Dict[str, str] = None,
filters: List[str] = None
):
"""
Return a line for each bars of a waterfall chart, totals, groups, subgroups.
Compute the variation and variation rate for each line.
---
### Parameters
*mandatory :*
- `date` (*str*): name of the column that id the period of each lines
- `value` (*str*): name of the column that contains the vaue for each lines
- `start` (*dict*):
- `label`: text displayed under the first master column
- `id`: value in the date col that id lines for the first period
- `end` (*dict*):
- `label`: text displayed under the last master column
- `id`: value in the date col that id lines for the second period
*optional :*
- `upperGroup` (*dict*):
- `id`: name of the column that contains upperGroups unique IDs
- `label`: not required, text displayed under each upperGroups bars,
using ID when it's absent
- `groupsOrder`: not required, order of upperGroups
- `insideGroup` (*dict*):
- `id`: name of the column that contains insideGroups unique IDs
- `label`: not required, text displayed under each insideGroups bars,
using ID when it's absent
- `groupsOrder`: not required, order of insideGroups
- `filters` (*list*): columns to filters on
---
### Example
**Input**
| product_id | played | date | ord | category_id | category_name |
|:------------:|:--------:|:------:|:-----:|:-------------:|:---------------:|
| super clap | 12 | t1 | 1 | clap | Clap |
| clap clap | 1 | t1 | 10 | clap | Clap |
| tac | 1 | t1 | 1 | snare | Snare |
| super clap | 10 | t2 | 1 | clap | Clap |
| tac | 100 | t2 | 1 | snare | Snare |
| bom | 1 | t2 | 1 | tom | Tom |
```cson
waterfall:
upperGroup:
id: 'category_id'
label: 'category_name'
insideGroup:
id: 'product_id'
groupsOrder: 'ord'
date: 'date'
value: 'played'
start:
label: 'Trimestre 1'
id: 't1'
end:
label: 'Trimester 2'
id: 't2'
```
**Output**
| value | label | variation | groups | type | order |
|:-------:|:-----------:|:-----------:|:--------:|:------:|:-------:|
| 14 | Trimestre 1 | NaN | NaN | NaN | NaN |
| -3 | Clap | -0.230769 | clap | parent | NaN |
| -2 | super clap | -0.166667 | clap | child | 1 |
| -1 | clap clap | -1 | clap | child | 10 |
| 99 | Snare | 99 | snare | parent | NaN |
| 99 | tac | 99 | snare | child | 1 |
| 1 | Tom | inf | tom | parent | NaN |
| 1 | bom | inf | tom | child | 1 |
| 111 | Trimester 2 | NaN | NaN | NaN | NaN |
"""
if len(df) == 0:
return df
if filters is not None:
if isinstance(filters, str):
filters = [filters]
def sub_waterfall(df):
wa_df = waterfall(df, date, value, start, end, upperGroup, insideGroup)
for filters_col in filters:
wa_df[filters_col] = df[filters_col].values[0]
return wa_df
# filters df into a list of sub_df
list_of_sub_df = [df[(df[filters].values == i).all(axis=1)]
for i in df[filters].drop_duplicates().values]
return pd.concat([sub_waterfall(df) for df in list_of_sub_df], sort=False)
groups = {
'upperGroup': {
'type': 'parent',
'id': 'upperGroup',
'order': {
'by': ['upperGroup_order', 'groups'],
'ascending': [True, True]
},
'obj': upperGroup
}
}
if insideGroup is not None:
groups['insideGroup'] = {
'type': 'child',
'id': 'insideGroup',
'order': {
'by': ['type', 'insideGroup_order', 'label'],
'ascending': [False, True, True]
},
'obj': insideGroup
}
# prepare the dataframe with standard column names
df = _compute_rename(df, date, value, groups)
agg_conf = {'value': sum}
agg_conf.update({f'{col}_label': 'first' for col in groups.keys()})
agg_conf.update({f'{col}_order': 'first' for col in groups.keys()})
df = df.groupby(list(groups.keys()) + ['date']).agg(agg_conf).reset_index()
df_start, df_end = _compute_start_end(df, start, end)
df = _compute_value_diff(df, start, end, groups)
middle = _compute_upper_group(df)
if insideGroup is not None:
middle = pd.concat([middle, _compute_inside_group(df)])
ret = _compute_order(df_start, df_end, middle, groups)
return ret | python | {
"resource": ""
} |
q271912 | _basic_math_operation | test | def _basic_math_operation(df, new_column, column_1, column_2, op):
"""
Basic mathematical operation to apply operator on `column_1` and `column_2`
Both can be either a number or the name of a column of `df`
Will create a new column named `new_column`
"""
if not isinstance(column_1, (str, int, float)):
raise TypeError(f'column_1 must be a string, an integer or a float')
if not isinstance(column_2, (str, int, float)):
raise TypeError(f'column_2 must be a string, an integer or a float')
if isinstance(column_1, str):
column_1 = df[column_1]
if isinstance(column_2, str):
column_2 = df[column_2]
operator = getattr(_operator, op)
df[new_column] = operator(column_1, column_2)
return df | python | {
"resource": ""
} |
q271913 | round_values | test | def round_values(df, *, column: str, decimals: int, new_column: str = None):
"""
Round each value of a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column to round
- `decimals` (*int*): number of decimal to keeep
*optional :*
- `new_column` (*str*): name of the new column to create.
By default, no new column will be created and `column` will be replaced
---
### Example
** Input**
ENTITY|VALUE_1|VALUE_2
:-----:|:-----:|:-----:
A|-1.512|-1.504
A|0.432|0.14
```cson
round_values:
column: 'VALUE_1'
decimals:1
new_column: 'Pika'
```
**Output**
ENTITY|VALUE_1|VALUE_2|Pika
:-----:|:-----:|:-----:|:-----:
A|-1.512|-1.504|-1.5
A|0.432|0.14|0.4
"""
new_column = new_column or column
df[new_column] = df[column].round(decimals)
return df | python | {
"resource": ""
} |
q271914 | absolute_values | test | def absolute_values(df, *, column: str, new_column: str = None):
"""
Get the absolute numeric value of each element of a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column
*optional :*
- `new_column` (*str*): name of the column containing the result.
By default, no new column will be created and `column` will be replaced.
---
### Example
**Input**
| ENTITY | VALUE_1 | VALUE_2 |
|:------:|:-------:|:-------:|
| A | -1.512 | -1.504 |
| A | 0.432 | 0.14 |
```cson
absolute_values:
column: 'VALUE_1'
new_column: 'Pika'
```
**Output**
| ENTITY | VALUE_1 | VALUE_2 | Pika |
|:------:|:-------:|:-------:|:-----:|
| A | -1.512 | -1.504 | 1.512 |
| A | 0.432 | 0.14 | 0.432 |
"""
new_column = new_column or column
df[new_column] = abs(df[column])
return df | python | {
"resource": ""
} |
q271915 | pivot | test | def pivot(df, index: List[str], column: str, value: str, agg_function: str = 'mean'):
"""
Pivot the data. Reverse operation of melting
---
### Parameters
*mandatory :*
- `index` (*list*): names of index columns.
- `column` (*str*): column name to pivot on
- `value` (*str*): column name containing the value to fill the pivoted df
*optional :*
- `agg_function` (*str*): aggregation function to use among 'mean' (default), 'count', 'mean', 'max', 'min'
---
### Example
**Input**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 250 |
| toto | wave 1 | 2016 | 450 |
```cson
pivot:
index: ['variable','wave']
column: 'year'
value: 'value'
```
**Output**
| variable | wave | 2014 | 2015 | 2015 |
|:--------:|:-------:|:------:|:----:|:----:|
| toto | wave 1 | 300 | 250 | 450 |
"""
if df.dtypes[value].type == np.object_:
df = pd.pivot_table(df, index=index,
columns=column,
values=value,
aggfunc=lambda x: ' '.join(x))
else:
df = pd.pivot_table(df, index=index,
columns=column,
values=value,
aggfunc=agg_function)
df = df.reset_index()
return df | python | {
"resource": ""
} |
q271916 | pivot_by_group | test | def pivot_by_group(
df,
variable,
value,
new_columns,
groups,
id_cols=None
):
"""
Pivot a dataframe by group of variables
---
### Parameters
*mandatory :*
* `variable` (*str*): name of the column used to create the groups.
* `value` (*str*): name of the column containing the value to fill the pivoted df.
* `new_columns` (*list of str*): names of the new columns.
* `groups` (*dict*): names of the groups with their corresponding variables.
**Warning**: the list of variables must have the same order as `new_columns`
*optional :*
* `id_cols` (*list of str*) : names of other columns to keep, default `None`.
---
### Example
**Input**
| type | variable | montant |
|:----:|:----------:|:-------:|
| A | var1 | 5 |
| A | var1_evol | 0.3 |
| A | var2 | 6 |
| A | var2_evol | 0.2 |
```cson
pivot_by_group :
id_cols: ['type']
variable: 'variable'
value: 'montant'
new_columns: ['value', 'variation']
groups:
'Group 1' : ['var1', 'var1_evol']
'Group 2' : ['var2', 'var2_evol']
```
**Ouput**
| type | variable | value | variation |
|:----:|:----------:|:-------:|:---------:|
| A | Group 1 | 5 | 0.3 |
| A | Group 2 | 6 | 0.2 |
"""
if id_cols is None:
index = [variable]
else:
index = [variable] + id_cols
param = pd.DataFrame(groups, index=new_columns)
temporary_colum = 'tmp'
df[temporary_colum] = df[variable]
for column in param.columns:
df.loc[df[variable].isin(param[column]), variable] = column
param = param.T
for column in param.columns:
df.loc[
df[temporary_colum].isin(param[column]), temporary_colum] = column
df = pivot(df, index, temporary_colum, value)
return df | python | {
"resource": ""
} |
q271917 | groupby | test | def groupby(df, *, group_cols: Union[str, List[str]],
aggregations: Dict[str, Union[str, List[str]]]):
"""
Aggregate values by groups.
---
### Parameters
*mandatory :*
- `group_cols` (*list*): list of columns used to group data
- `aggregations` (*dict*): dictionnary of values columns to group as keys and aggregation
function to use as values (See the [list of aggregation functions](
https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#aggregation))
---
### Example
**Input**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
|:------:|:----:|:-------:|:-------:|
| A | 2017 | 10 | 3 |
| A | 2017 | 20 | 1 |
| A | 2018 | 10 | 5 |
| A | 2018 | 30 | 4 |
| B | 2017 | 60 | 4 |
| B | 2017 | 40 | 3 |
| B | 2018 | 50 | 7 |
| B | 2018 | 60 | 6 |
```cson
groupby:
group_cols: ['ENTITY', 'YEAR']
aggregations:
'VALUE_1': 'sum',
'VALUE_2': 'mean'
```
**Output**
| ENTITY | YEAR | VALUE_1 | VALUE_2 |
|:------:|:----:|:-------:|:-------:|
| A | 2017 | 30 | 2.0 |
| A | 2018 | 40 | 4.5 |
| B | 2017 | 100 | 3.5 |
| B | 2018 | 110 | 6.5 |
"""
df = df.groupby(group_cols, as_index=False).agg(aggregations)
# When several aggregations are performed on the same column, pandas return
# a multi-indexed dataframe, so we need to flatten the columns index to get
# back to a unique level header
if df.columns.nlevels == 2:
level_0 = df.columns.get_level_values(0)
level_1 = df.columns.get_level_values(1)
new_columns = [(f'{x}_{y}' if x else y) for (x, y)
in zip(level_1, level_0)]
df.columns = new_columns
return df | python | {
"resource": ""
} |
q271918 | cumsum | test | def cumsum(df, new_column: str, column: str, index: list, date_column: str, date_format: str):
"""
DEPRECATED - please use `compute_cumsum` instead
"""
logging.getLogger(__name__).warning(f"DEPRECATED: use compute_cumsum")
date_temp = '__date_temp__'
if isinstance(index, str):
index = [index]
levels = list(range(0, len(index)))
df[date_temp] = pd.to_datetime(df[date_column], format=date_format)
reference_cols = [date_temp, date_column]
df = df.groupby(index + reference_cols).sum()
df[new_column] = df.groupby(level=levels)[column].cumsum()
df.reset_index(inplace=True)
del df[date_temp]
return df | python | {
"resource": ""
} |
q271919 | add_missing_row | test | def add_missing_row(
df: pd.DataFrame,
id_cols: List[str],
reference_col: str,
complete_index: Union[Dict[str, str], List[str]] = None,
method: str = None,
cols_to_keep: List[str] = None
) -> pd.DataFrame:
"""
Add missing row to a df base on a reference column
---
### Parameters
*mandatory :*
- `id_cols` (*list of str*): names of the columns used to create each group
- `reference_col` (*str*): name of the column used to identify missing rows
*optional :*
- `complete_index` (*list* or *dict*): [A, B, C] a list of values used to add missing rows.
It can also be a dict to declare a date range.
By default, use all values of reference_col.
- `method` (*str*): by default all missing rows are added. The possible values are :
- `"between"` : add missing rows having their value between min and max values for each group,
- `"between_and_after"` : add missing rows having their value bigger than min value for each group.
- `"between_and_before"` : add missing rows having their value smaller than max values for each group.
- `cols_to_keep` (*list of str*): name of other columns to keep, linked to the reference_col.
---
### Example
**Input**
YEAR | MONTH | NAME
:---:|:---:|:--:
2017|1|A
2017|2|A
2017|3|A
2017|1|B
2017|3|B
```cson
add_missing_row:
id_cols: ['NAME']
reference_col: 'MONTH'
```
**Output**
YEAR | MONTH | NAME
:---:|:---:|:--:
2017|1|A
2017|2|A
2017|3|A
2017|1|B
2017|2|B
2017|3|B
"""
if cols_to_keep is None:
cols_for_index = [reference_col]
else:
cols_for_index = [reference_col] + cols_to_keep
check_params_columns_duplicate(id_cols + cols_for_index)
if method == 'between' or method == 'between_and_after':
df['start'] = df.groupby(id_cols)[reference_col].transform(min)
id_cols += ['start']
if method == 'between' or method == 'between_and_before':
df['end'] = df.groupby(id_cols)[reference_col].transform(max)
id_cols += ['end']
names = id_cols + cols_for_index
new_df = df.set_index(names)
index_values = df.groupby(id_cols).sum().index.values
if complete_index is None:
complete_index = df.groupby(cols_for_index).sum().index.values
elif isinstance(complete_index, dict):
if complete_index['type'] == 'date':
freq = complete_index['freq']
date_format = complete_index['format']
start = complete_index['start']
end = complete_index['end']
if isinstance(freq, dict):
freq = pd.DateOffset(**{k: int(v) for k, v in freq.items()})
complete_index = pd.date_range(start=start, end=end, freq=freq)
complete_index = complete_index.strftime(date_format)
else:
raise ParamsValueError(f'Unknown complete index type: '
f'{complete_index["type"]}')
if not isinstance(index_values[0], tuple):
index_values = [(x,) for x in index_values]
if not isinstance(complete_index[0], tuple):
complete_index = [(x,) for x in complete_index]
new_tuples_index = [x + y for x in index_values for y in complete_index]
new_index = pd.MultiIndex.from_tuples(new_tuples_index, names=names)
new_df = new_df.reindex(new_index).reset_index()
if method == 'between' or method == 'between_and_after':
new_df = new_df[new_df[reference_col] >= new_df['start']]
del new_df['start']
if method == 'between' or method == 'between_and_before':
new_df = new_df[new_df[reference_col] <= new_df['end']]
del new_df['end']
return new_df | python | {
"resource": ""
} |
q271920 | catch | test | def catch(logger):
"""
Decorator to catch an exception and don't raise it.
Logs information if a decorator failed.
Note:
We don't want possible exceptions during logging to be raised.
This is used to decorate any function that gets executed
before or after the execution of the decorated function.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
logger.warning(f"Exception raised in decorator: {func.__name__}")
return wrapper
return decorator | python | {
"resource": ""
} |
q271921 | log_message | test | def log_message(logger, message=""):
"""
Decorator to log a message before executing a function
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
_log_message(logger, func.__name__, message)
result = func(*args, **kwargs)
return result
return wrapper
return decorator | python | {
"resource": ""
} |
q271922 | log_time | test | def log_time(logger):
"""
Decorator to log the execution time of a function
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
_log_time(logger, func.__name__, start, end)
return result
return wrapper
return decorator | python | {
"resource": ""
} |
q271923 | log_shapes | test | def log_shapes(logger):
"""
Decorator to log the shapes of input and output dataframes
It considers all the dataframes passed either as arguments or keyword arguments as inputs
and all the dataframes returned as outputs.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
input_shapes = _get_dfs_shapes(*args, **kwargs)
result = func(*args, **kwargs)
output_shapes = _get_dfs_shapes(result)
_log_shapes(logger, func.__name__, input_shapes, output_shapes)
return result
return wrapper
return decorator | python | {
"resource": ""
} |
q271924 | rename | test | def rename(
df,
values: Dict[str, Dict[str, str]] = None,
columns: Dict[str, Dict[str, str]] = None,
locale: str = None
):
"""
Replaces data values and column names according to the locale
---
### Parameters
- `values` (optional: dict):
- key: term to be replaced
- value:
- key: the locale e.g. 'en' or 'fr'
- value: term's translation
- `columns` (optional: dict):
- key: columns name to be replaced
- value:
- key: the locale e.g. 'en' or 'fr'
- value: column name's translation
- `locale` (optional: str): the locale you want to use.
By default the client locale is used.
---
### Example
**Input**
| label | value |
|:----------------:|:-----:|
| France | 100 |
| Europe wo France | 500 |
```cson
rename:
values:
'Europe wo France':
'en': 'Europe excl. France'
'fr': 'Europe excl. France'
columns:
'value':
'en': 'revenue'
'fr': 'revenue'
```
**Output**
| label | revenue |
|:-------------------:|:-------:|
| France | 100 |
| Europe excl. France | 500 |
"""
if values:
to_replace = list(values.keys())
value = [values[term][locale] for term in values]
df = df.replace(to_replace=to_replace, value=value)
if columns:
_keys = list(columns.keys())
_values = [column[locale] for column in columns.values()]
columns = dict(list(zip(_keys, _values)))
df = df.rename(columns=columns)
return df | python | {
"resource": ""
} |
q271925 | compute_cumsum | test | def compute_cumsum(
df,
id_cols: List[str],
reference_cols: List[str],
value_cols: List[str],
new_value_cols: List[str] = None,
cols_to_keep: List[str] = None
):
"""
Compute cumsum for a group of columns.
---
### Parameters
*mandatory :*
- `id_cols` (*list*): the columns id to create each group
- `reference_cols` (*list*): the columns to order the cumsum
- `value_cols` (*list*): the columns to cumsum
*optional :*
- `new_value_cols` (*list*): the new columns with the result cumsum
- `cols_to_keep` (*list*): other columns to keep in the dataset.
This option can only be used if there is a single row per group [id_cols + reference_cols]
---
### Example
**Input**
MONTH | DAY | NAME | VALUE | X
:---:|:---:|:--:|:---:|:---:
1 | 1 | A | 1 | lo
2 | 1 | A | 1 | lo
2 | 15 | A | 1 | la
1 | 15 | B | 1 | la
```cson
compute_cumsum:
id_cols: ['NAME']
reference_cols: ['MONTH', 'DAY']
    value_cols: ['VALUE']
cols_to_keep: ['X']
```
**Output**
NAME | MONTH | DAY | X | VALUE
:---:|:---:|:--:|:---:|:---:
A | 1 | 1 | lo | 1
    A | 2 | 1 | lo | 2
    A | 2 | 15 | la | 3
B | 1 | 15 | la | 1
"""
if cols_to_keep is None:
cols_to_keep = []
if new_value_cols is None:
new_value_cols = value_cols
if len(value_cols) != len(new_value_cols):
raise ParamsValueError('`value_cols` and `new_value_cols` needs '
'to have the same number of elements')
check_params_columns_duplicate(id_cols + reference_cols + cols_to_keep + value_cols)
levels = list(range(0, len(id_cols)))
df = df.groupby(id_cols + reference_cols + cols_to_keep).sum()
df[new_value_cols] = df.groupby(level=levels)[value_cols].cumsum()
return df.reset_index() | python | {
"resource": ""
} |
q271926 | combine_columns_aggregation | test | def combine_columns_aggregation(
df,
id_cols: List[str],
cols_for_combination: Dict[str, str],
agg_func: Union[str, List[str], Dict[str, str]] = 'sum'
):
"""
Aggregates data to reproduce "All" category for requester
---
### Parameters
*mandatory :*
- `id_cols` (*list*): the columns id to group
- `cols_for_combination` (*dict*): colums corresponding to
the filters as key and their default value as value
*optional :*
- `agg_func` (*str*, *list* or *dict*): the function(s) to use for aggregating the data.
Accepted combinations are:
- string function name
- list of functions and/or function names, e.g. [np.sum, 'mean']
- dict of axis labels -> functions, function names or list of such.
"""
requesters_cols = list(cols_for_combination.keys())
requester_combination = [
list(item) for i in range(0, len(requesters_cols) + 1)
for item in itertools.combinations(requesters_cols, i)]
dfs_result = []
for comb in requester_combination:
df_tmp = df.groupby(id_cols + comb).agg(agg_func).reset_index()
for key in (set(cols_for_combination.keys()) - set(comb)):
df_tmp[key] = cols_for_combination[key]
dfs_result.append(df_tmp)
return pd.concat(dfs_result, sort=False, ignore_index=True) | python | {
"resource": ""
} |
q271927 | get_param_value_from_func_call | test | def get_param_value_from_func_call(param_name, func, call_args, call_kwargs):
"""
Get the value of a function's parameter based on its signature
and the call's args and kwargs.
Example:
>>> def foo(a, b, c=3, d=4):
... pass
...
>>> # what would be the value of "c" when calling foo(1, b=2, c=33) ?
>>> get_param_value_from_func_call('c', foo, [1], {'b': 2, 'c': 33})
33
"""
signature = inspect.signature(func)
params_list = signature.parameters.keys()
if param_name not in params_list:
raise TypeError(f"'{param_name}' not found in {func.__name__}"
f"parameters list ([{params_list}])")
call = signature.bind(*call_args, **call_kwargs)
call.apply_defaults()
return call.arguments[param_name] | python | {
"resource": ""
} |
q271928 | clean_cachedir_old_entries | test | def clean_cachedir_old_entries(cachedir: StoreBackendBase, func_name: str, limit: int) -> int:
"""Remove old entries from the cache"""
if limit < 1:
raise ValueError("'limit' must be greater or equal to 1")
cache_entries = get_cachedir_entries(cachedir, func_name)
cache_entries = sorted(cache_entries, key=lambda e: e.last_access, reverse=True)
cache_entries_to_remove = cache_entries[limit:]
for entry in cache_entries_to_remove:
shutil.rmtree(entry.path, ignore_errors=True)
return len(cache_entries_to_remove) | python | {
"resource": ""
} |
q271929 | roll_up | test | def roll_up(
df,
levels: List[str],
groupby_vars: List[str],
extra_groupby_cols: List[str] = None,
var_name: str = 'type',
value_name: str = 'value',
agg_func: str = 'sum',
drop_levels: List[str] = None
):
"""
Creates aggregates following a given hierarchy
---
### Parameters
*mandatory :*
- `levels` (*list of str*): name of the columns composing the hierarchy (from the top to the bottom level).
- `groupby_vars` (*list of str*): name of the columns with value to aggregate.
- `extra_groupby_cols` (*list of str*) optional: other columns used to group in each level.
*optional :*
- `var_name` (*str*) : name of the result variable column. By default, `“type”`.
- `value_name` (*str*): name of the result value column. By default, `“value”`.
- `agg_func` (*str*): name of the aggregation operation. By default, `“sum”`.
- `drop_levels` (*list of str*): the names of the levels that you may want to discard from the output.
---
### Example
**Input**
| Region | City | Population |
|:---------:|:--------:|:-----------:|
| Idf | Panam| 200 |
| Idf | Antony | 50 |
| Nord | Lille | 20 |
```cson
roll_up:
levels: ["Region", "City"]
groupby_vars: "Population"
```
**Output**
| Region | City | Population | value | type |
|:---------:|:--------:|:-----------:|:--------:|:------:|
| Idf | Panam| 200 | Panam | City |
| Idf | Antony | 50 | Antony | City |
| Nord | Lille | 20 | Lille | City |
| Idf | Nan | 250 | Idf | Region |
| Nord | Nan | 20 | Nord | Region |
"""
dfs = list()
groupby_cols_cpy = list(levels)
levels_cpy = list(levels)
levels_cpy.reverse()
extra_groupby_cols = extra_groupby_cols or []
drop_levels = drop_levels or []
previous_level = None
for top_level in levels_cpy:
# Aggregation
gb_df = getattr(
df.groupby(groupby_cols_cpy + extra_groupby_cols)[groupby_vars],
agg_func)().reset_index()
# Melt-like columns
gb_df[var_name] = top_level
gb_df[value_name] = gb_df[top_level]
dfs.append(gb_df)
if previous_level in drop_levels:
del dfs[-2]
previous_level = top_level
# Remove one level each time in the groupby: lowest level column needs
# a groupby with every levels, the next level needs every one except
# the lowest, etc. until the top level column that needs only itself
# inside the groupby.
groupby_cols_cpy.pop()
return pd.concat(dfs, sort=False).reset_index() | python | {
"resource": ""
} |
q271930 | argmax | test | def argmax(df, column: str, groups: Union[str, List[str]] = None):
"""
Keep the row of the data corresponding to the maximal value in a column
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column containing the value you want to keep the maximum
*optional :*
- `groups` (*str or list(str)*): name of the column(s) used for 'groupby' logic
(the function will return the argmax by group)
---
### Example
**Input**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 250 |
| toto | wave 1 | 2016 | 450 |
```cson
argmax:
column: 'year'
```
**Output**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2016 | 450 |
"""
if groups is None:
df = df[df[column] == df[column].max()].reset_index(drop=True)
else:
group_max = df.groupby(groups)[column].transform('max')
df = (df
.loc[df[column] == group_max, :]
.drop_duplicates()
.reset_index(drop=True)
)
return df | python | {
"resource": ""
} |
q271931 | argmin | test | def argmin(df, column: str, groups: Union[str, List[str]] = None):
"""
Keep the row of the data corresponding to the minimal value in a column
---
### Parameters
*mandatory :*
- `column` (str): name of the column containing the value you want to keep the minimum
*optional :*
- `groups` (*str or list(str)*): name of the column(s) used for 'groupby' logic
(the function will return the argmin by group)
---
### Example
**Input**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 250 |
| toto | wave 1 | 2016 | 450 |
```cson
argmin:
    column: 'year'
```
**Output**
| variable | wave | year | value |
|:--------:|:-------:|:--------:|:-----:|
| toto | wave 1 | 2014 | 300 |
"""
if groups is None:
df = df[df[column] == df[column].min()].reset_index(drop=True)
else:
group_min = df.groupby(groups)[column].transform('min')
df = (df
.loc[df[column] == group_min, :]
.drop_duplicates()
.reset_index(drop=True)
)
return df | python | {
"resource": ""
} |
q271932 | fillna | test | def fillna(df, column: str, value=None, column_value=None):
"""
Can fill NaN values from a column with a given value or a column
---
### Parameters
- `column` (*str*): name of column you want to fill
- `value`: NaN will be replaced by this value
- `column_value`: NaN will be replaced by value from this column
*NOTE*: You must set either the 'value' parameter or the 'column_value' parameter
---
### Example
**Input**
| variable | wave | year | my_value |
|:--------:|:-------:|:--------:|:--------:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | |
| toto | wave 1 | 2016 | 450 |
```cson
fillna:
column: 'my_value'
value: 0
```
**Output**
| variable | wave | year | my_value |
|:--------:|:-------:|:--------:|:--------:|
| toto | wave 1 | 2014 | 300 |
| toto | wave 1 | 2015 | 0 |
| toto | wave 1 | 2016 | 450 |
"""
if column not in df.columns:
df[column] = nan
if value is not None and column_value is not None:
raise ValueError('You cannot set both the parameters value and column_value')
if value is not None:
df[column] = df[column].fillna(value)
if column_value is not None:
if column_value not in df.columns:
raise ValueError(f'"{column_value}" is not a valid column name')
df[column] = df[column].fillna(df[column_value])
return df | python | {
"resource": ""
} |
q271933 | add_offset | test | def add_offset(dateobj, hr_offset: str, sign: str):
"""add a human readable offset to `dateobj` and return corresponding date.
rely on `pandas.Timedelta` and add the following extra shortcuts:
- "w", "week" and "weeks" for a week (i.e. 7days)
- "month', "months" for a month (i.e. no day computation, just increment the month)
- "y", "year', "years" for a year (i.e. no day computation, just increment the year)
"""
sign_coeff = 1 if sign == '+' else -1
try:
return dateobj + sign_coeff * pd.Timedelta(hr_offset)
except ValueError:
# pd.Timedelta could not parse the offset, let's try harder
match = TIMEDELTA_RGX.match(hr_offset)
if match is not None:
groups = match.groupdict()
unit = groups['unit'].lower()[0]
num = sign_coeff * int(groups['num'])
# is it a week ?
if unit == 'w':
return dateobj + num * timedelta(weeks=1)
# or a month ?
if unit == 'm':
return add_months(dateobj, num)
# or a year ?
if unit == 'y':
return add_years(dateobj, num)
# we did what we could, just re-raise the original exception
raise | python | {
"resource": ""
} |
q271934 | add_months | test | def add_months(dateobj, nb_months: int):
"""return `dateobj` + `nb_months`
If landing date doesn't exist (e.g. february, 30th), return the last
day of the landing month.
>>> add_months(date(2018, 1, 1), 1)
datetime.date(2018, 2, 1)
>>> add_months(date(2018, 1, 1), -1)
datetime.date(2017, 12, 1)
>>> add_months(date(2018, 1, 1), 25)
datetime.date(2020, 2, 1)
>>> add_months(date(2018, 1, 1), -25)
datetime.date(2015, 12, 1)
>>> add_months(date(2018, 1, 31), 1)
datetime.date(2018, 2, 28)
"""
nb_years, nb_months = divmod(nb_months, 12)
month = dateobj.month + nb_months
if month > 12:
nb_years += 1
month -= 12
year = dateobj.year + nb_years
lastday = monthrange(year, month)[1]
return dateobj.replace(year=year, month=month, day=min(lastday, dateobj.day)) | python | {
"resource": ""
} |
q271935 | add_years | test | def add_years(dateobj, nb_years):
"""return `dateobj` + `nb_years`
If landing date doesn't exist (e.g. february, 30th), return the last
day of the landing month.
>>> add_years(date(2018, 1, 1), 1)
datetime.date(2019, 1, 1)
>>> add_years(date(2018, 1, 1), -1)
datetime.date(2017, 1, 1)
>>> add_years(date(2020, 2, 29), 1)
datetime.date(2021, 2, 28)
>>> add_years(date(2020, 2, 29), -1)
datetime.date(2019, 2, 28)
"""
year = dateobj.year + nb_years
lastday = monthrange(year, dateobj.month)[1]
return dateobj.replace(year=year, day=min(lastday, dateobj.day)) | python | {
"resource": ""
} |
q271936 | parse_date | test | def parse_date(datestr: str, date_fmt: str) -> date:
"""parse `datestr` and return corresponding date object.
`datestr` should be a string matching `date_fmt` and parseable by `strptime`
but some offset can also be added using `(datestr) + OFFSET` or `(datestr) -
OFFSET` syntax. When using this syntax, `OFFSET` should be understable by
`pandas.Timedelta` (cf.
http://pandas.pydata.org/pandas-docs/stable/timedeltas.html) and `w`, `week`
`month` and `year` offset keywords are also accepted. `datestr` MUST be wrapped
with parenthesis.
Additionally, the following symbolic names are supported: `TODAY`,
`YESTERDAY`, `TOMORROW`.
Example usage:
>>> parse_date('2018-01-01', '%Y-%m-%d')
datetime.date(2018, 1, 1)
>>> parse_date('(2018-01-01) + 1day', '%Y-%m-%d')
datetime.date(2018, 1, 2)
>>> parse_date('(2018-01-01) + 2weeks', '%Y-%m-%d')
datetime.date(2018, 1, 15)
Parameters:
    `datestr`: the date to parse, formatted as `date_fmt`
    `date_fmt`: expected date format
Returns:
    The `date` object. If the date could not be parsed, a ValueError is raised.
"""
rgx = re.compile(r'\((?P<date>.*)\)(\s*(?P<sign>[+-])(?P<offset>.*))?$')
datestr = datestr.strip()
match = rgx.match(datestr)
# if regexp doesn't match, date must match the expected format
if match is None:
return _norm_date(datestr, date_fmt)
datestr = match.group('date').strip()
dateobj = _norm_date(datestr, date_fmt)
offset = match.group('offset')
if offset:
return add_offset(dateobj, offset, match.group('sign'))
return dateobj | python | {
"resource": ""
} |
q271937 | filter_by_date | test | def filter_by_date(
df,
date_col: str,
date_format: str = '%Y-%m-%d',
start: str = None,
stop: str = None,
atdate: str = None
):
"""
Filter the rows of your dataframe by date.
This function will interpret `start`, `stop` and `atdate` and build
the corresponding date range. The caller must specify either:
- `atdate`: keep all rows matching this date exactly,
- `start`: keep all rows matching this date onwards.
- `stop`: keep all rows matching dates before this one.
- `start` and `stop`: keep all rows between `start` and `stop`,
Any other combination will raise an error. The lower bound of the date range
will be included, the upper bound will be excluded.
When specified, `start`, `stop` and `atdate` values are expected to match the
`date_format` format or a known symbolic value (i.e. 'TODAY', 'YESTERDAY' or 'TOMORROW').
Additionally, the offset syntax "(date) + offset" is also supported (Mind
the parenthesis around the date string). In that case, the offset must be
one of the syntax supported by `pandas.Timedelta` (see [pandas doc](
http://pandas.pydata.org/pandas-docs/stable/timedeltas.html))
---
### Parameters
*mandatory :*
- `date_col` (*str*): the name of the dataframe's column to filter on
*optional :*
- `date_format` (*str*): expected date format in column `date_col` (see [available formats](
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior)
- `start` (*str*): if specified, lower bound (included) of the date range
- `stop` (*str*): if specified, upper bound (excluded) of the date range
- `atdate` (*str*): if specified, the exact date we're filtering on
"""
mask = None
if start is None and stop is None and atdate is None:
raise TypeError('either "start", "stop" or "atdate" must be specified')
if start is not None and atdate is not None:
raise TypeError('"start" and "atdate" are mutually exclusive')
if stop is not None and atdate is not None:
raise TypeError('"stop" and "atdate" are mutually exclusive')
# add a new column that will hold actual date objects instead of strings.
# This column is just temporary and will be removed before returning the
# filtered dataframe.
filtercol = str(uuid4())
df[filtercol] = pd.to_datetime(df[date_col], format=date_format)
if atdate is not None:
mask = df[filtercol] == parse_date(atdate, date_format)
elif start is not None and stop is not None:
mask = ((df[filtercol] >= parse_date(start, date_format)) &
(df[filtercol] < parse_date(stop, date_format)))
elif stop is None:
mask = df[filtercol] >= parse_date(start, date_format)
elif start is None:
mask = df[filtercol] < parse_date(stop, date_format)
return df[mask].drop(filtercol, axis=1) | python | {
"resource": ""
} |
q271938 | percentage | test | def percentage(
df,
column: str,
group_cols: Union[str, List[str]] = None,
new_column: str = None
):
"""
Add a column to the dataframe according to the groupby logic on group_cols
---
### Parameters
*mandatory :*
- `column` (*str*): name of the column whose values will be converted to percentages
*optional :*
- `group_cols` (*list*): names of columns for the groupby logic
- `new_column` (*str*): name of the output column. By default `column` will be overwritten.
---
**Input**
| gender | sport | number |
|:------:|:----------:|:------:|
| male | bicycle | 17 |
| female | basketball | 17 |
| male | basketball | 3 |
| female | football | 7 |
| female | running | 30 |
| male | running | 20 |
| male | football | 21 |
| female | bicycle | 17 |
```cson
percentage:
new_column: 'number_percentage'
column: 'number'
group_cols: ['sport']
```
**Output**
| gender | sport | number | number_percentage |
|:------:|:----------:|:------:|:-----------------:|
| male | bicycle | 17 | 50.0 |
| female | basketball | 17 | 85.0 |
| male | basketball | 3 | 15.0 |
| female | football | 7 | 25.0 |
| female | running | 30 | 60.0 |
| male | running | 20 | 40.0 |
| male | football | 21 | 75.0 |
| female | bicycle | 17 | 50.0 |
"""
new_column = new_column or column
if group_cols is None:
df[new_column] = 100. * df[column] / sum(df[column])
else:
df[new_column] = 100. * df[column] / df.groupby(group_cols)[column].transform(sum)
return df | python | {
"resource": ""
} |
q271939 | ada_family_core | test | def ada_family_core(params, gparams, learning_rate = 0.01, eps= 1e-6, rho=0.95, method="ADADELTA",
beta=0.0, gsum_regularization = 0.0001):
"""
Optimize by SGD, AdaGrad, or AdaDelta.
"""
_, _, _, args = inspect.getargvalues(inspect.currentframe())
logging.info("ada_family_core: %s" % str(args.items()))
free_parameters = []
if method == "FINETUNING_ADAGRAD":
method = "ADAGRAD"
gsum_regularization = 0
oneMinusBeta = 1 - beta
gsums = [theano.shared(np.zeros_like(param.get_value(borrow=True), dtype=FLOATX), name="gsum_%s" % param.name) if (method == 'ADADELTA' or method == 'ADAGRAD') else None for param in params]
xsums = [theano.shared(np.zeros_like(param.get_value(borrow=True), dtype=FLOATX), name="xsum_%s" % param.name) if method == 'ADADELTA' else None for param in params]
# Fix for AdaGrad, init gsum to 1
if method == 'ADAGRAD':
for gsum in gsums:
gsum.set_value(gsum.get_value() ** 0)
updates = OrderedDict()
# Updates
for gparam, param, gsum, xsum in zip(gparams, params, gsums, xsums):
if method == 'ADADELTA':
updates[gsum] = rho * gsum + (1. - rho) * (gparam **2)
dparam = -T.sqrt((xsum + eps) / (updates[gsum] + eps)) * gparam
updates[xsum] =rho * xsum + (1. - rho) * (dparam **2)
updates[param] = param * oneMinusBeta + dparam
elif method == 'ADAGRAD':
updates[gsum] = gsum + (gparam **2) - gsum_regularization * gsum
updates[param] = param * oneMinusBeta - learning_rate * (gparam / (T.sqrt(updates[gsum] + eps)))
else:
updates[param] = param * oneMinusBeta - gparam * learning_rate
# Add free parameters
if method == 'ADADELTA':
free_parameters.extend(gsums + xsums)
elif method == 'ADAGRAD':
free_parameters.extend(gsums)
# Check dtype
for k in updates:
if updates[k].dtype != FLOATX:
updates[k] = updates[k].astype(FLOATX)
return updates.items(), free_parameters | python | {
"resource": ""
} |
q271940 | GeneralNeuralTrainer._learning_updates | test | def _learning_updates(self):
"""
Return updates in the training.
"""
params = self.training_params()
gradients = self.get_gradients(params)
return self.optimization_updates(params, gradients) | python | {
"resource": ""
} |
q271941 | GeneralNeuralTrainer.training_params | test | def training_params(self):
"""
Get parameters to be optimized.
"""
params = self.network.parameters
# Freeze parameters
if self.config.fixed_parameters:
logging.info("fixed parameters: %s" % ", ".join(map(str, self.config.fixed_parameters)))
params = [p for p in params if p not in self.config.fixed_parameters]
return params | python | {
"resource": ""
} |
q271942 | GeneralNeuralTrainer.optimization_updates | test | def optimization_updates(self, params, gradients):
"""
Return updates from optimization.
"""
updates, free_parameters = optimize_updates(params, gradients, self.config)
self.network.free_parameters.extend(free_parameters)
logging.info("Added %d free parameters for optimization" % len(free_parameters))
return updates | python | {
"resource": ""
} |
q271943 | FirstGlimpseLayer._first_glimpse_sensor | test | def _first_glimpse_sensor(self, x_t):
"""
Compute first glimpse position using down-sampled image.
"""
downsampled_img = theano.tensor.signal.downsample.max_pool_2d(x_t, (4,4))
downsampled_img = downsampled_img.flatten()
first_l = T.dot(downsampled_img, self.W_f)
if self.disable_reinforce:
wf_grad = self.W_f
if self.random_glimpse:
first_l = self.srng.uniform((2,), low=-1.7, high=1.7)
else:
sampled_l_t = self._sample_gaussian(first_l, self.cov)
sampled_pdf = self._multi_gaussian_pdf(disconnected_grad(sampled_l_t), first_l)
wf_grad = T.grad(T.log(sampled_pdf), self.W_f)
first_l = sampled_l_t
return first_l, wf_grad | python | {
"resource": ""
} |
q271944 | MyJointTrainingModel.prepare | test | def prepare(self):
"""
All codes that create parameters should be put into 'setup' function.
"""
self.output_dim = 10
self.encoder = Chain(self.input_dim).stack(Dense(self.internal_layer_size, 'tanh'))
self.decoder = Chain(self.internal_layer_size).stack(Dense(self.input_dim))
self.classifier = Chain(self.internal_layer_size).stack(Dense(50, 'tanh'),
Dense(self.output_dim),
Softmax())
self.register_inner_layers(self.encoder, self.decoder, self.classifier)
self.target_input = T.ivector('target')
self.register_external_inputs(self.target_input) | python | {
"resource": ""
} |
q271945 | MyJointTrainingModel.compute_tensor | test | def compute_tensor(self, x):
"""
Build the computation graph here.
"""
internal_variable = self.encoder.compute_tensor(x)
decoding_output = self.decoder.compute_tensor(internal_variable)
classification_output = self.classifier.compute_tensor(internal_variable)
auto_encoder_cost = AutoEncoderCost(decoding_output, x).get()
classification_cost = CrossEntropyCost(classification_output, self.target_input).get()
final_cost = 0.01 * auto_encoder_cost + classification_cost
error_rate = ErrorRateCost(classification_output, self.target_input).get()
self.register_monitors(("err", error_rate),
("encoder_cost", auto_encoder_cost),
("classify_cost", classification_cost))
return final_cost | python | {
"resource": ""
} |
q271946 | BasicDataset.map | test | def map(self, func):
"""
Process all data with given function.
The scheme of function should be x,y -> x,y.
"""
if self._train_set:
self._train_set = map(func, self._train_set)
if self._valid_set:
self._valid_set = map(func, self._valid_set)
if self._test_set:
self._test_set = map(func, self._test_set) | python | {
"resource": ""
} |
q271947 | BasicDataset.vectorize_target | test | def vectorize_target(self, size):
"""
Make targets be one-hot vectors.
"""
if self._train_set:
self._train_set = self._vectorize_set(self._train_set, size)
if self._valid_set:
self._valid_set = self._vectorize_set(self._valid_set, size)
if self._test_set:
self._test_set = self._vectorize_set(self._test_set, size) | python | {
"resource": ""
} |
q271948 | BasicDataset.report | test | def report(self):
"""
Print dataset statistics.
"""
logging.info("%s train=%d valid=%d test=%d" % (self.__class__.__name__,
len(list(self._train_set)) if self._train_set else 0,
len(list(self._valid_set)) if self._valid_set else 0,
len(list(self._test_set)) if self._test_set else 0)) | python | {
"resource": ""
} |
q271949 | CustomizeTrainer.train | test | def train(self, train_set, valid_set=None, test_set=None, train_size=None):
'''We train over mini-batches and evaluate periodically.'''
iteration = 0
while True:
if not iteration % self.config.test_frequency and test_set:
try:
self.test(iteration, test_set)
except KeyboardInterrupt:
logging.info('interrupted!')
break
if not iteration % self.validation_frequency and valid_set:
try:
if not self.evaluate(iteration, valid_set):
logging.info('patience elapsed, bailing out')
break
except KeyboardInterrupt:
logging.info('interrupted!')
break
train_message = ""
try:
train_message = self.train_func(train_set)
except KeyboardInterrupt:
logging.info('interrupted!')
break
if not iteration % self.config.monitor_frequency:
logging.info('monitor (iter=%i) %s', iteration + 1, train_message)
iteration += 1
if hasattr(self.network, "iteration_callback"):
self.network.iteration_callback()
yield train_message
if valid_set:
self.set_params(self.best_params)
if test_set:
self.test(0, test_set) | python | {
"resource": ""
} |
q271950 | NeuralLM.sample | test | def sample(self, input, steps):
"""
Sample outputs from LM.
"""
inputs = [[onehot(self.input_dim, x) for x in input]]
for _ in range(steps):
target = self.compute(inputs)[0,-1].argmax()
input.append(target)
inputs[0].append(onehot(self.input_dim, target))
return input | python | {
"resource": ""
} |
q271951 | Attention.compute_alignments | test | def compute_alignments(self, prev_state, precomputed_values, mask=None):
"""
Compute the alignment weights based on the previous state.
"""
WaSp = T.dot(prev_state, self.Wa)
UaH = precomputed_values
# For test time the UaH will be (time, output_dim)
if UaH.ndim == 2:
preact = WaSp[:, None, :] + UaH[None, :, :]
else:
preact = WaSp[:, None, :] + UaH
act = T.activate(preact, 'tanh')
align_scores = T.dot(act, self.Va) # ~ (batch, time)
if mask:
mask = (1 - mask) * -99.00
if align_scores.ndim == 3:
align_scores += mask[None, :]
else:
align_scores += mask
align_weights = T.nnet.softmax(align_scores)
return align_weights | python | {
"resource": ""
} |
q271952 | Attention.compute_context_vector | test | def compute_context_vector(self, prev_state, inputs, precomputed_values=None, mask=None):
"""
Compute the context vector with soft attention.
"""
precomputed_values = precomputed_values if precomputed_values else self.precompute(inputs)
align_weights = self.compute_alignments(prev_state, precomputed_values, mask)
context_vector = T.sum(align_weights[:, :, None] * inputs, axis=1)
return context_vector | python | {
"resource": ""
} |
q271953 | concatenate | test | def concatenate(vars, axis=-1):
"""
A utility function of concatenate.
"""
from deepy.core.neural_var import NeuralVariable
if isinstance(vars[0], NeuralVariable):
concat_var = Concatenate(axis=axis).compute(*vars)
if axis == -1 or axis == vars[0].tensor.ndim - 1:
concat_var.output_dim = sum([x.output_dim for x in vars], 0)
else:
concat_var = TT.concatenate(vars, axis)
return concat_var | python | {
"resource": ""
} |
q271954 | SequentialDataset._pad | test | def _pad(self, side, length):
"""
Pad sequences to given length in the left or right side.
"""
if self._train_set:
self._train_set = pad_dataset(self._train_set, side, length)
if self._valid_set:
self._valid_set = pad_dataset(self._valid_set, side, length)
if self._test_set:
self._test_set = pad_dataset(self._test_set, side, length) | python | {
"resource": ""
} |
q271955 | rmsprop_core | test | def rmsprop_core(params, gradients, momentum=0.9, learning_rate=0.01):
"""
RMSPROP optimization core.
"""
for param, grad in zip(params, gradients):
rms_ = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_rms')
rms = momentum * rms_ + (1 - momentum) * grad * grad
yield rms_, rms
yield param, param - learning_rate * grad / T.sqrt(rms + 1e-8) | python | {
"resource": ""
} |
q271956 | Timer.report | test | def report(self):
"""
Report elapsed time.
"""
if not self.end_time:
self.end()
print ("Time: {} mins".format((self.end_time - self.start_time )/ 60)) | python | {
"resource": ""
} |
q271957 | TrainingValidator.run | test | def run(self, data_x):
"""
Run the model with validation data and return costs.
"""
output_vars = self.compute(*data_x)
return self._extract_costs(output_vars) | python | {
"resource": ""
} |
q271958 | TrainingValidator.invoke | test | def invoke(self):
"""
This function will be called after each iteration.
"""
self._counter += 1
if self._counter % self._freq == 0:
cnt = 0.
sum_map = defaultdict(float)
for x in self._trainer.get_data(self._data_split):
val_map = self.run(x)
if not isinstance(val_map, dict):
raise Exception("Monitor.run must return a dict.")
for k, val in val_map.items():
sum_map[k] += val
cnt += 1
for k in sum_map:
sum_map[k] /= cnt
new_best = self.compare(sum_map)
self._trainer.report(sum_map, self._data_split, new_best=new_best)
if new_best:
self._trainer.save_checkpoint(self._save_path) | python | {
"resource": ""
} |
q271959 | Loop._build_loop_vars | test | def _build_loop_vars(self):
"""
Create inner loop variables.
"""
from theano.tensor.var import TensorVariable
from deepy.core.neural_var import NeuralVariable
if not self._loop_vars:
self._ordered_out_keys = self._outputs.keys()
seq_keys = self._sequences.keys()
filled_out_keys = [k for k in self._ordered_out_keys if self._outputs[k]]
nonseq_keys = self._non_sequences.keys()
dummy_tensors, self._scan_local_vars = get_dummy_args(
sequences=[self._sequences[k].tensor for k in seq_keys],
outputs_info=[self._outputs[k].tensor for k in self._ordered_out_keys],
non_sequences=[self._non_sequences[k].tensor for k in nonseq_keys],
**self._kwargs
)
dummy_map = dict(zip(seq_keys + filled_out_keys + nonseq_keys, dummy_tensors))
arg_map = self._sequences.copy()
arg_map.update(self._outputs)
arg_map.update(self._non_sequences)
self._loop_vars = LoopVars()
for k, dummy_tensor in dummy_map.items():
dummy_var = NeuralVariable(dummy_tensor, dim=arg_map[k].dim())
self._loop_vars[k] = dummy_var | python | {
"resource": ""
} |
def _scan_step(self, vars):
    """
    Build one scan step by substituting the given variables for the
    loop's dummy placeholders.

    :param vars: mapping from loop-variable name to NeuralVariable
        (None entries keep the placeholder untouched)
    :return: dict mapping each output name to a NeuralVariable wrapping
        the cloned step graph
    :raises Exception: if the loop vars were never initialized or an
        output name is missing from them
    """
    # Absolute import for consistency with the rest of the package; the
    # bare `from neural_var import NeuralVariable` relied on Python 2
    # implicit relative imports and breaks under Python 3.
    from deepy.core.neural_var import NeuralVariable
    if not self._loop_vars:
        raise Exception("The loop is not initialized. To initialize the loop, use `with loop as vars`")
    replace_map = {}
    for k, var in vars.items():
        if var is not None:
            # NOTE(review): reads self._dummy_nodes while
            # _build_loop_vars populates self._loop_vars -- confirm
            # _dummy_nodes is assigned elsewhere.
            replace_map[self._dummy_nodes[k].tensor] = var.tensor
    outputs = {}
    for k in self._outputs:
        if k not in self._loop_vars:
            raise Exception("{} can not be found in loop vars.".format(k))
        output_node = theano.clone(self._loop_vars[k].tensor, replace_map)
        outputs[k] = NeuralVariable(output_node, self._loop_vars[k].dim())
    return outputs
"resource": ""
} |
def momentum_core(params, gradients, momentum=0.9, learning_rate=0.01):
    """
    Build Theano updates implementing classical momentum SGD.

    For each parameter p with gradient g, a velocity v is maintained:
        v <- momentum * v - learning_rate * g
        p <- p + v

    :return: (updates, free_parameters) where free_parameters holds the
        velocity shared variables.
    """
    updates = []
    velocities = []
    for p, g in zip(params, gradients):
        step = learning_rate * g
        v = theano.shared(np.zeros_like(p.get_value()), name=p.name + '_vel')
        updates.extend([
            (v, momentum * v - step),
            (p, p + v),
        ])
        velocities.append(v)
    return updates, velocities
"resource": ""
} |
def iftrain(self, then_branch, else_branch):
    """
    Build a symbolic conditional that evaluates to `then_branch` while
    training and to `else_branch` otherwise.
    """
    is_training = self._training_flag
    return ifelse(is_training, then_branch, else_branch, name="iftrain")
"resource": ""
} |
def skip(self, n_batches, n_epochs=0):
    """
    Request that the next `n_epochs` epochs and `n_batches` batches of
    training be skipped.
    """
    self._skip_batches = n_batches
    self._skip_epochs = n_epochs
    logging.info("skip %d epochs and %d batches" % (n_epochs, n_batches))
"resource": ""
} |
def load_params(self, path, exclude_free_params=False):
    """
    Restore network parameters (and training progress) from `path`.

    Also snapshots the restored parameters as the current best, and
    resumes the epoch/batch position recorded in the training log.
    """
    self.network.load_params(path, exclude_free_params=exclude_free_params)
    self.best_params = self.copy_params()
    train_log = self.network.train_logger
    # Resume from the recorded position when the log shows prior progress.
    if train_log.progress() > 0 or train_log.epoch() > 0:
        self.skip(train_log.progress(), train_log.epoch() - 1)
"resource": ""
} |
def train(self, train_set, valid_set=None, test_set=None, train_size=None):
    """
    Train the model, yielding the cost map after every epoch.

    This is a generator: each iteration runs (optionally) a test pass,
    a validation pass, then one training pass, and yields a dict of
    costs (or None for epochs skipped via ``skip()``). Training stops
    on KeyboardInterrupt or when validation patience is exhausted.
    """
    self._epoch = 0
    while True:
        # Honor any skip request recorded by `skip()` before doing work.
        if self._skip_epochs > 0:
            logging.info("skipping one epoch ...")
            self._skip_epochs -= 1
            self._epoch += 1
            yield None
            continue
        # Test
        if not self._epoch % self.config.test_frequency and test_set:
            try:
                self._run_test(self._epoch, test_set)
            except KeyboardInterrupt:
                logging.info('interrupted!')
                break
        # Validate; _run_valid returns False once patience is exhausted.
        if not self._epoch % self.validation_frequency and valid_set:
            try:
                if not self._run_valid(self._epoch, valid_set):
                    logging.info('patience elapsed, bailing out')
                    break
            except KeyboardInterrupt:
                logging.info('interrupted!')
                break
        # Train one step
        try:
            costs = self._run_train(self._epoch, train_set, train_size)
        except KeyboardInterrupt:
            logging.info('interrupted!')
            break
        # Check costs: a NaN cost aborts the step and restores the last
        # known-good parameters instead of advancing the epoch.
        if np.isnan(costs[0][1]):
            logging.info("NaN detected in costs, rollback to last parameters")
            self.set_params(*self.checkpoint)
        else:
            self._epoch += 1
            self.network.epoch_callback()
        yield dict(costs)
    # After training: restore the best validated parameters and run a
    # final test pass.
    if valid_set and self.config.get("save_best_parameters", True):
        self.set_params(*self.best_params)
    if test_set:
        self._run_test(-1, test_set)
"resource": ""
} |
q271966 | NeuralTrainer._run_train | test | def _run_train(self, epoch, train_set, train_size=None):
"""
Run one training iteration.
"""
self.network.train_logger.record_epoch(epoch + 1)
costs = self.train_step(train_set, train_size)
if not epoch % self.config.monitor_frequency:
self.report(dict(costs), "train", epoch)
self.last_run_costs = costs
return costs | python | {
"resource": ""
} |
q271967 | NeuralTrainer._run_valid | test | def _run_valid(self, epoch, valid_set, dry_run=False, save_path=None):
"""
Run one valid iteration, return true if to continue training.
"""
costs = self.valid_step(valid_set)
# this is the same as: (J_i - J_f) / J_i > min improvement
_, J = costs[0]
new_best = False
if self.best_cost - J > self.best_cost * self.min_improvement:
# save the best cost and parameters
self.best_params = self.copy_params()
new_best = True
if not dry_run:
self.best_cost = J
self.best_epoch = epoch
self.save_checkpoint(save_path)
self.report(dict(costs), type="valid", epoch=0 if dry_run else epoch, new_best=new_best)
self.last_run_costs = costs
return epoch - self.best_epoch < self.patience | python | {
"resource": ""
} |
def report(self, score_map, type="valid", epoch=-1, new_best=False):
    """
    Format a one-line score report, record it in the training log and
    emit it through the logging module.
    """
    # Pad short split names to a fixed-width 5-character label.
    label = type.ljust(5)
    scores = " ".join("%s=%.2f" % el for el in score_map.items())
    shown_epoch = epoch if epoch > 0 else self.current_epoch()
    epoch_str = "dryrun" if epoch < 0 else "epoch={}".format(shown_epoch + 1)
    sys.stdout.write("\r")
    sys.stdout.flush()
    suffix = " *" if new_best else ""
    message = "{} ({}) {}{}".format(label, epoch_str, scores, suffix)
    self.network.train_logger.record(message)
    logging.info(message)
"resource": ""
} |
def get_data(self, data_split="train"):
    """
    Return the dataset split currently attached to the trainer, or None
    for an unknown split name.
    """
    attr_by_split = {
        "train": "_current_train_set",
        "valid": "_current_valid_set",
        "test": "_current_test_set",
    }
    attr = attr_by_split.get(data_split)
    # Lazy getattr: only the requested split's attribute is touched.
    return getattr(self, attr) if attr is not None else None
"resource": ""
} |
def apply(self, func, dim=None):
    """
    Return a new NeuralVariable wrapping ``func(self.tensor)``.

    :param dim: output dimension; defaults to this variable's dimension
        when falsy.
    """
    result_dim = dim if dim else self.output_dim
    transformed = func(self.tensor)
    return NeuralVariable(transformed, result_dim)
"resource": ""
} |
def report(self):
    """
    Log every configuration parameter that has been accessed, flagging
    the ones that were never explicitly defined.
    """
    if not self.logger:
        return
    self.logger.info("accessed parameters:")
    for key in self.used_parameters:
        flag = "(undefined)" if key in self.undefined_parameters else ""
        self.logger.info(" - %s %s" % (key, flag))
"resource": ""
} |
def var(self, tensor_type, last_dim=0, test_shape=None):
    """
    Thin alias of :func:`deepy.tensor.var`.
    """
    from deepy.tensor import var as make_var
    return make_var(tensor_type, last_dim=last_dim, test_shape=test_shape)
"resource": ""
} |
def create_vars_from_data(self, dataset, split="train"):
    """
    Create input variables matching the shapes of an existing dataset.

    Inspects the first batch of the chosen split, builds one Theano
    tensor (wrapped in a NeuralVariable) per input array with a dtype
    and rank matching the data, and attaches the batch as a test value.

    :param dataset: object exposing train_set()/valid_set()/test_set()
    :param split: which split to inspect ("train", "valid" or "test")
    :return: list of NeuralVariable, one per array in a data batch
    """
    from deepy.core.neural_var import NeuralVariable
    vars = []
    if split == "valid":
        data_split = dataset.valid_set()
    elif split == "test":
        data_split = dataset.test_set()
    else:
        data_split = dataset.train_set()
    # Materializes the split to grab its first batch.
    first_data_piece = list(data_split)[0]
    for i, numpy_tensor in enumerate(first_data_piece):
        # Normalize dtypes: int64 -> int32, float64 -> configured float.
        if numpy_tensor.dtype == "int64":
            numpy_tensor = numpy_tensor.astype("int32")
        if numpy_tensor.dtype == "float64":
            numpy_tensor = numpy_tensor.astype(env.FLOATX)
        # Pick the Theano constructor by the array's rank; ranks beyond
        # 5 fall back to a scalar constructor.
        type_map = {
            0: "scalar",
            1: "vector",
            2: "matrix",
            3: "tensor3",
            4: "tensor4",
            5: "tensor5",
        }
        tensor_type = type_map[numpy_tensor.ndim] if numpy_tensor.ndim in type_map else type_map[0]
        # Integer arrays get the "i"-prefixed constructors (imatrix, ...).
        if numpy_tensor.dtype.kind == "i":
            tensor_type = "i" + tensor_type
        theano_tensor = getattr(TT, tensor_type)("input_{}_{}".format(i + 1, tensor_type))
        last_dim = numpy_tensor.shape[-1]
        var = NeuralVariable(theano_tensor, dim=last_dim)
        var.set_test_value(numpy_tensor)
        vars.append(var)
    return vars
"resource": ""
} |
def shared(self, value, name=None):
    """
    Wrap `value` in a Theano shared variable.

    Plain Python ints become int32 arrays and floats become arrays of
    the configured float type; anything else is passed through as-is.
    """
    if type(value) is int:
        wrapped = np.array(value, dtype="int32")
    elif type(value) is float:
        wrapped = np.array(value, dtype=env.FLOATX)
    else:
        wrapped = value
    return theano.shared(wrapped, name=name)
"resource": ""
} |
def stack_encoders(self, *layers):
    """
    Append encoder layers; must be called before stacking any decoders.
    """
    self.stack(*layers)
    self.encoding_layes += list(layers)
"resource": ""
} |
def stack_decoders(self, *layers):
    """
    Append decoder layers to the autoencoder.
    """
    self.stack(*layers)
    self.decoding_layers += list(layers)
"resource": ""
} |
def encode(self, x):
    """
    Encode the given input with the encoder half of the autoencoder.

    Lazily builds a standalone encoding network from the registered
    encoder layers on first use.
    """
    if not self.encoding_network:
        self.encoding_network = NeuralNetwork(self.input_dim, self.input_tensor)
        self.encoding_network.input_variables = self.input_variables
        for layer in self.encoding_layes:
            self.encoding_network.stack_layer(layer, no_setup=True)
    # NOTE(review): `x` is unpacked here while decode() passes its
    # argument directly -- confirm callers pass a sequence of inputs.
    return self.encoding_network.compute(*x)
"resource": ""
} |
def decode(self, x):
    """
    Decode a representation with the decoder half of the autoencoder.

    Lazily builds a standalone decoding network from the registered
    decoder layers on first use.

    :raises Exception: if `rep_dim` was never set.
    """
    if not self.rep_dim:
        raise Exception("rep_dim must be set to decode.")
    if not self.decoding_network:
        self.decoding_network = NeuralNetwork(self.rep_dim)
        for layer in self.decoding_layers:
            self.decoding_network.stack_layer(layer, no_setup=True)
    return self.decoding_network.compute(x)
"resource": ""
} |
def create_2d_gaussian(dim, sigma):
    """
    Create a dim x dim Gaussian kernel with standard deviation `sigma`.

    The kernel is centred on the middle cell and normalized so that its
    entries sum to 1.

    :param dim: integer side length of the (square) kernel; must be odd
        so the kernel has a unique centre cell
    :param sigma: floating point standard deviation
    :returns: a (dim, dim) numpy array whose entries sum to 1
    :raises ValueError: if `dim` is even
    """
    # check if the dimension is odd
    if dim % 2 == 0:
        raise ValueError("Kernel dimension should be odd")
    # initialize the kernel
    kernel = np.zeros((dim, dim), dtype=np.float16)
    # Integer centre index. The previous `dim / 2` yields a float under
    # Python 3 (e.g. 2.5 for dim=5), which would shift the peak off the
    # centre cell; floor division keeps the peak at the middle index.
    center = dim // 2
    variance = sigma ** 2
    # Normalization coefficient of the (unnormalized) Gaussian; the
    # explicit renormalization below makes its exact value irrelevant.
    coeff = 1. / (2 * variance)
    # create the kernel
    for x in range(0, dim):
        for y in range(0, dim):
            squared_dist = (x - center) ** 2 + (y - center) ** 2
            kernel[x, y] = coeff * np.exp(-squared_dist / (2. * variance))
    # Normalize so the kernel sums to one.
    return kernel / kernel.sum()
"resource": ""
} |
def register_layer(self, layer):
    """
    Register a layer so its parameters are trained, without stacking
    its output onto the network.
    """
    if type(layer) == Block:
        layer.fix()
    self.parameter_count += layer.parameter_count
    # Merge every bookkeeping list from the layer into this network.
    for own_attr, layer_attr in (
            ("parameters", "parameters"),
            ("free_parameters", "free_parameters"),
            ("training_monitors", "training_monitors"),
            ("testing_monitors", "testing_monitors"),
            ("updates", "updates"),
            ("training_updates", "training_updates"),
            ("input_variables", "external_inputs"),
            ("target_variables", "external_targets"),
            ("training_callbacks", "training_callbacks"),
            ("testing_callbacks", "testing_callbacks"),
            ("epoch_callbacks", "epoch_callbacks")):
        getattr(self, own_attr).extend(getattr(layer, layer_attr))
"resource": ""
} |
def monitor_layer_outputs(self):
    """
    Add a mean-absolute-activation monitor for every stacked layer.
    Useful for troubleshooting convergence problems.
    """
    for layer, hidden in zip(self.layers, self._hidden_outputs):
        monitor_name = 'mean(%s)' % (layer.name)
        self.training_monitors.append((monitor_name, abs(hidden).mean()))
"resource": ""
} |
def all_parameters(self):
    """
    All trainable and free parameters, in that order, as a fresh list.
    """
    return self.parameters + self.free_parameters
"resource": ""
} |
def setup_variables(self):
    """
    Create the network's input variable and use it as the initial
    (train and test) output.
    """
    if not self.input_tensor:
        x = T.matrix('x')
    elif type(self.input_tensor) == int:
        # An integer is interpreted as the input dimension count.
        x = dim_to_var(self.input_tensor, name="x")
    else:
        x = self.input_tensor
    self.input_variables.append(x)
    self._output = x
    self._test_output = x
"resource": ""
} |
def compute(self, *x):
    """
    Compile (if needed) and evaluate the network on the given inputs.

    Returns a MapDict keyed by the configured output names when they
    exist, otherwise the raw outputs.
    """
    self._compile()
    raw_outputs = self._compute(*x)
    if not self._output_keys:
        return raw_outputs
    return MapDict(dict(zip(self._output_keys, raw_outputs)))
"resource": ""
} |
def save_params(self, path, new_thread=False):
    """
    Persist all parameter values (and the training log) to `path`.

    :param new_thread: when True, write the parameters from a
        background thread instead of blocking the caller.
    """
    save_logger.info(path)
    values = [p.get_value().copy() for p in self.all_parameters]
    if new_thread:
        Thread(target=save_network_params, args=(values, path)).start()
    else:
        save_network_params(values, path)
    self.train_logger.save(path)
"resource": ""
} |
def load_params(self, path, exclude_free_params=False):
    """
    Load parameter values from `path` into this network.

    Supported formats: gzip-compressed pickle (".gz", including
    ".uncompressed.gz") and numpy archives (".npz"). A missing file is
    silently ignored.

    :param exclude_free_params: when True, only the trainable
        parameters are restored (free parameters keep their values).
    :raises Exception: for an unsupported file extension.
    """
    if not os.path.exists(path):
        return
    logging.info("loading parameters from %s" % path)
    # Decide which parameters to load
    params_to_load = self.parameters if exclude_free_params else self.all_parameters
    # Load parameters
    if path.endswith(".gz"):
        # The ".gz" check already guarantees a gzip stream, so open with
        # gzip directly (the old per-extension opener selection was
        # redundant here); the context manager guarantees the handle is
        # closed even when unpickling fails.
        with gzip.open(path, 'rb') as handle:
            saved_params = pickle.load(handle)
        # Write parameters
        for target, source in zip(params_to_load, saved_params):
            logging.info('%s: setting value %s', target.name, source.shape)
            target.set_value(source)
    elif path.endswith(".npz"):
        arrs = np.load(path)
        # Write parameters; zip stops at the shorter of the two lists.
        for target, idx in zip(params_to_load, range(len(arrs.keys()))):
            source = arrs['arr_%d' % idx]
            logging.info('%s: setting value %s', target.name, source.shape)
            target.set_value(source)
    else:
        raise Exception("File format of %s is not supported, use '.gz' or '.npz' or '.uncompressed.gz'" % path)
    self.train_logger.load(path)
"resource": ""
} |
def report(self):
    """
    Log a summary of the network's variables and parameter count.
    """
    inputs = " ".join(map(str, self.input_variables))
    targets = " ".join(map(str, self.target_variables))
    params = " ".join(map(str, self.all_parameters))
    logging.info("network inputs: %s", inputs)
    logging.info("network targets: %s", targets)
    logging.info("network parameters: %s", params)
    logging.info("parameter count: %d", self.parameter_count)
"resource": ""
} |
def register_parameters(self, *parameters):
    """
    Track the given parameters and grow the parameter count by their
    total number of elements.
    """
    for p in parameters:
        self.parameter_count += np.prod(p.get_value().shape)
    self.parameters.extend(parameters)
"resource": ""
} |
def register_updates(self, *updates):
    """
    Register per-iteration updates, ignoring keys seen before.
    """
    for key, node in updates:
        if key in self._registered_updates:
            continue
        self.updates.append((key, node))
        self._registered_updates.add(key)
"resource": ""
} |
def register_training_updates(self, *updates):
    """
    Register updates that run only during the training phase, ignoring
    keys seen before.
    """
    for key, node in updates:
        if key in self._registered_training_updates:
            continue
        self.training_updates.append((key, node))
        self._registered_training_updates.add(key)
"resource": ""
} |
def register_monitors(self, *monitors):
    """
    Register (name, variable) monitors for both training and testing,
    skipping names that are already registered.
    """
    for key, node in monitors:
        if key in self._registered_monitors:
            continue
        node = node * 1.0  # Avoid CudaNdarray
        self.training_monitors.append((key, node))
        self.testing_monitors.append((key, node))
        self._registered_monitors.add(key)
"resource": ""
} |
def multiple_l2_norm(tensors):
    """
    Compute the L2 norm over all elements of all given tensors.
    This function is taken from blocks.
    """
    # Another way for doing this, I don't know which one is fast
    # return T.sqrt(sum(T.sum(t ** 2) for t in tensors))
    flat = []
    for t in tensors:
        v = T.as_tensor_variable(t).flatten()
        if v.ndim == 0:
            # Promote scalars to 1-d so they can be joined.
            v = v.dimshuffle('x')
        flat.append(v)
    joined = T.join(0, *flat)
    return T.sqrt(T.sqr(joined).sum())
"resource": ""
} |
def dump_one(elt_to_pickle, file_obj):
    """
    Pickle a single element and append it to `file_obj` (opened for
    writing), terminated by a blank-line record separator.
    """
    serialized = dumps(elt_to_pickle)
    file_obj.write(serialized)
    # record separator is a blank line
    # (since the serialized data might contain its own newlines)
    file_obj.write('\n\n')
"resource": ""
} |
def load(file_obj):
    """
    Lazily unpickle records from `file_obj`, yielding one element at a
    time. Records are separated by blank lines; records that fail to
    decode with a ValueError are skipped.
    """
    buffered = []
    for line in file_obj:
        buffered.append(line)
        if line != '\n':
            continue
        record = ''.join(buffered)
        buffered = []
        try:
            elt = loads(record)
        except ValueError:
            continue
        yield elt
"resource": ""
} |
def load_params(self, path, exclude_free_params=False):
    """
    Load parameters into the block.

    Compiles a one-block computational graph and delegates to its
    parameter-loading routine.
    """
    # NOTE: the docstring above was previously placed *after* the first
    # import, where Python no longer treats it as a docstring.
    from deepy.core import graph
    # NOTE(review): this import appears unused; kept in case importing
    # the module has registration side effects -- confirm and drop.
    from deepy.core.comp_graph import ComputationalGraph
    model = graph.compile(blocks=[self])
    model.load_params(path, exclude_free_params=exclude_free_params)
"resource": ""
} |
def create_request_elements(
        cls, request_type, credentials, url, method='GET', params=None,
        headers=None, body='', secret=None, redirect_uri='', scope='',
        csrf='', user_state=''
):
    """
    Creates |oauth2| request elements.

    Builds the URL, method, params, headers and body for one of the
    four request types: user authorization, access token, refresh
    token, or protected resource.

    :raises OAuth2Error: when the credentials lack the fields required
        by the chosen request type.
    :returns: filtered :class:`core.RequestElements`.
    """
    headers = headers or {}
    params = params or {}
    consumer_key = credentials.consumer_key or ''
    consumer_secret = credentials.consumer_secret or ''
    token = credentials.token or ''
    refresh_token = credentials.refresh_token or credentials.token or ''
    # Separate url base and query parameters.
    url, base_params = cls._split_url(url)
    # Add params extracted from URL.
    params.update(dict(base_params))
    if request_type == cls.USER_AUTHORIZATION_REQUEST_TYPE:
        # User authorization request.
        # TODO: Raise error for specific message for each missing argument.
        if consumer_key and redirect_uri and (
                csrf or not cls.supports_csrf_protection):
            params['client_id'] = consumer_key
            params['redirect_uri'] = redirect_uri
            params['scope'] = scope
            if cls.supports_user_state:
                # Pack CSRF token and user state into one base64 blob.
                params['state'] = base64.urlsafe_b64encode(
                    json.dumps(
                        {"csrf": csrf, "user_state": user_state}
                    ).encode('utf-8')
                )
            else:
                params['state'] = csrf
            params['response_type'] = 'code'
            # Add authorization header
            headers.update(cls._authorization_header(credentials))
        else:
            raise OAuth2Error(
                'Credentials with valid consumer_key and arguments '
                'redirect_uri, scope and state are required to create '
                'OAuth 2.0 user authorization request elements!')
    elif request_type == cls.ACCESS_TOKEN_REQUEST_TYPE:
        # Access token request.
        if consumer_key and consumer_secret:
            params['code'] = token
            params['client_id'] = consumer_key
            params['client_secret'] = consumer_secret
            params['redirect_uri'] = redirect_uri
            params['grant_type'] = 'authorization_code'
            # TODO: Check whether all providers accept it
            headers.update(cls._authorization_header(credentials))
        else:
            raise OAuth2Error(
                'Credentials with valid token, consumer_key, '
                'consumer_secret and argument redirect_uri are required '
                'to create OAuth 2.0 access token request elements!')
    elif request_type == cls.REFRESH_TOKEN_REQUEST_TYPE:
        # Refresh access token request.
        if refresh_token and consumer_key and consumer_secret:
            params['refresh_token'] = refresh_token
            params['client_id'] = consumer_key
            params['client_secret'] = consumer_secret
            params['grant_type'] = 'refresh_token'
        else:
            raise OAuth2Error(
                'Credentials with valid refresh_token, consumer_key, '
                'consumer_secret are required to create OAuth 2.0 '
                'refresh token request elements!')
    elif request_type == cls.PROTECTED_RESOURCE_REQUEST_TYPE:
        # Protected resource request.
        # Add Authorization header. See:
        # http://tools.ietf.org/html/rfc6749#section-7.1
        if credentials.token_type == cls.BEARER:
            # http://tools.ietf.org/html/rfc6750#section-2.1
            headers.update(
                {'Authorization': 'Bearer {0}'.format(credentials.token)})
        elif token:
            # Fallback: pass the token as a query parameter.
            params['access_token'] = token
        else:
            raise OAuth2Error(
                'Credentials with valid token are required to create '
                'OAuth 2.0 protected resources request elements!')
    request_elements = core.RequestElements(
        url, method, params, headers, body)
    # Give the provider subclass a chance to adjust the elements.
    return cls._x_request_elements_filter(
        request_type, request_elements, credentials)
"resource": ""
} |
def decode_state(cls, state, param='user_state'):
    """
    Extract a field from the provider-supplied ``state`` parameter.

    :param str state:
        state parameter passed through by provider
    :param str param:
        key to query from decoded state variable. Options include
        'csrf' and 'user_state'.
    :returns:
        string value from decoded state
    """
    if not (state and cls.supports_user_state):
        # Plain-state providers carry only the CSRF token.
        return state if param == 'csrf' else ''
    # urlsafe_b64 may include '=' which the browser quotes, so unquote
    # first; cast to str to avoid a b64decode translation error.
    decoded = base64.urlsafe_b64decode(unquote(str(state)))
    return json.loads(decoded.decode('utf-8'))[param]
"resource": ""
} |
q271998 | Facebook._x_credentials_parser | test | def _x_credentials_parser(credentials, data):
"""
We need to override this method to fix Facebooks naming deviation.
"""
# Facebook returns "expires" instead of "expires_in".
credentials.expire_in = data.get('expires')
if data.get('token_type') == 'bearer':
# TODO: cls is not available here, hardcode for now.
credentials.token_type = 'Bearer'
return credentials | python | {
"resource": ""
} |
q271999 | Google._x_request_elements_filter | test | def _x_request_elements_filter(cls, request_type, request_elements,
credentials):
"""
Google doesn't accept client ID and secret to be at the same time in
request parameters and in the basic authorization header in the access
token request.
"""
if request_type is cls.ACCESS_TOKEN_REQUEST_TYPE:
params = request_elements[2]
del params['client_id']
del params['client_secret']
return request_elements | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.