id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
159,021 | from siuba.siu import CallTreeLocal, FunctionLookupError, ExecutionValidatorVisitor
from .groupby import SeriesGroupBy
from .translate import (
forward_method,
not_implemented,
method_agg_op,
method_win_op_agg_result
)
from siuba.experimental.pd_groups.groupby import SeriesGroupBy, GroupByAgg, broadcast_agg, is_compatible
from siuba.ops import ALL_OPS
from siuba import ops
from .translate import GroupByAgg, SeriesGroupBy
from siuba.ops.generics import ALL_PROPERTIES, ALL_ACCESSORS
from siuba.siu import Call, singledispatch2
from siuba.dply.verbs import mutate, filter, summarize, DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
import warnings
def _copy_dispatch(dispatcher, cls, func = None):
    """Create a new singledispatch2 dispatcher rooted at ``func`` for ``cls``.

    The previous dispatcher is registered as the fallback for ``object``, so
    any type not explicitly handled by the new dispatcher falls through to it.
    May be used as a decorator by omitting ``func``.
    """
    if func is None:
        # decorator form: @_copy_dispatch(dispatcher, cls)
        return lambda f: _copy_dispatch(dispatcher, cls, f)

    # Note stripping symbolics may occur twice. Once in the original, and once
    # in this dispatcher.
    new_dispatch = singledispatch2(cls, func)
    new_dispatch.register(object, dispatcher)

    return new_dispatch
159,022 | from siuba.siu import CallTreeLocal, FunctionLookupError, ExecutionValidatorVisitor
from .groupby import SeriesGroupBy
from .translate import (
forward_method,
not_implemented,
method_agg_op,
method_win_op_agg_result
)
from siuba.experimental.pd_groups.groupby import SeriesGroupBy, GroupByAgg, broadcast_agg, is_compatible
from siuba.ops import ALL_OPS
from siuba import ops
from .translate import GroupByAgg, SeriesGroupBy
from siuba.ops.generics import ALL_PROPERTIES, ALL_ACCESSORS
from siuba.siu import Call, singledispatch2
from siuba.dply.verbs import mutate, filter, summarize, DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
import warnings
def grouped_eval(__data, expr, require_agg = False):
    """Evaluate a scalar or siu Call expression against grouped data.

    Scalars are returned unchanged. Call expressions are first run through the
    module-level translators (``call_listener`` / ``call_validator`` -- defined
    elsewhere in this module, not visible in this chunk), falling back to the
    untranslated expression with a warning on lookup failure.

    When ``require_agg`` is True the underlying (aggregate) Series is returned;
    otherwise aggregate results are broadcast back to the original length,
    similar to a pandas transform.

    Raises ValueError for incompatible groupers, for results that are not a
    SeriesGroupBy subclass, and for expressions that are neither scalar nor Call.
    """
    if is_scalar(expr):
        return expr

    if isinstance(expr, Call):
        try:
            # translate the call tree to fast grouped operations
            call = call_listener.enter(expr)
            call_validator.visit(call)
        except FunctionLookupError as e:
            # no fast translation available: warn and run the original call
            fallback_warning(expr, str(e))
            call = expr

        #
        grouped_res = call(__data)

        if isinstance(grouped_res, SeriesGroupBy):
            if not is_compatible(grouped_res, __data):
                raise ValueError("Incompatible groupers")

            # TODO: may want to validate result is correct length / index?
            # e.g. a SeriesGroupBy could be compatible and not an agg
            if require_agg:
                return grouped_res.obj
            else:
                # broadcast from aggregate to original length (like transform)
                return broadcast_agg(grouped_res)
        else:
            # can happen right now if user selects, e.g., a property of the
            # groupby object, like .dtype, which returns a single value
            # in the future, could restrict set of operations user could perform
            raise ValueError("Result must be subclass of SeriesGroupBy")

    raise ValueError("Grouped expressions must be a siu expression or scalar")
def _transform_args(args):
    """Translate each argument into a validated call tree.

    Returns the list of translated arguments, or None to signal that the
    caller should bail out to the slow (non-grouped) implementation.
    """
    transformed = []
    for arg in args:
        if is_scalar(arg):
            transformed.append(arg)
            continue

        if isinstance(arg, Call):
            try:
                validated = call_listener.enter(arg)
                call_validator.visit(validated)
            except FunctionLookupError as err:
                # no fast translation exists -> warn and bail out
                fallback_warning(arg, str(err))
                return None
            transformed.append(validated)
        elif callable(arg):
            # opaque callables cannot be analyzed -> bail out
            return None

    return transformed
def mutate(__data, *args, **kwargs):
    """Assign new variables to a DataFrame, while keeping existing ones.

    Parameters
    ----------
    __data: pd.DataFrame
    **kwargs:
        new_col_name=value pairs, where value can be a function taking a single
        argument for the data being operated on.

    See Also
    --------
    transmute : Returns a DataFrame with only the newly created columns.

    Examples
    --------
    >>> from siuba import _, mutate, head
    >>> from siuba.data import cars
    >>> cars >> mutate(cyl2 = _.cyl * 2, cyl4 = _.cyl2 * 2) >> head(2)
       cyl   mpg   hp  cyl2  cyl4
    0    6  21.0  110    12    24
    1    6  21.0  110    12    24
    """
    # _mutate_cols is defined elsewhere in this module (not visible in this
    # chunk); it evaluates each expression and returns (new names, result df).
    new_names, df_res = _mutate_cols(__data, args, kwargs)
    return df_res
The provided code snippet includes necessary dependencies for implementing the `fast_mutate` function. Write a Python function `def fast_mutate(__data, **kwargs)` to solve the following problem:
Warning: this function is experimental
Here is the function:
def fast_mutate(__data, **kwargs):
    """Warning: this function is experimental

    Grouped mutate that evaluates translated call trees directly on the
    underlying frame, falling back to the generic ``mutate`` verb when any
    expression cannot be translated.
    """
    # transform call trees, potentially bail out to slow method --------
    new_vals = _transform_args(kwargs.values())

    if new_vals is None:
        # at least one expression could not be translated
        return mutate(__data, **kwargs)

    # perform fast method ----
    out = __data.obj.copy()
    groupings = __data.grouper.groupings

    for name, expr in zip(kwargs, new_vals):
        res = grouped_eval(__data, expr)
        out[name] = res

    return out.groupby(groupings)
159,023 | from siuba.siu import CallTreeLocal, FunctionLookupError, ExecutionValidatorVisitor
from .groupby import SeriesGroupBy
from .translate import (
forward_method,
not_implemented,
method_agg_op,
method_win_op_agg_result
)
from siuba.experimental.pd_groups.groupby import SeriesGroupBy, GroupByAgg, broadcast_agg, is_compatible
from siuba.ops import ALL_OPS
from siuba import ops
from .translate import GroupByAgg, SeriesGroupBy
from siuba.ops.generics import ALL_PROPERTIES, ALL_ACCESSORS
from siuba.siu import Call, singledispatch2
from siuba.dply.verbs import mutate, filter, summarize, DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
import warnings
def grouped_eval(__data, expr, require_agg = False):
    """Evaluate a scalar or siu Call expression against grouped data.

    Scalars pass through unchanged. Calls are translated via the module-level
    ``call_listener`` / ``call_validator`` (defined elsewhere in this module),
    with a warned fallback to the untranslated expression. Aggregate results
    are returned directly when ``require_agg`` is True, otherwise broadcast
    back to the original length (like a transform).
    """
    if is_scalar(expr):
        return expr

    if isinstance(expr, Call):
        try:
            # translate the call tree to fast grouped operations
            call = call_listener.enter(expr)
            call_validator.visit(call)
        except FunctionLookupError as e:
            # no fast translation available: warn and run the original call
            fallback_warning(expr, str(e))
            call = expr

        #
        grouped_res = call(__data)

        if isinstance(grouped_res, SeriesGroupBy):
            if not is_compatible(grouped_res, __data):
                raise ValueError("Incompatible groupers")

            # TODO: may want to validate result is correct length / index?
            # e.g. a SeriesGroupBy could be compatible and not an agg
            if require_agg:
                return grouped_res.obj
            else:
                # broadcast from aggregate to original length (like transform)
                return broadcast_agg(grouped_res)
        else:
            # can happen right now if user selects, e.g., a property of the
            # groupby object, like .dtype, which returns a single value
            # in the future, could restrict set of operations user could perform
            raise ValueError("Result must be subclass of SeriesGroupBy")

    raise ValueError("Grouped expressions must be a siu expression or scalar")
def _transform_args(args):
    """Translate each argument into a validated call tree.

    Returns the list of translated arguments, or None to signal that the
    caller should bail out to the slow (non-grouped) implementation.
    """
    transformed = []
    for arg in args:
        if is_scalar(arg):
            transformed.append(arg)
            continue

        if isinstance(arg, Call):
            try:
                validated = call_listener.enter(arg)
                call_validator.visit(validated)
            except FunctionLookupError as err:
                # no fast translation exists -> warn and bail out
                fallback_warning(arg, str(err))
                return None
            transformed.append(validated)
        elif callable(arg):
            # opaque callables cannot be analyzed -> bail out
            return None

    return transformed
def filter(__data, *args):
    """Keep rows where conditions are true.

    Parameters
    ----------
    __data:
        The data being filtered.
    *args:
        conditions that must be met to keep a column.

    Examples
    --------
    >>> from siuba import _, filter
    >>> from siuba.data import cars

    Keep rows where cyl is 4 *and* mpg is less than 25.

    >>> cars >> filter(_.cyl == 4, _.mpg < 22)
        cyl   mpg   hp
    20    4  21.5   97
    31    4  21.4  109

    Use `|` to represent an OR condition. For example, the code below keeps
    rows where hp is over 250 *or* mpg is over 32.

    >>> cars >> filter((_.hp > 300) | (_.mpg > 32))
        cyl   mpg   hp
    17    4  32.4   66
    19    4  33.9   65
    30    8  15.0  335
    """
    # accumulate conditions into a single boolean mask; starting from True
    # means zero arguments keeps every row.
    # NOTE(review): pd and np are assumed to be imported at the top of the
    # real module -- they are not in this chunk's import block; confirm.
    crnt_indx = True
    for arg in args:
        # callables receive the data; anything else is used as-is
        res = arg(__data) if callable(arg) else arg

        if isinstance(res, pd.DataFrame):
            # DataFrame condition: keep rows where every column is True
            crnt_indx &= res.all(axis=1)
        elif isinstance(res, pd.Series):
            crnt_indx &= res
        else:
            # NOTE(review): identical to the Series branch; presumably split
            # out for future scalar-specific handling -- confirm
            crnt_indx &= res

    # use loc or iloc to subset, depending on crnt_indx ----
    # the main issue here is that loc can't remove all rows using a slice
    # and iloc can't use a boolean series
    if isinstance(crnt_indx, bool) or isinstance(crnt_indx, np.bool_):
        # iloc can do slice, but not a bool series
        result = __data.iloc[slice(None) if crnt_indx else slice(0),:]
    else:
        result = __data.loc[crnt_indx,:]

    return result
The provided code snippet includes necessary dependencies for implementing the `fast_filter` function. Write a Python function `def fast_filter(__data, *args)` to solve the following problem:
Warning: this function is experimental
Here is the function:
def fast_filter(__data, *args):
    """Warning: this function is experimental

    Grouped filter that evaluates translated call trees directly on the
    underlying frame, falling back to the generic ``filter`` verb when any
    expression cannot be translated.
    """
    # transform call trees, potentially bail out to slow method --------
    new_vals = _transform_args(args)

    if new_vals is None:
        return filter(__data, *args)

    # perform fast method ----
    out = []
    groupings = __data.grouper.groupings

    # iterate the already-translated expressions (consistent with fast_mutate);
    # iterating the raw args re-validated each call and could warn twice.
    for expr in new_vals:
        res = grouped_eval(__data, expr)
        out.append(res)

    # apply the plain-DataFrame filter implementation to the evaluated masks
    filter_df = filter.dispatch(__data.obj.__class__)
    df_result = filter_df(__data.obj, *out)

    # TODO: research how to efficiently & robustly subset groupings
    group_names = [ping.name for ping in groupings]
    return df_result.groupby(group_names)
159,024 | from siuba.siu import CallTreeLocal, FunctionLookupError, ExecutionValidatorVisitor
from .groupby import SeriesGroupBy
from .translate import (
forward_method,
not_implemented,
method_agg_op,
method_win_op_agg_result
)
from siuba.experimental.pd_groups.groupby import SeriesGroupBy, GroupByAgg, broadcast_agg, is_compatible
from siuba.ops import ALL_OPS
from siuba import ops
from .translate import GroupByAgg, SeriesGroupBy
from siuba.ops.generics import ALL_PROPERTIES, ALL_ACCESSORS
from siuba.siu import Call, singledispatch2
from siuba.dply.verbs import mutate, filter, summarize, DataFrameGroupBy
from pandas.core.dtypes.inference import is_scalar
import warnings
def grouped_eval(__data, expr, require_agg = False):
    """Evaluate a scalar or siu Call expression against grouped data.

    Scalars pass through unchanged. Calls are translated via the module-level
    ``call_listener`` / ``call_validator`` (defined elsewhere in this module),
    with a warned fallback to the untranslated expression. Aggregate results
    are returned directly when ``require_agg`` is True, otherwise broadcast
    back to the original length (like a transform).
    """
    if is_scalar(expr):
        return expr

    if isinstance(expr, Call):
        try:
            # translate the call tree to fast grouped operations
            call = call_listener.enter(expr)
            call_validator.visit(call)
        except FunctionLookupError as e:
            # no fast translation available: warn and run the original call
            fallback_warning(expr, str(e))
            call = expr

        #
        grouped_res = call(__data)

        if isinstance(grouped_res, SeriesGroupBy):
            if not is_compatible(grouped_res, __data):
                raise ValueError("Incompatible groupers")

            # TODO: may want to validate result is correct length / index?
            # e.g. a SeriesGroupBy could be compatible and not an agg
            if require_agg:
                return grouped_res.obj
            else:
                # broadcast from aggregate to original length (like transform)
                return broadcast_agg(grouped_res)
        else:
            # can happen right now if user selects, e.g., a property of the
            # groupby object, like .dtype, which returns a single value
            # in the future, could restrict set of operations user could perform
            raise ValueError("Result must be subclass of SeriesGroupBy")

    raise ValueError("Grouped expressions must be a siu expression or scalar")
def _transform_args(args):
    """Translate each argument into a validated call tree.

    Returns the list of translated arguments, or None to signal that the
    caller should bail out to the slow (non-grouped) implementation.
    """
    transformed = []
    for arg in args:
        if is_scalar(arg):
            transformed.append(arg)
            continue

        if isinstance(arg, Call):
            try:
                validated = call_listener.enter(arg)
                call_validator.visit(validated)
            except FunctionLookupError as err:
                # no fast translation exists -> warn and bail out
                fallback_warning(arg, str(err))
                return None
            transformed.append(validated)
        elif callable(arg):
            # opaque callables cannot be analyzed -> bail out
            return None

    return transformed
def summarize(__data, *args, **kwargs):
    """Assign variables that are single number summaries of a DataFrame.

    Grouped DataFrames will produce one row for each group. Otherwise, summarize
    produces a DataFrame with a single row.

    Parameters
    ----------
    __data: a DataFrame
        The data being summarized.
    **kwargs:
        new_col_name=value pairs, where value can be a function taking
        a single argument for the data being operated on.

    Examples
    --------
    >>> from siuba import _, group_by, summarize
    >>> from siuba.data import cars

    >>> cars >> summarize(avg = _.mpg.mean(), n = _.shape[0])
             avg   n
    0  20.090625  32

    >>> g_cyl = cars >> group_by(_.cyl)
    >>> g_cyl >> summarize(min = _.mpg.min())
       cyl   min
    0    4  21.4
    1    6  17.8
    2    8  10.4

    >>> g_cyl >> summarize(mpg_std_err = _.mpg.std() / _.shape[0]**.5)
       cyl  mpg_std_err
    0    4     1.359764
    1    6     0.549397
    2    8     0.684202
    """
    results = {}

    # unnamed arguments must be callables returning a 1-row DataFrame;
    # each of their columns becomes a result column
    for ii, expr in enumerate(args):
        if not callable(expr):
            raise TypeError(
                "Unnamed arguments to summarize must be callable, but argument number "
                f"{ii} was type: {type(expr)}"
            )

        res = expr(__data)

        if isinstance(res, DataFrame):
            if len(res) != 1:
                raise ValueError(
                    f"Summarize argument `{ii}` returned a DataFrame with {len(res)} rows."
                    " Result must only be a single row."
                )

            for col_name in res.columns:
                results[col_name] = res[col_name].array
        else:
            # fixed: report the type of the *result* (was type(expr)) and
            # close the backtick around the argument number
            raise ValueError(
                "Unnamed arguments to summarize must return a DataFrame, but argument "
                f"`{ii}` returned type: {type(res)}"
            )

    # named arguments: scalars or length-1 results become single cells
    for k, v in kwargs.items():
        # TODO: raise error if a named expression returns a DataFrame
        res = v(__data) if callable(v) else v

        if is_scalar(res) or len(res) == 1:
            # keep result, but use underlying array to avoid crazy index issues
            # on DataFrame construction (#138)
            results[k] = res.array if isinstance(res, pd.Series) else res
        else:
            raise ValueError(
                f"Summarize argument `{k}` must return result of length 1 or a scalar.\n\n"
                f"Result type: {type(res)}\n"
                f"Result length: {len(res)}"
            )

    # must pass index, or raises error when using all scalar values
    return DataFrame(results, index = [0])
The provided code snippet includes necessary dependencies for implementing the `fast_summarize` function. Write a Python function `def fast_summarize(__data, **kwargs)` to solve the following problem:
Warning: this function is experimental
Here is the function:
def fast_summarize(__data, **kwargs):
    """Warning: this function is experimental

    Grouped summarize that evaluates translated call trees as aggregates,
    falling back to the generic ``summarize`` verb when any expression cannot
    be translated.
    """
    # transform call trees, potentially bail out to slow method --------
    new_vals = _transform_args(kwargs.values())

    if new_vals is None:
        return summarize(__data, **kwargs)

    # perform fast method ----
    groupings = __data.grouper.groupings

    # TODO: better way of getting this frame?
    out = __data.grouper.result_index.to_frame()

    # iterate the already-translated expressions (consistent with fast_mutate);
    # iterating the raw kwargs re-validated each call and could warn twice.
    for name, expr in zip(kwargs, new_vals):
        # special case: set scalars directly
        res = grouped_eval(__data, expr, require_agg = True)
        out[name] = res

    return out.reset_index(drop = True)
159,025 | import pandas as pd
import numpy as np
from pandas.core.groupby import DataFrameGroupBy
from siuba.dply.tidyselect import var_create, var_select
from siuba.dply.verbs import gather, separate, extract, expand, complete
from siuba.siu import singledispatch2, Call
from siuba.dply.forcats import fct_inorder
from .utils import vec_as_names, reconstruct_tibble
from typing import Any
def _maybe_list(seq):
if seq is None:
return None
return list(seq) | null |
159,026 | import pandas as pd
import numpy as np
from pandas.core.groupby import DataFrameGroupBy
from siuba.dply.tidyselect import var_create, var_select
from siuba.dply.verbs import gather, separate, extract, expand, complete
from siuba.siu import singledispatch2, Call
from siuba.dply.forcats import fct_inorder
from .utils import vec_as_names, reconstruct_tibble
from typing import Any
def _select_expr_slice(x: "tuple[str]") -> Call:
    """Build a stripped symbolic getitem Call selecting the columns in ``x``."""
    from siuba.siu import strip_symbolic, Symbolic

    # Symbolic()[x] records a getitem call tree; strip it down to a plain Call
    return strip_symbolic(Symbolic()[x])
def _tidy_select(__data, cols, arg_name):
    """Resolve a tidyselect column specification against ``__data``'s columns.

    Returns an ordered mapping of selected columns (empty when ``cols`` is
    None). Raises ValueError, naming ``arg_name``, if the selection refers to
    columns not present in the data.
    """
    if cols is None:
        return {}

    selection = var_select(__data.columns, *var_create(cols))

    # TODO: where in tidyselect package does this check happen?
    unknown = set(selection) - set(__data.columns)
    if unknown:
        raise ValueError(
            f"{arg_name} must select columns present in the data. "
            f"Could not find these columns: {unknown}"
        )

    return selection
def pivot_wider_spec(
    __data,
    spec,
    names_repair = "check_unique",
    id_cols = None,
    id_expand = False,
    values_fill = None,
    values_fn = None,
    unused_fn = None
):
    """Pivot long data to wide format according to a spec DataFrame.

    ``spec`` has a ``.name`` column (output column names), a ``.value`` column
    (source value columns), and one column per names_from variable.
    """
    input_ = __data

    # guards ----
    if isinstance(__data, DataFrameGroupBy):
        __data = __data.obj

    if _is_select_everything(id_cols):
        # restores id_cols to the default, which uses all remaining cols
        id_cols = None

    if unused_fn is not None:
        raise NotImplementedError()

    if not isinstance(id_expand, bool):
        raise TypeError("`id_expand` argument must be True or False.")

    # handle tidyselection ----------------------------------------------------
    # spec columns other than .name / .value are the names_from variables
    name_vars = spec.columns[~spec.columns.isin([".name", ".value"])].tolist()
    val_vars = spec.loc[:, ".value"].unique().tolist()

    # select id columns
    if id_cols is None:
        # default: every column not used for names or values identifies a row
        others = {*name_vars, *val_vars}
        id_vars = [col for col in __data.columns if col not in others]
    else:
        id_vars = _tidy_select(__data, id_cols, "id_cols")
        id_var_bad = set(id_vars) & set([*name_vars, *val_vars])

        if id_var_bad:
            raise ValueError(
                "id_cols contains columns that are in "
                f"names_from or values_from: {id_var_bad}."
            )

    # use a categoricals for name columns, to ensure their order in wide format
    # is first-observed order (pandas by default uses alphabetical)
    tmp = __data.copy()
    for name in name_vars:
        tmp[name] = fct_inorder(tmp[name])

    # pivot to wide -----------------------------------------------------------
    if values_fn is None:
        # this block is essentially pd.pivot (which also uses unstack), but also
        # supports filing NAs, and resets indexes
        if not len(id_vars):
            # without id_vars we try to pivot to a frame with 1 row
            n_id_vars = 1
            tmp = tmp.set_index([np.zeros(len(tmp.index)), *name_vars])
        else:
            n_id_vars = len(id_vars)
            tmp = tmp.set_index([*id_vars, *name_vars])

        # unstack only the name-variable index levels (they follow the id levels)
        to_unstack = list(range(n_id_vars, n_id_vars + len(name_vars)))

        wide = (tmp
            .loc[:, list(val_vars)]
            .unstack(to_unstack, fill_value=values_fill)
        )
    else:
        # pivot_table requires a values_fn, so we only use it when one is provided.
        if not len(id_vars):
            # pivot_table without an index var is equivalent to the index being constant.
            # normally the index vars are the index of the pivot_table result, but without
            # an index column explicitly named, the value vars become the rows.
            # so we need to create an explicit index column...
            index_cols = [_unique_col_name(tmp)]
            tmp.loc[:, index_cols[0]] = np.zeros(len(tmp.index))
        else:
            index_cols = list(id_vars)

        # this ensures a single value column won't be used when constructing names
        # since a list creates a new index dimension
        # NOTE(review): _values is computed but never passed to pivot_table
        # below (which receives list(val_vars)) -- confirm whether values=_values
        # was intended.
        _values = list(val_vars) if len(val_vars) > 1 else list(val_vars)[0]

        wide = pd.pivot_table(
            tmp,
            index=index_cols,
            columns=list(name_vars),
            values=list(val_vars),
            fill_value=values_fill,
            aggfunc=values_fn,
        )

        if wide.index.names != index_cols:
            raise ValueError(
                "pivot_wider produced a result with incorrect index variables. "
                "There is a bug in pandas when attempting to aggregate by a values_fn "
                "that is not an aggregate.\n\n"
                f"Do all the values_fn arguments return single values?: {values_fn}"
            )

    # flatten / reset indexes -------------------------------------------------
    # flatten column index ----
    if isinstance(wide, pd.Series):
        # the .unstack approach returns a Series when there are no id cols.
        # in this case we make it a frame and don't sort the two columns.
        wide = wide.reset_index()

    # collapse the MultiIndex columns to the .name strings declared in spec
    collapsed_names = _names_from_spec(spec, wide.columns)
    wide.columns = collapsed_names

    # add missing columns and reorder to spec ----
    missing_cols = list(spec[".name"][~spec[".name"].isin(wide.columns)])
    if missing_cols:
        wide[missing_cols] = values_fill

    wide = wide.loc[:, list(spec[".name"])]

    # validate names and move id vars to columns ----
    # note: in pandas 1.5+ we can use the allow_duplicates option to reset, even
    # when index and column names overlap. for now, repair names, rename, then reset.
    _all_raw_names = list(map(str, [*id_vars, *wide.columns]))
    unique_names = vec_as_names(_all_raw_names, repair="unique")
    repaired_names = vec_as_names(_all_raw_names, repair=names_repair)

    uniq_id_vars = unique_names[:len(id_vars)]
    uniq_val_vars = unique_names[len(id_vars):]

    final_id_vars = repaired_names[:len(id_vars)]
    final_val_vars = repaired_names[len(id_vars):]

    wide.columns = uniq_val_vars

    if id_vars:
        wide.rename_axis(uniq_id_vars, inplace=True)
        wide.reset_index(drop=False, inplace=True)
    else:
        wide.reset_index(drop=True, inplace=True)

    wide.columns = repaired_names

    # expand id levels --------------------------------------------------------
    if id_expand:
        # complete() takes a per-column fill dict; normalize a scalar fill
        if values_fill is not None and not isinstance(values_fill, dict):
            values_fill = {k: values_fill for k in final_val_vars}

        wide = complete(wide, *final_id_vars, fill=values_fill, explicit=False)

    # reconstruct with groupings
    return reconstruct_tibble(input_, wide)
def build_wider_spec(
    __data,
    names_from = "name",
    values_from = "value",
    names_prefix = "_",
    names_sep = "_",
    names_glue = None,
    names_sort = False,
    names_vary = "fastest",
    names_expand = False
):
    """Build a specification DataFrame describing a long-to-wide pivot.

    The returned spec has a ``.name`` column (output column names), a
    ``.value`` column (source value columns), and one column per names_from
    variable. See ``pivot_wider`` for parameter meanings.

    Raises
    ------
    ValueError: for an invalid names_vary or empty names_from / values_from.
    TypeError: when names_expand is not a bool.
    NotImplementedError: when names_sort is requested.
    """
    if isinstance(__data, DataFrameGroupBy):
        __data = __data.obj

    # guards ----
    if names_vary not in {"fastest", "slowest"}:
        raise ValueError(
            "names_vary must be one of 'fastest', 'slowest', but received "
            f"argument: {repr(names_vary)}"
        )

    if names_sort:
        raise NotImplementedError()

    if not isinstance(names_expand, bool):
        raise TypeError(
            "names_expand must be set to True or False. "
            f"Received type: {type(names_expand)}."
        )

    # validate tidy selections ------------------------------------------------
    name_vars = _tidy_select(__data, names_from, "names_from")
    # fixed: arg_name was misspelled "values_form", producing a confusing
    # error message when the selection failed
    val_vars = _tidy_select(__data, values_from, "values_from")

    if not name_vars:
        raise ValueError("`names_from` must select at least one column.")

    if not val_vars:
        raise ValueError("`values_from` must select at least one column.")

    # get unique variable levels from names_from columns ----------------------
    name_data = __data.loc[:, list(name_vars)]

    if names_expand:
        # cartesian product of unique level names
        # TODO: should nan values be turned into "NA" to match dplyr?
        row_ids = expand(name_data, *name_vars)
    else:
        # distinct rows of variables
        row_ids = name_data.drop_duplicates()

    # cross with value var names ----------------------------------------------
    value_levels = pd.Series(list(val_vars), name = ".value")

    if names_vary == "fastest":
        spec = pd.merge(
            value_levels,
            row_ids,
            how="cross"
        )
    else:
        # the left arg varies slowest, so use names on the left, then relocate.
        spec = (
            pd.merge(
                row_ids,
                value_levels,
                how="cross"
            )
            .loc[:, lambda d: [".value", *d.columns[:-1]]]
        )

    # get columns used to construct .name
    if len(value_levels) > 1:
        df_name_parts = spec
    else:
        # a single value column is omitted from the constructed names
        df_name_parts = spec.drop(columns=".value")

    # TODO: remove use of multiindex, which is unnecessary
    if len(df_name_parts.columns) > 1:
        spec_as_multi = pd.MultiIndex.from_frame(df_name_parts)
        name_col = _collapse_index_names(spec_as_multi, sep=names_sep, glue=names_glue)
    else:
        name_col = list(df_name_parts.iloc[:, 0])

    spec.insert(0, ".name", name_col)

    return spec
The provided code snippet includes necessary dependencies for implementing the `pivot_wider` function. Write a Python function `def pivot_wider( __data, id_cols=None, id_expand=False, names_from="name", names_prefix="", names_sep="_", names_glue=None, names_sort=None, names_vary="fastest", names_expand=False, names_repair="check_unique", values_from="value", values_fill=None, values_fn=None, unused_fn=None )` to solve the following problem:
Pivot data from long to wide format. This function splits a column, putting the pieces side-by-side based on an index. Parameters ---------- __data: The input data. id_cols: A selection of columns that uniquely identify each observation. id_expand: Whether to ensure each unique combination of id_cols is a row in the data before pivoting, using `expand()`. This results in more rows. When True, this also sorts the final result by the `id_cols`. names_from, values_from: A pair fo arguments describing which column (or columns) to get the name of the output column (names_from), and which column (or columns) to get the cell values from (values_from). names_prefix: String added to the start of every variable name. names_sep: If names_from or values_from contains multiple values, this will be used to join their values together into a single string to use as a column name. names_glue: Instead of names_sep and names_prefix, supply a string template that uses the names_from columns (and a special .value variable) to create custom column names. names_sort: Should the column names be sorted? The default is False, which results in column names ordered by first appearance. names_vary: Option specifying how columns are ordered when names_from and values_from both identify new columns. "fastest" varies names_from fastest, while "slowest" varies names_from slowest. names_expand: Whether to ensure all combinations of names_from columns are in the result using the `expand()` function. This results in more columns in the output. names_repair: Strategy for fixing of invalid column names. "minimal" leaves them as is. "check_unique" raises an error if there are duplicate names. "unique" de-duplicates names by appending "___{position}" to them. values_fill: A scalar value used to fill in any missing values. Alternatively, a dictionary mapping column names to fill values. values_fn: An optional function to apply to each cell of the output. 
This is useful when each cell would contain multiple values. E.g. values_fn="max" would calculate the max value. unused_fn: Not implemented. Examples -------- >>> from siuba import _ >>> df = pd.DataFrame( ... {"id": ["a", "b", "a"], "name": ["x", "x", "y"], "value": [1, 2, 3]} ... ) >>> df id name value 0 a x 1 1 b x 2 2 a y 3 >>> pivot_wider(df, names_from=_.name, values_from=_.value) id x y 0 a 1.0 3.0 1 b 2.0 NaN >>> pivot_wider(df, names_from=_.name, values_from=_.value, values_fill=0) id x y 0 a 1 3 1 b 2 0 >>> many = pd.DataFrame({ ... "id": [1, 1, 2, 2], ... "var": ["one", "two", "one", "two"], ... "x": [1, 2, 3, 4], ... "y": [6, 7, 8, 9] ... }) >>> pivot_wider(many, names_from=_.var, values_from=_[_.x, _.y]) id x_one x_two y_one y_two 0 1 1 2 6 7 1 2 3 4 8 9 >>> pivot_wider(many, names_from=_.var, values_from=_[_.x, _.y], names_vary="slowest") id x_one y_one x_two y_two 0 1 1 6 2 7 1 2 3 8 4 9 >>> pivot_wider(many, names_from=_.var, values_from=_[_.x, _.y], names_sep=".") id x.one x.two y.one y.two 0 1 1 2 6 7 1 2 3 4 8 9 >>> glue = "{variable}_X_{value}" >>> pivot_wider(many, names_from=_.var, values_from=_[_.x, _.y], names_glue=glue) id x_X_one x_X_two y_X_one y_X_two 0 1 1 2 6 7 1 2 3 4 8 9 >>> from siuba.data import warpbreaks >>> warpbreaks.head() breaks wool tension 0 26 A L 1 30 A L 2 54 A L 3 25 A L 4 70 A L >>> pivot_wider(warpbreaks, names_from=_.wool, values_from=_.breaks, values_fn="mean") tension A B 0 H 24.555556 18.777778 1 L 44.555556 28.222222 2 M 24.000000 28.777778
Here is the function:
def pivot_wider(
    __data,
    id_cols=None,
    id_expand=False,
    names_from="name",
    names_prefix="",
    names_sep="_",
    names_glue=None,
    names_sort=None,
    names_vary="fastest",
    names_expand=False,
    names_repair="check_unique",
    values_from="value",
    values_fill=None,
    values_fn=None,
    unused_fn=None
):
    """Pivot data from long to wide format.

    This function splits a column, putting the pieces side-by-side based on an index.

    Parameters
    ----------
    __data:
        The input data.
    id_cols:
        A selection of columns that uniquely identify each observation.
    id_expand:
        Whether to ensure each unique combination of id_cols is a row in the data
        before pivoting, using `expand()`. This results in more rows. When True,
        this also sorts the final result by the `id_cols`.
    names_from, values_from:
        A pair of arguments describing which column (or columns) to get the name of
        the output column (names_from), and which column (or columns) to get the
        cell values from (values_from).
    names_prefix:
        String added to the start of every variable name.
    names_sep:
        If names_from or values_from contains multiple values, this will be used
        to join their values together into a single string to use as a column name.
    names_glue:
        Instead of names_sep and names_prefix, supply a string template that uses
        the names_from columns (and a special .value variable) to create custom
        column names.
    names_sort:
        Should the column names be sorted? The default is False, which results
        in column names ordered by first appearance.
    names_vary:
        Option specifying how columns are ordered when names_from and values_from
        both identify new columns. "fastest" varies names_from fastest, while "slowest"
        varies names_from slowest.
    names_expand:
        Whether to ensure all combinations of names_from columns are in the result
        using the `expand()` function. This results in more columns in the output.
    names_repair:
        Strategy for fixing of invalid column names. "minimal" leaves them as is.
        "check_unique" raises an error if there are duplicate names. "unique"
        de-duplicates names by appending "___{position}" to them.
    values_fill:
        A scalar value used to fill in any missing values. Alternatively, a
        dictionary mapping column names to fill values.
    values_fn:
        An optional function to apply to each cell of the output. This is useful
        when each cell would contain multiple values. E.g. values_fn="max" would
        calculate the max value.
    unused_fn:
        Not implemented.

    Examples
    --------
    >>> from siuba import _
    >>> df = pd.DataFrame(
    ...     {"id": ["a", "b", "a"], "name": ["x", "x", "y"], "value": [1, 2, 3]}
    ... )
    >>> df
      id name  value
    0  a    x      1
    1  b    x      2
    2  a    y      3
    >>> pivot_wider(df, names_from=_.name, values_from=_.value)
      id    x    y
    0  a  1.0  3.0
    1  b  2.0  NaN
    >>> pivot_wider(df, names_from=_.name, values_from=_.value, values_fill=0)
      id  x  y
    0  a  1  3
    1  b  2  0
    >>> many = pd.DataFrame({
    ...     "id": [1, 1, 2, 2],
    ...     "var": ["one", "two", "one", "two"],
    ...     "x": [1, 2, 3, 4],
    ...     "y": [6, 7, 8, 9]
    ... })
    >>> pivot_wider(many, names_from=_.var, values_from=_[_.x, _.y])
       id  x_one  x_two  y_one  y_two
    0   1      1      2      6      7
    1   2      3      4      8      9
    >>> pivot_wider(many, names_from=_.var, values_from=_[_.x, _.y], names_vary="slowest")
       id  x_one  y_one  x_two  y_two
    0   1      1      6      2      7
    1   2      3      8      4      9
    >>> pivot_wider(many, names_from=_.var, values_from=_[_.x, _.y], names_sep=".")
       id  x.one  x.two  y.one  y.two
    0   1      1      2      6      7
    1   2      3      4      8      9
    >>> glue = "{variable}_X_{value}"
    >>> pivot_wider(many, names_from=_.var, values_from=_[_.x, _.y], names_glue=glue)
       id  x_X_one  x_X_two  y_X_one  y_X_two
    0   1        1        2        6        7
    1   2        3        4        8        9
    >>> from siuba.data import warpbreaks
    >>> warpbreaks.head()
       breaks wool tension
    0      26    A       L
    1      30    A       L
    2      54    A       L
    3      25    A       L
    4      70    A       L
    >>> pivot_wider(warpbreaks, names_from=_.wool, values_from=_.breaks, values_fn="mean")
      tension          A          B
    0       H  24.555556  18.777778
    1       L  44.555556  28.222222
    2       M  24.000000  28.777778
    """
    input_ = __data

    if isinstance(__data, DataFrameGroupBy):
        __data = __data.obj

    # create spec ----
    spec = build_wider_spec(
        __data,
        names_from = names_from,
        values_from = values_from,
        names_prefix = names_prefix,
        names_sep = names_sep,
        names_glue = names_glue,
        names_sort = names_sort,
        names_vary = names_vary,
        names_expand = names_expand
    )

    # select id columns ---
    # necessary here, since if the spec is 0 rows you cannot know values_from
    # TODO: clean up symbolic handling of slices
    if id_cols is None:
        name_vars = _tidy_select(__data, names_from, "names_from")
        val_vars = _tidy_select(__data, values_from, "values_from")

        others = {*name_vars, *val_vars}
        id_cols = tuple([col for col in __data.columns if col not in others])
        id_vars = _select_expr_slice(id_cols)
    else:
        id_vars = id_cols

    out = pivot_wider_spec(
        input_,
        spec,
        names_repair = names_repair,
        id_cols = id_vars,
        id_expand = id_expand,
        values_fill = values_fill,
        values_fn = values_fn,
        unused_fn = unused_fn
    )

    return out
This is useful when each cell would contain multiple values. E.g. values_fn="max" would calculate the max value. unused_fn: Not implemented. Examples -------- >>> from siuba import _ >>> df = pd.DataFrame( ... {"id": ["a", "b", "a"], "name": ["x", "x", "y"], "value": [1, 2, 3]} ... ) >>> df id name value 0 a x 1 1 b x 2 2 a y 3 >>> pivot_wider(df, names_from=_.name, values_from=_.value) id x y 0 a 1.0 3.0 1 b 2.0 NaN >>> pivot_wider(df, names_from=_.name, values_from=_.value, values_fill=0) id x y 0 a 1 3 1 b 2 0 >>> many = pd.DataFrame({ ... "id": [1, 1, 2, 2], ... "var": ["one", "two", "one", "two"], ... "x": [1, 2, 3, 4], ... "y": [6, 7, 8, 9] ... }) >>> pivot_wider(many, names_from=_.var, values_from=_[_.x, _.y]) id x_one x_two y_one y_two 0 1 1 2 6 7 1 2 3 4 8 9 >>> pivot_wider(many, names_from=_.var, values_from=_[_.x, _.y], names_vary="slowest") id x_one y_one x_two y_two 0 1 1 6 2 7 1 2 3 8 4 9 >>> pivot_wider(many, names_from=_.var, values_from=_[_.x, _.y], names_sep=".") id x.one x.two y.one y.two 0 1 1 2 6 7 1 2 3 4 8 9 >>> glue = "{variable}_X_{value}" >>> pivot_wider(many, names_from=_.var, values_from=_[_.x, _.y], names_glue=glue) id x_X_one x_X_two y_X_one y_X_two 0 1 1 2 6 7 1 2 3 4 8 9 >>> from siuba.data import warpbreaks >>> warpbreaks.head() breaks wool tension 0 26 A L 1 30 A L 2 54 A L 3 25 A L 4 70 A L >>> pivot_wider(warpbreaks, names_from=_.wool, values_from=_.breaks, values_fn="mean") tension A B 0 H 24.555556 18.777778 1 L 44.555556 28.222222 2 M 24.000000 28.777778 |
159,027 | import re
import pandas as pd
from collections import Counter
from typing import Callable
def _make_unique(name):
    """Placeholder for column-name de-duplication; currently a no-op returning None.

    NOTE(review): the body is just ``pass`` — presumably intended to suffix
    duplicate names (cf. ``vec_as_names``); confirm intent before relying on it.
    """
    pass
159,028 | import pandas as pd
import siuba.ops
from sqlalchemy import sql
from sqlalchemy import types as sqla_types
from siuba.dply.verbs import collect, distinct
from siuba.sql import LazyTbl
from siuba.sql.utils import (
_sql_select,
_sql_column_collection,
_sql_add_columns,
_sql_with_only_columns,
_sql_case
)
from .pivot_wide import (
pivot_wider,
pivot_wider_spec,
build_wider_spec,
_is_select_everything,
_tidy_select,
_select_expr_slice
)
from .utils import vec_as_names
from .sql_pivot_long import _safe_to_dict
def _build_wider_spec(__data: LazyTbl, *args, **kwargs):
# building the spec only really needs the columns names. however, because we
# matched tidyr behavior, we just pass a DataFrame in for now.
raise NotImplementedError(
"build_wider_spec currently requires a DataFrame. Please collect() your "
f"data first. Received type: {type(__data)}"
) | null |
159,029 | import pandas as pd
import siuba.ops
from sqlalchemy import sql
from sqlalchemy import types as sqla_types
from siuba.dply.verbs import collect, distinct
from siuba.sql import LazyTbl
from siuba.sql.utils import (
_sql_select,
_sql_column_collection,
_sql_add_columns,
_sql_with_only_columns,
_sql_case
)
from .pivot_wide import (
pivot_wider,
pivot_wider_spec,
build_wider_spec,
_is_select_everything,
_tidy_select,
_select_expr_slice
)
from .utils import vec_as_names
from .sql_pivot_long import _safe_to_dict
_OPS_DEFAULT=siuba.ops.max
def _sql_select(columns, *args, **kwargs):
    """Version-portable sql.select: list-style on SQLAlchemy 1.2/1.3, splat-style after."""
    from sqlalchemy import sql

    legacy = is_sqla_12() or is_sqla_13()
    if legacy:
        # use old syntax, where columns are passed as a list
        return sql.select(columns, *args, **kwargs)

    return sql.select(*columns, *args, **kwargs)
def _sql_case(whens, **kwargs):
    """Version-portable sql.case: pass whens as a list only on SQLAlchemy 1.2/1.3."""
    from sqlalchemy import sql

    if not (is_sqla_12() or is_sqla_13()):
        return sql.case(*whens, **kwargs)

    return sql.case(whens, **kwargs)
def _tidy_select(__data, cols, arg_name):
    """Resolve a tidyselect expression against __data's columns.

    Returns an ordered mapping of the selected columns ({} when cols is None).
    Raises ValueError when the selection names columns absent from the data;
    arg_name is used in that error message.
    """
    if cols is None:
        return {}

    selection = var_select(__data.columns, *var_create(cols))

    # TODO: where in tidyselect package does this check happen?
    unknown = set(selection) - set(__data.columns)
    if unknown:
        raise ValueError(
            f"{arg_name} must select columns present in the data. "
            f"Could not find these columns: {unknown}"
        )

    return selection
def _is_select_everything(expr):
    """Return True when expr is the select-everything slice expression (e.g. _[:])."""
    # this is crazy but probably fine for now.
    if not (isinstance(expr, Call) and expr.func == "__getitem__"):
        return False

    inner = expr.args[1]
    if not (isinstance(inner, Call) and inner.func == "__siu_slice__"):
        return False

    return len(inner.args) == 1 and inner.args[0] == slice(None, None, None)
def vec_as_names(names, *, repair: "str | Callable"):
    """Validate and repair column names.

    Parameters
    ----------
    names:
        A list-like of column names.
    repair:
        One of "minimal", "unique", "check_unique", or a callable that is
        applied to the names before uniqueness checking.
    """
    # minimal: names can be accessed using df[<name>], e.g. "x", np.nan
    # unique: names are minimal and no duplicates. Can be accessed using df[name]
    # check_unique: error if duplicates exist
    # universal: accessible by attribute (not implemented here)
    is_custom = callable(repair)
    if not is_custom and repair not in {"unique", "check_unique", "minimal"}:
        raise NotImplementedError()

    # minimal ---
    if repair == "minimal":
        return names

    # custom function ---
    if is_custom:
        names = repair(names)

    # check_unique ----
    counts = Counter(names)
    has_dupes = len(counts) < len(names)
    if has_dupes and (is_custom or repair == "check_unique"):
        dupes = [entry for entry, n in counts.items() if n > 1]
        raise ValueError(
            f"Names must be unique, but detected {len(dupes)} duplicate name(s).\n\n"
            f"Duplicated names: {dupes}"
        )

    # name repair unique: suffix ___{position} onto duplicated (stripped) names ----
    stripped = [_strip_suffix(nm) for nm in names]
    stripped_counts = Counter(stripped)
    return [
        f"{nm}___{ii}" if stripped_counts[nm] > 1 else nm
        for ii, nm in enumerate(stripped)
    ]
def _safe_to_dict(df, *args, **kwargs):
"""Return something like df.to_dict(), but ensure t contains no numpy types.
For context on this pandas issue, see issues linked in this PR:
https://github.com/pandas-dev/pandas/issues/13258
"""
import json
return json.loads(df.to_json(*args, **kwargs))
def _pivot_wider_spec(
    __data: LazyTbl,
    spec,
    names_repair = "check_unique",
    id_cols = None,
    id_expand = False,
    values_fill = None,
    values_fn = _OPS_DEFAULT,
    unused_fn = None
):
    """Pivot a SQL table to wide format according to an explicit spec DataFrame.

    Each spec row (.name, .value, <name parts...>) becomes one output column,
    built as values_fn(CASE WHEN <name parts match> THEN <value column> END),
    grouped by the id columns.

    Raises
    ------
    NotImplementedError
        For id_expand=True, values_fill, unused_fn, or a values_fn that is not
        a siuba column-operation function.
    ValueError
        If a string values_fn has no SQL translation, or id_cols overlaps
        names_from/values_from columns.
    """
    # Key differences:
    # * values_fn by default is "MAX"
    lazy_tbl = __data
    # stand-in frame with the same column names; used only for tidyselect below
    __data = pd.DataFrame(columns = list(__data.last_op.alias().columns.keys()))
    if id_expand:
        raise NotImplementedError()
    if values_fill is not None:
        raise NotImplementedError()
    if isinstance(values_fn, str):
        # look up the SQL aggregate translation by name (e.g. "mean")
        _f = lazy_tbl.translator.aggregate.local.get(values_fn)
        if _f is None:
            raise ValueError(
                f"values_fn={repr(values_fn)} does not have a SQL translation."
            )
        values_fn = _f
    elif not hasattr(values_fn, "dispatch"):
        raise NotImplementedError(
            "values_fn currently must be column operation function. For example:\n\n"
            "from siuba.ops import mean\n"
            "pivot_wider(..., values_fn=mean)"
        )
    # TODO: all of this down to "pivot to wide" taken from original func ------
    if _is_select_everything(id_cols):
        id_cols = None
    if unused_fn is not None:
        raise NotImplementedError()
    if not isinstance(id_expand, bool):
        raise TypeError("`id_expand` argument must be True or False.")
    # tidyselect ----
    # spec metadata columns (everything except .name/.value) name the pivot keys
    name_vars = spec.columns[~spec.columns.isin([".name", ".value"])].tolist()
    val_vars = spec.loc[:, ".value"].unique().tolist()
    # select id columns
    if id_cols is None:
        # default: every column not involved in names/values identifies a row
        others = {*name_vars, *val_vars}
        id_vars = [col for col in __data.columns if col not in others]
    else:
        id_vars = _tidy_select(__data, id_cols, "id_cols")
        id_var_bad = set(id_vars) & set([*name_vars, *val_vars])
        if id_var_bad:
            raise ValueError(
                "id_cols contains columns that are in "
                f"names_from or values_from: {id_var_bad}."
            )
    # pivot to wide -----------------------------------------------------------
    # each row of spec becomes a CASE_WHEN.
    # spec columns: .name, .value, <name_cols...>
    # SELECT
    #     FN(CASE
    #         WHEN {<name_col_key1>} == {<name_col_val1>} and [...] THEN {.value1}
    #     ) AS .name1,
    #     ... AS .name2,
    #     ... AS .name3
    sel_alias = lazy_tbl.last_op.alias()
    sel_cols = sel_alias.columns
    dispatch_cls = lazy_tbl.translator.aggregate.dispatch_cls
    wide_name_cols = []
    for row in _safe_to_dict(spec, orient="records"):
        # every name-part column must equal this row's value for the CASE to fire
        when_clause = sql.and_(sel_cols[k] == row[k] for k in name_vars)
        when_then = (when_clause, sel_cols[row[".value"]])
        col = values_fn(dispatch_cls(), _sql_case([when_then]))
        wide_name_cols.append(col)
    wide_id_cols = [sel_cols[id_] for id_ in id_vars]
    # repair the combined output names (ids first, then spec names)
    _all_raw_names = list(map(str, [*id_vars, *spec[".name"]]))
    repaired_names = vec_as_names(_all_raw_names, repair=names_repair)
    labeled_cols = [
        col.label(name) for name, col in
        zip(repaired_names, [*wide_id_cols, *wide_name_cols])
    ]
    final_sel = _sql_select(labeled_cols).group_by(*wide_id_cols)
    return lazy_tbl.append_op(final_sel)
159,030 | import pandas as pd
import siuba.ops
from sqlalchemy import sql
from sqlalchemy import types as sqla_types
from siuba.dply.verbs import collect, distinct
from siuba.sql import LazyTbl
from siuba.sql.utils import (
_sql_select,
_sql_column_collection,
_sql_add_columns,
_sql_with_only_columns,
_sql_case
)
from .pivot_wide import (
pivot_wider,
pivot_wider_spec,
build_wider_spec,
_is_select_everything,
_tidy_select,
_select_expr_slice
)
from .utils import vec_as_names
from .sql_pivot_long import _safe_to_dict
_OPS_DEFAULT=siuba.ops.max
def collect(__data, *args, **kwargs):
    """Signature stub; implementation not included in this excerpt."""
def distinct(__data, *args, _keep_all = False, **kwargs):
    """Signature stub; implementation not included in this excerpt."""
def _select_expr_slice(x: "tuple[str]") -> Call:
    """Signature stub; implementation not included in this excerpt."""
def _tidy_select(__data, cols, arg_name):
    """Signature stub; implementation not included in this excerpt."""
def pivot_wider_spec(
    __data,
    spec,
    names_repair = "check_unique",
    id_cols = None,
    id_expand = False,
    values_fill = None,
    values_fn = None,
    unused_fn = None
):
    """Signature stub; implementation not included in this excerpt."""
def build_wider_spec(
    __data,
    names_from = "name",
    values_from = "value",
    names_prefix = "_",
    names_sep = "_",
    names_glue = None,
    names_sort = False,
    names_vary = "fastest",
    names_expand = False
):
    """Signature stub; implementation not included in this excerpt."""
def vec_as_names(names, *, repair: "str | Callable"):
    """Signature stub; implementation not included in this excerpt."""
def _pivot_wider(
    __data: LazyTbl,
    id_cols=None,
    id_expand=False,
    names_from="name",
    names_prefix="",
    names_sep="_",
    names_glue=None,
    names_sort=None,
    names_vary="fastest",
    names_expand=False,
    names_repair="check_unique",
    values_from="value",
    values_fill=None,
    values_fn=_OPS_DEFAULT,
    unused_fn=None
):
    """Pivot a SQL table from long to wide format.

    Queries the distinct names_from combinations from the database to build
    the spec, then delegates the actual pivot to pivot_wider_spec. Note
    values_fn defaults to a SQL MAX aggregation (_OPS_DEFAULT).
    """
    # note that we use three forms of the data: __data for tidyselect,
    # distinct_data for spec creation, and lazy_tbl for the actual pivot
    lazy_tbl = __data
    __data = pd.DataFrame(columns = list(__data.last_op.alias().columns.keys()))
    # tidyselect variable names -----------------------------------------------
    # adapted from pivot_wide
    name_vars = _tidy_select(__data, names_from, "names_from")
    val_vars = _tidy_select(__data, values_from, "values_from")
    if id_cols is None:
        # default ids: all columns not used for names or values
        others = {*name_vars, *val_vars}
        id_cols = tuple([col for col in __data.columns if col not in others])
        id_vars = _select_expr_slice(id_cols)
    else:
        id_vars = id_cols
    # create dummy data with all names_from levels ----------------------------
    # one row per distinct name combination; value columns are dummy placeholders
    distinct_data = collect(distinct(lazy_tbl, *name_vars)).copy()
    distinct_data[list(val_vars)] = True
    # fail fast on duplicate column names before building the spec
    vec_as_names(list(distinct_data.columns), repair="check_unique")
    # build spec and pivot ----------------------------------------------------
    spec = build_wider_spec(
        distinct_data,
        names_from = names_from,
        values_from = values_from,
        names_prefix = names_prefix,
        names_sep = names_sep,
        names_glue = names_glue,
        names_sort = names_sort,
        names_vary = names_vary,
        names_expand = names_expand
    )
    out = pivot_wider_spec(
        lazy_tbl,
        spec,
        names_repair = names_repair,
        id_cols = id_vars,
        id_expand = id_expand,
        values_fill = values_fill,
        values_fn = values_fn,
        unused_fn = unused_fn
    )
    return out
159,031 | import pandas as pd
import numpy as np
import re
from typing import Union, Tuple, Dict, Optional, Callable
from pandas.core.groupby import DataFrameGroupBy
from siuba.dply.verbs import gather, var_create, var_select, separate, extract
from siuba.siu import singledispatch2
from .utils import vec_as_names, reconstruct_tibble, check_dict_of_functions
def pivot_longer(
    __data,
    *cols,
    names_to: Union[str, Tuple[str, ...]] = "name",
    names_prefix: Optional[str] = None,
    names_sep: Optional[str] = None,
    names_pattern: Optional[str] = None,
    names_ptypes: Optional[Tuple] = None,
    names_repair: str = "check_unique",
    values_to: str = "value",
    values_drop_na: bool = False,
    values_ptypes: Optional[Union[str, Tuple[str, ...]]] = None,
    values_transform: Optional[Dict] = None,
):
    """Signature stub; implementation not included in this excerpt."""
def _pivot_longer_gdf(__data, *args, **kwargs):
    """Ungroup, pivot longer, then re-group by the original grouping columns."""
    # TODO: consolidate all verbs that punt to DataFrame version (#118)
    group_names = [grouping.name for grouping in __data.grouper.groupings]

    long_df = pivot_longer(__data.obj, *args, **kwargs)

    missing_groups = set(group_names) - set(long_df.columns)
    if missing_groups:
        raise ValueError(
            "When using pivot_longer on grouped data, the result must contain "
            "original grouping columns. Missing group columns: %s" %missing_groups
        )

    return long_df.groupby(group_names)
159,032 | import pandas as pd
from sqlalchemy import sql
from sqlalchemy import types as sqla_types
from siuba.dply.verbs import spread
from siuba.sql import LazyTbl
from siuba.sql.utils import (
_sql_select,
_sql_column_collection,
_sql_add_columns,
_sql_with_only_columns,
)
from .pivot_long import pivot_longer, pivot_longer_spec, build_longer_spec, spec_to_multiindex
def unpack_spec_row(d):
    """Split one spec-row dict into (.name, .value, remaining-metadata dict)."""
    meta = {key: val for key, val in d.items() if key not in (".name", ".value")}
    return d[".name"], d[".value"], meta
159,033 | import pandas as pd
from sqlalchemy import sql
from sqlalchemy import types as sqla_types
from siuba.dply.verbs import spread
from siuba.sql import LazyTbl
from siuba.sql.utils import (
_sql_select,
_sql_column_collection,
_sql_add_columns,
_sql_with_only_columns,
)
from .pivot_long import pivot_longer, pivot_longer_spec, build_longer_spec, spec_to_multiindex
def build_longer_spec(
    __data,
    *cols,
    names_to="name",
    values_to="value",
    names_prefix: "str | None"=None,
    names_sep=None,
    names_pattern=None,
    names_ptypes=None,
    names_transform: "dict[str, Callable] | None"=None
):
    """Construct a spec DataFrame describing a long pivot.

    The result has one row per selected wide column, with columns:
    .name (original column name), .value (destination value column), plus one
    column per entry in names_to holding the split name parts.

    Raises
    ------
    ValueError
        If both names_sep and names_pattern are given, or no columns selected.
    TypeError
        If names_prefix is not a string/None, or multiple names_to entries are
        given without names_sep/names_pattern.
    """
    if names_sep is not None and names_pattern is not None:
        raise ValueError("You may only use either `names_sep` or "
                         "`names_pattern`.")
    if isinstance(names_to, str):
        names_to = (names_to,)
    # select id columns and measure data --------------------------------------
    var_list = var_create(*cols)
    od = var_select(__data.columns, *var_list)
    value_vars = list(od)
    if not value_vars:
        raise ValueError(
            "Please select at least 1 column of values in pivot_longer.\n\n"
            "E.g. pivot_longer(data, _.some_col, _.another_col, ...)"
        )
    # note that this will include repeats in the data (e.g. two columns named "a")
    wide_values = __data.loc[:,value_vars]
    wide_cols = list(wide_values.columns)
    # strip prefix ------------------------------------------------------------
    if names_prefix is None:
        names = wide_cols
    elif isinstance(names_prefix, str):
        # anchored sub: only remove the prefix from the start of each name
        names = [re.sub(f"^{names_prefix}", "", name) for name in wide_cols]
    else:
        raise TypeError("names_prefix must be a string or None.")
    # start spec and split name into parts ------------------------------------
    # note that we set .name to be the names with names_prefix removed, do all
    # of the part splitting off that name, then set .name to the original values
    # at the very end.
    df_spec = pd.DataFrame({".name": names, ".value": values_to})
    if names_sep:
        df_spec = separate(df_spec, ".name", names_to, names_sep, remove=False)
    elif names_pattern:
        df_spec = extract(df_spec, ".name", names_to, names_pattern, remove=False)
    else:
        if len(names_to) == 1:
            # no splitting requested: the single name part is the name itself
            df_spec = df_spec.assign(**{names_to[0]: df_spec[".name"]})
        else:
            raise TypeError(
                "pivot_longer either needs names_to to be string, or to receive "
                "names_sep or names_pattern arguments."
            )
    # setting names back to original
    df_spec[".name"] = wide_cols
    # transform columns -------------------------------------------------------
    if names_transform:
        # only the split name-part columns (everything after .name/.value)
        _cols = list(df_spec.columns[2:])
        transforms = check_dict_of_functions(names_transform, _cols, "names_transform")
        for col_name, func in transforms.items():
            df_spec[col_name] = func(df_spec[col_name])
    return df_spec
def _build_longer_spec(__data: LazyTbl, *args, **kwargs):
    """Build a longer spec for a SQL table by delegating to the DataFrame version."""
    # building the spec only really needs the columns names. however, because we
    # matched tidyr behavior, we just pass a DataFrame in for now.
    col_names = list(__data.last_op.alias().columns.keys())
    empty_frame = pd.DataFrame(columns=col_names)
    return build_longer_spec(empty_frame, *args, **kwargs)
159,034 | import pandas as pd
from sqlalchemy import sql
from sqlalchemy import types as sqla_types
from siuba.dply.verbs import spread
from siuba.sql import LazyTbl
from siuba.sql.utils import (
_sql_select,
_sql_column_collection,
_sql_add_columns,
_sql_with_only_columns,
)
from .pivot_long import pivot_longer, pivot_longer_spec, build_longer_spec, spec_to_multiindex
def _safe_to_dict(df, *args, **kwargs):
"""Return something like df.to_dict(), but ensure t contains no numpy types.
For context on this pandas issue, see issues linked in this PR:
https://github.com/pandas-dev/pandas/issues/13258
"""
import json
return json.loads(df.to_json(*args, **kwargs))
def _values_to_select(sel_columns, spec_row: dict, value_vars: "list[str]"):
final_cols = []
for long_name in value_vars:
wide_name = spec_row[long_name]
if pd.isna(wide_name):
final_cols.append(sql.null().label(long_name))
else:
final_cols.append(sel_columns[wide_name].label(long_name))
return final_cols
def spread(__data, key, value, fill = None, reset_index = True):
    """Reshape table by spreading it out to wide format.

    The values of the key column become new column names, and the value
    column fills their cells; all remaining columns identify rows.

    Parameters
    ----------
    __data:
        The input data.
    key:
        Column whose values will be used as new column names.
    value:
        Column whose values will fill the new column entries.
    fill:
        Value to set for any missing values. By default keeps them as missing values.
    reset_index:
        Whether to move the id columns out of the index back into columns.
    """
    key_name = _get_single_var_select(__data.columns, key)
    val_name = _get_single_var_select(__data.columns, value)
    id_names = [nm for nm in __data.columns if nm not in (key_name, val_name)]

    # move ids + key into the index, then unstack the key level into columns
    res = __data.set_index([*id_names, key_name]).unstack(level=-1)

    if fill is not None:
        res = res.fillna(fill)

    # drop the value level from the column multi-index and clear its name
    res.columns = res.columns.droplevel().rename(None)

    if reset_index:
        res = res.reset_index()

    return res
def _sql_select(columns, *args, **kwargs):
    """Call sql.select with the calling convention of the installed SQLAlchemy."""
    from sqlalchemy import sql

    if not (is_sqla_12() or is_sqla_13()):
        return sql.select(*columns, *args, **kwargs)

    # use old syntax, where columns are passed as a list
    return sql.select(columns, *args, **kwargs)
def spec_to_multiindex(df_spec):
    """Convert a long-pivot spec to a column MultiIndex of (.value, <name parts>).

    .value becomes the (unnamed) outer level so the split name columns can be
    stacked while the value columns stay put.
    """
    reserved = {".value", ".name"}
    name_parts = [col for col in df_spec.columns if col not in reserved]

    subset = df_spec.loc[:, [".value", *name_parts]]

    # keep .value levels in first-observed order; otherwise pivot_longer would
    # emit the final columns alphabetically.
    observed_order = subset[".value"].dropna().unique()
    subset[".value"] = pd.Categorical(subset[".value"], observed_order)

    return pd.MultiIndex.from_frame(subset, names=[None, *name_parts])
def _pivot_longer_spec(
    __data: LazyTbl,
    spec,
    names_repair = "check_unique",
    values_drop_na: bool = False,
    values_ptypes = None,
    values_transform = None
) -> LazyTbl:
    """Pivot a SQL table to long format according to an explicit spec.

    Implemented as a UNION ALL of one SELECT per distinct name combination:
    each part selects the id columns, literal name-part values, and the
    corresponding value columns. values_ptypes / values_transform are not
    supported for SQL backends.
    """
    if values_ptypes is not None:
        raise NotImplementedError()
    if values_transform is not None:
        raise NotImplementedError()
    sel = __data.last_op
    sel_alias = sel.alias()
    # extract info from spec ----
    # NOTE(review): column_index appears unused below — confirm before removing.
    column_index = spec_to_multiindex(spec)
    wide_names = list(spec[".name"].unique())
    wide_ids = [name for name in sel_alias.columns.keys() if name not in wide_names]
    long_name_vars = [k for k in spec.columns if k not in {".name", ".value"}]
    long_val_vars = list(spec[".value"].unique())
    # guard against bad specifications ----
    bad_names = set(wide_names) - set(sel_alias.columns.keys())
    if bad_names:
        raise ValueError(f"Pivot spec contains columns not in the data: {bad_names}")
    # reshape to long (via union all) ----
    sel_cols = sel_alias.columns
    # each row maps <new_name>: literal for name vars, or <new_name>: column
    aligned_vars = spread(spec, ".value", ".name")
    union_parts = []
    for row in _safe_to_dict(aligned_vars, orient="records"):
        id_cols = [sel_cols[_id] for _id in wide_ids]
        # TODO: handle when value name (row[k]) is NULL
        value_cols = _values_to_select(sel_cols, row, long_val_vars)
        name_cols = [
            sql.literal(row[k]).label(k)
            for k in long_name_vars
        ]
        union_parts.append(_sql_select([*id_cols, *name_cols, *value_cols]))
    # TODO: what is the base class we are willing to let the select type be?
    # this is a CompoundSelect. Shares GenerativeSelect with sql.select()
    sel_union = sql.union_all(*union_parts)
    if values_drop_na:
        alias = sel_union.alias()
        # keep only rows where every value column is non-NULL
        # TODO: sqlalchemy 1.4+ prefers .is_not()
        bool_clause = sql.and_(*[alias.columns[k].isnot(None) for k in long_val_vars])
        return __data.append_op(alias.select().where(bool_clause))
    return __data.append_op(sel_union)
159,035 | from IPython import get_ipython
from IPython.core.history import HistoryAccessor
from IPython.core.completer import cursor_to_position, _FakeJediCompletion
from typing import Iterable, Any
from functools import wraps, partial
from contextlib import contextmanager
from siuba.siu import Symbolic
def wrap_jedi_matches(_jedi_matches):
    """Signature stub; implementation not included in this excerpt."""
def siuba_jedi_override(shell):
    """Monkey-patch shell.Completer._jedi_matches with the siuba-aware wrapper."""
    # wrap the bound method _jedi_matches. Note that Completer is actually an instance
    completer = shell.Completer
    patched = wrap_jedi_matches(completer._jedi_matches)
    shell.Completer._jedi_matches = partial(patched, completer)
159,036 | from siuba.siu import symbolic_dispatch
from pandas.arrays import DatetimeArray, PeriodArray
from pandas import DatetimeIndex, PeriodIndex, Period, Timestamp, Series
from pandas import offsets
from pandas.core.dtypes.common import is_period_dtype, is_datetime64_any_dtype
from abc import ABC
def _make_abc(name, subclasses):
cls = type(name, (ABC,), {})
for child in subclasses: cls.register(child)
return cls | null |
159,037 | from siuba.siu import symbolic_dispatch
from pandas.arrays import DatetimeArray, PeriodArray
from pandas import DatetimeIndex, PeriodIndex, Period, Timestamp, Series
from pandas import offsets
from pandas.core.dtypes.common import is_period_dtype, is_datetime64_any_dtype
from abc import ABC
def _get_offset(unit):
    """Look up the pandas offset class for *unit*; raise ValueError if unknown."""
    offset_cls = prefix_mapping.get(unit)
    if offset_cls is not None:
        return offset_cls
    raise ValueError("unit {} not a valid offset".format(unit))
def _(x, unit = "S"):
    """floor_date for datetime-likes: floor ticks directly, else snap to the anchor."""
    offset_cls = _get_offset(unit)

    # fixed-frequency (Tick) offsets support .floor directly
    if issubclass(offset_cls, offsets.Tick):
        return x.floor(unit)

    # note: x - 0*offset shifts anchor forward for some reason, so we
    # add then subtract to ensure it doesn't change anchor points
    one_step = offset_cls(n = 1)
    return x.normalize() + one_step - one_step
159,038 | from siuba.siu import symbolic_dispatch
from pandas.arrays import DatetimeArray, PeriodArray
from pandas import DatetimeIndex, PeriodIndex, Period, Timestamp, Series
from pandas import offsets
from pandas.core.dtypes.common import is_period_dtype, is_datetime64_any_dtype
from abc import ABC
def _(x, unit = "S"):
return x.asfreq(unit, how = "start") | null |
159,039 | from siuba.siu import symbolic_dispatch
from pandas.arrays import DatetimeArray, PeriodArray
from pandas import DatetimeIndex, PeriodIndex, Period, Timestamp, Series
from pandas import offsets
from pandas.core.dtypes.common import is_period_dtype, is_datetime64_any_dtype
from abc import ABC
def _get_series_dispatcher(f, x):
if is_period_dtype(x):
return f.registry[PeriodType]
elif is_datetime64_any_dtype(x):
return f.registry[DatetimeType]
raise TypeError("does not seem to be a period or datetime")
def floor_date(x, unit = "S"):
    # generic fallback: only types registered on the dispatcher are supported
    raise TypeError("floor_date not implemented for class {}".format(type(x)))
# Attach the shared DOCSTRING to the dispatcher and the undecorated function.
# NOTE(review): __wrapped__ presumably comes from a dispatch decorator stripped
# from this excerpt — confirm upstream.
floor_date.__doc__ = DOCSTRING
floor_date.__wrapped__.__doc__ = DOCSTRING
def _(x, *args, **kwargs):
    """floor_date for Series: route to the period or datetime implementation."""
    # dispatch to either period or datetime version
    impl = _get_series_dispatcher(floor_date, x)
    return impl(x.dt, *args, **kwargs)
159,040 | from siuba.siu import symbolic_dispatch
from pandas.arrays import DatetimeArray, PeriodArray
from pandas import DatetimeIndex, PeriodIndex, Period, Timestamp, Series
from pandas import offsets
from pandas.core.dtypes.common import is_period_dtype, is_datetime64_any_dtype
from abc import ABC
def _get_offset(unit):
    """Return the offset class registered for *unit*.

    Raises
    ------
    ValueError
        If *unit* is not a known offset prefix.
    """
    if unit not in prefix_mapping:
        raise ValueError("unit {} not a valid offset".format(unit))
    return prefix_mapping[unit]
def _(x, unit = "S"):
    """ceil_date for datetime-likes: ceil ticks directly, else roll to the next anchor."""
    offset_cls = _get_offset(unit)

    if issubclass(offset_cls, offsets.Tick):
        return x.ceil(unit)

    # the 0 ensures it will not rollforward an anchor point
    zero_offset = offset_cls(n = 0)
    return x.normalize() + zero_offset
159,041 | from siuba.siu import symbolic_dispatch
from pandas.arrays import DatetimeArray, PeriodArray
from pandas import DatetimeIndex, PeriodIndex, Period, Timestamp, Series
from pandas import offsets
from pandas.core.dtypes.common import is_period_dtype, is_datetime64_any_dtype
from abc import ABC
def _(x, unit = "S"):
raise NotImplementedError(
"It is not possible to use ceil_date on a Period. "
"Try converting to a DatetimeIndex."
) | null |
159,042 | from siuba.siu import symbolic_dispatch
from pandas.arrays import DatetimeArray, PeriodArray
from pandas import DatetimeIndex, PeriodIndex, Period, Timestamp, Series
from pandas import offsets
from pandas.core.dtypes.common import is_period_dtype, is_datetime64_any_dtype
from abc import ABC
def _get_series_dispatcher(f, x):
if is_period_dtype(x):
return f.registry[PeriodType]
elif is_datetime64_any_dtype(x):
return f.registry[DatetimeType]
raise TypeError("does not seem to be a period or datetime")
def ceil_date(x, unit = "S"):
    # generic fallback: only types registered on the dispatcher are supported
    raise TypeError("ceil_date not implemented for class {}".format(type(x)))
# Attach the shared DOCSTRING to the dispatcher and the undecorated function.
# NOTE(review): __wrapped__ presumably comes from a dispatch decorator stripped
# from this excerpt — confirm upstream.
ceil_date.__doc__ = DOCSTRING
ceil_date.__wrapped__.__doc__ = DOCSTRING
def _(x, *args, **kwargs):
    """ceil_date for Series: dispatch on period vs datetime dtype."""
    dispatched = _get_series_dispatcher(ceil_date, x)
    return dispatched(x.dt, *args, **kwargs)
159,043 | from typing import List, Union
from io import BytesIO
import PIL
from PIL import ImageSequence, Image
import requests
import os
import numpy as np
import imageio
def images_to_gif_bytes(images: List, duration: int = 1000) -> bytes:
    """Encode *images* as an animated GIF and return the raw bytes.

    duration is the per-frame display time in milliseconds; loop=0 makes the
    GIF repeat indefinitely.
    """
    buffer = BytesIO()
    # Pillow writes all frames when save_all is set; the first image carries
    # the rest via append_images.
    images[0].save(
        buffer,
        format='GIF',
        save_all=True,
        append_images=images[1:],
        duration=duration,
        loop=0,  # 0 means the GIF will loop indefinitely
    )
    return buffer.getvalue()
def save_as_gif(images: List, file_path: str, duration: int = 1000):
    """Write *images* to *file_path* as an animated GIF."""
    gif_bytes = images_to_gif_bytes(images, duration)
    with open(file_path, "wb") as out_file:
        out_file.write(gif_bytes)
159,044 | from typing import List, Union
from io import BytesIO
import PIL
from PIL import ImageSequence, Image
import requests
import os
import numpy as np
import imageio
def images_to_mp4_bytes(images: List[Image.Image], duration: int = 1000) -> bytes:
    """Encode *images* as an MP4 video and return the raw bytes.

    duration is the per-frame display time in milliseconds (fps = 1000/duration).
    """
    buffer = BytesIO()
    writer = imageio.get_writer(buffer, format='mp4', fps=1 / (duration / 1000))
    with writer:
        for frame in images:
            writer.append_data(np.array(frame))
    return buffer.getvalue()
def save_as_mp4(images: List[Image.Image], file_path: str, duration: int = 1000):
    """Write *images* to *file_path* as an MP4 video."""
    video_bytes = images_to_mp4_bytes(images, duration)
    with open(file_path, "wb") as out_file:
        out_file.write(video_bytes)
159,045 | from typing import List, Union
from io import BytesIO
import PIL
from PIL import ImageSequence, Image
import requests
import os
import numpy as np
import imageio
def scale_aspect_fill(img, new_width, new_height):
    """Scale *img* to completely fill new_width x new_height, cropping overflow.

    The image is resized (aspect ratio preserved) so the target box is fully
    covered, then center-cropped to the exact target size.
    """
    new_width = int(new_width)
    new_height = int(new_height)

    src_width, src_height = img.size
    ratio_w = float(new_width) / src_width
    ratio_h = float(new_height) / src_height

    # scale by the larger ratio so both dimensions cover the target box
    if ratio_w > ratio_h:
        # It must be fixed by width
        scaled_w, scaled_h = new_width, round(src_height * ratio_w)
    else:
        # Fixed by height
        scaled_w, scaled_h = round(src_width * ratio_h), new_height

    resized = img.resize((scaled_w, scaled_h), Image.LANCZOS)

    # center-crop back to the requested size
    left = (scaled_w - new_width) / 2
    top = (scaled_h - new_height) / 2
    right = (scaled_w + new_width) / 2
    bottom = (scaled_h + new_height) / 2

    return resized.crop((left, top, right, bottom))
159,046 | from typing import List, Union
from io import BytesIO
import PIL
from PIL import ImageSequence, Image
import requests
import os
import numpy as np
import imageio
def get_image(img_path) -> PIL.Image.Image:
    """Load an image from an http(s) URL or a local file path.

    Parameters
    ----------
    img_path:
        A URL starting with "http", or a path on the local filesystem.

    Returns
    -------
    The opened PIL image.

    Raises
    ------
    FileNotFoundError
        If img_path is neither a URL nor an existing local file.
    """
    if img_path.startswith("http"):
        # stream=True lets PIL read straight from the response body
        return PIL.Image.open(requests.get(img_path, stream=True).raw)
    if os.path.exists(img_path):
        return Image.open(img_path)
    # FileNotFoundError (an OSError) is more precise than a bare Exception and
    # remains catchable by callers handling Exception.
    raise FileNotFoundError(f"File not found: {img_path}")
def extract_gif_frames_from_midpoint(image: Union[str, PIL.Image.Image], fps: int=8, target_duration: int=1000) -> list:
    """Sample `fps` frames from the middle of a GIF, aiming for target_duration ms.

    Evenly-spaced samplings with decreasing step sizes are tried, each shifted
    toward the middle of the GIF; the sampling whose total duration is closest
    to target_duration wins.

    Raises
    ------
    ValueError
        If the GIF has fewer frames than `fps`.
    """
    # Load the GIF
    image = get_image(image) if type(image) is str else image
    frames = []
    estimated_frame_time = None
    # some gifs contain the duration - others don't
    # so if there is a duration we will grab it otherwise we will fall back
    for frame in ImageSequence.Iterator(image):
        frames.append(frame.copy())
        if 'duration' in frame.info:
            frame_info_duration = frame.info['duration']
            if frame_info_duration > 0:
                estimated_frame_time = frame_info_duration
    if estimated_frame_time is None:
        if len(frames) <= 16:
            # assume it's 8fps
            estimated_frame_time = 1000 // 8
        else:
            # assume it's 15 fps
            estimated_frame_time = 70
    if len(frames) < fps:
        raise ValueError(f"fps of {fps} is too small for this gif as it only has {len(frames)} frames.")
    skip = len(frames) // fps
    upper_bound_index = len(frames) - 1
    # initial guess: every `skip`-th frame, then shifted so sampling is centered
    best_indices = [x for x in range(0, len(frames), skip)][:fps]
    offset = int(upper_bound_index - best_indices[-1]) // 2
    best_indices = [x + offset for x in best_indices]
    best_duration = (best_indices[-1] - best_indices[0]) * estimated_frame_time
    while True:
        skip -= 1
        if skip == 0:
            break
        indices = [x for x in range(0, len(frames), skip)][:fps]
        # center the indices, so we sample the middle of the gif...
        offset = int(upper_bound_index - indices[-1]) // 2
        if offset == 0:
            # can't shift
            break
        indices = [x + offset for x in indices]
        # is the new duration closer to the target than last guess?
        duration = (indices[-1] - indices[0]) * estimated_frame_time
        if abs(duration - target_duration) > abs(best_duration - target_duration):
            break
        best_indices = indices
        best_duration = duration
    return [frames[index] for index in best_indices]
159,047 | from typing import List, Union
from io import BytesIO
import PIL
from PIL import ImageSequence, Image
import requests
import os
import numpy as np
import imageio
The provided code snippet includes necessary dependencies for implementing the `get_crop_coordinates` function. Write a Python function `def get_crop_coordinates(old_size: tuple, new_size: tuple) -> tuple` to solve the following problem:
Calculate the crop coordinates after scaling an image to fit a new size. :param old_size: tuple of the form (width, height) representing the original size of the image. :param new_size: tuple of the form (width, height) representing the desired size after scaling. :return: tuple of the form (left, upper, right, lower) representing the center-crop coordinates in the resized image's pixel space.
Here is the function:
def get_crop_coordinates(old_size: tuple, new_size: tuple) -> tuple:
    """
    Calculate the crop coordinates after scaling an image to fit a new size.

    The image is assumed to be scaled (aspect ratio preserved) so that it
    covers new_size; the returned box is the center crop in the resized
    image's pixel space. Note: despite earlier docs, the coordinates are
    NOT normalized to [0, 1] — they are pixel offsets.

    :param old_size: tuple of the form (width, height) representing the original size of the image.
    :param new_size: tuple of the form (width, height) representing the desired size after scaling.
    :return: tuple (left, upper, right, lower) in resized-image pixels.
    :raises ValueError: if either argument is not a 2-tuple.
    """
    # Check if the input tuples have the right form (width, height)
    if not (isinstance(old_size, tuple) and isinstance(new_size, tuple) and
            len(old_size) == 2 and len(new_size) == 2):
        raise ValueError("old_size and new_size should be tuples of the form (width, height)")

    old_width, old_height = old_size
    new_width, new_height = new_size

    # Scale by the larger ratio so both dimensions cover the target box
    ratio_w = float(new_width) / old_width
    ratio_h = float(new_height) / old_height

    if ratio_w > ratio_h:
        # It must be fixed by width
        resize_width = new_width
        resize_height = round(old_height * ratio_w)
    else:
        # Fixed by height
        resize_width = round(old_width * ratio_h)
        resize_height = new_height

    # Center crop in the resized image's coordinate space (pixel offsets)
    left = (resize_width - new_width) / 2
    upper = (resize_height - new_height) / 2
    right = (resize_width + new_width) / 2
    lower = (resize_height + new_height) / 2

    return (left, upper, right, lower)
159,048 | from typing import List, Union
from io import BytesIO
import PIL
from PIL import ImageSequence, Image
import requests
import os
import numpy as np
import imageio
# Map output resolution -> table of [width, height] pairs (values are used as
# w/h pairs by best_aspect_ratio; keys presumably label the aspect ratio —
# confirm against aspect_ratio_to_1024_map). The 512 entry halves each
# dimension of the 1024 table.
res_to_aspect_map = {
    1024: aspect_ratio_to_1024_map,
    512: {key: [value[0] // 2, value[1] // 2] for key, value in aspect_ratio_to_1024_map.items()},
}
def best_aspect_ratio(aspect_ratio: float, resolution: int):
    """Pick the supported size bucket closest in aspect ratio.

    Looks up the bucket table for `resolution` in the module-level
    `res_to_aspect_map` and returns (ratio_str, [width, height]) where
    ratio_str is the chosen bucket's own width/height formatted to 2 decimals.
    """
    # Fixes: strips the dataset-dump residue fused onto the original return
    # line; renames the builtin-shadowing `map` local; replaces the magic
    # 99999999 sentinel with infinity.
    buckets = res_to_aspect_map[resolution]
    best_diff = float("inf")
    best_size = None
    # Strict '<' keeps the first bucket on ties, preserving dict-order
    # first-wins behavior.
    for size in buckets.values():
        diff = abs(aspect_ratio - size[0] / size[1])
        if diff < best_diff:
            best_diff = diff
            best_size = size
    ratio = best_size[0] / best_size[1]
    return f"{ratio:.2f}", best_size
159,049 | import inspect
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from hotshot_xl import HotshotPipelineXLOutput
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, ControlNetModel
from diffusers.models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
is_accelerate_available,
is_accelerate_version,
logging,
replace_example_docstring,
)
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.utils.torch_utils import randn_tensor, is_compiled_module
from ..models.unet import UNet3DConditionModel
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
from einops import rearrange
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `rescale_noise_cfg` function. Write a Python function `def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0)` to solve the following problem:
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
Here is the function:
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4

    With guidance_rescale=0.0 this is the identity on `noise_cfg`; with 1.0 the
    CFG output's per-sample std is fully matched to the text branch's.
    """
    # Fix: the original return line carried dataset-dump residue after the value.
    # Per-sample std over every non-batch dimension (keepdim so it broadcasts).
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # Rescale the results from guidance (fixes overexposure).
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # Mix with the original results from guidance by factor guidance_rescale
    # to avoid "plain looking" images.
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg
159,050 | import os
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from hotshot_xl import HotshotPipelineXLOutput
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL
from hotshot_xl.models.unet import UNet3DConditionModel
from diffusers.models.attention_processor import (
AttnProcessor2_0,
LoRAAttnProcessor2_0,
LoRAXFormersAttnProcessor,
XFormersAttnProcessor,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
is_accelerate_available,
is_accelerate_version,
logging,
replace_example_docstring,
)
from diffusers.utils.torch_utils import randn_tensor
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from tqdm import tqdm
from einops import repeat, rearrange
from diffusers.utils import deprecate, logging
import gc
The provided code snippet includes necessary dependencies for implementing the `rescale_noise_cfg` function. Write a Python function `def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0)` to solve the following problem:
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
Here is the function:
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
    """
    # Fix: the original return line carried dataset-dump residue after the value.
    def _per_sample_std(t):
        # Std over all non-batch dims; keepdim=True keeps it broadcastable.
        return t.std(dim=list(range(1, t.ndim)), keepdim=True)

    # Matching the CFG output's std to the text branch fixes overexposure...
    noise_pred_rescaled = noise_cfg * (_per_sample_std(noise_pred_text) / _per_sample_std(noise_cfg))
    # ...and blending back toward the raw CFG result avoids "plain looking" images.
    return guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
159,051 | import torch
from torch import nn
from .transformer_3d import Transformer3DModel
from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
from .transformer_temporal import TransformerTemporal
class CrossAttnDownBlock3D(nn.Module):
    """UNet down block: per layer, ResNet -> spatial cross-attention -> temporal attention.

    Builds `num_layers` triples of (ResnetBlock3D, Transformer3DModel,
    TransformerTemporal) and, when `add_downsample` is set, a trailing
    Downsample3D.
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        transformer_layers_per_block: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads=1,
        cross_attention_dim=1280,
        output_scale_factor=1.0,
        downsample_padding=1,
        add_downsample=True,
        dual_cross_attention=False,
        use_linear_projection=False,
        only_cross_attention=False,
        upcast_attention=False,
    ):
        super().__init__()
        resnets = []
        attentions = []
        temporal_attentions = []
        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads
        for i in range(num_layers):
            # Only the first resnet changes the channel count.
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock3D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            if dual_cross_attention:
                raise NotImplementedError
            attentions.append(
                Transformer3DModel(
                    num_attention_heads,
                    out_channels // num_attention_heads,
                    in_channels=out_channels,
                    num_layers=transformer_layers_per_block,
                    cross_attention_dim=cross_attention_dim,
                    norm_num_groups=resnet_groups,
                    use_linear_projection=use_linear_projection,
                    only_cross_attention=only_cross_attention,
                    upcast_attention=upcast_attention,
                )
            )
            # Temporal attention is hard-coded to 8 heads and self-attention
            # only (cross_attention_dim=None).
            temporal_attentions.append(
                TransformerTemporal(
                    num_attention_heads=8,
                    attention_head_dim=out_channels // 8,
                    in_channels=out_channels,
                    cross_attention_dim=None,
                )
            )
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)
        self.temporal_attentions = nn.ModuleList(temporal_attentions)
        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample3D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
                    )
                ]
            )
        else:
            self.downsamplers = None
        # Toggled externally; enables the checkpointed path in forward().
        self.gradient_checkpointing = False
    def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None,
                cross_attention_kwargs=None, enable_temporal_attentions: bool = True):
        """Run each resnet/attention/temporal-attention layer, then downsample.

        Returns (hidden_states, output_states); output_states collects the
        state after each layer (plus the downsampled state) for the UNet's
        skip connections. `attention_mask` and `cross_attention_kwargs` are
        accepted but unused in this implementation.
        """
        output_states = ()
        for resnet, attn, temporal_attention \
                in zip(self.resnets, self.attentions, self.temporal_attentions):
            if self.training and self.gradient_checkpointing:
                # Recompute activations during backward to save memory.
                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)
                    return custom_forward
                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb,
                                                                  use_reentrant=False)
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(attn, return_dict=False),
                    hidden_states,
                    encoder_hidden_states,
                    use_reentrant=False
                )[0]
                if enable_temporal_attentions and temporal_attention is not None:
                    hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temporal_attention),
                                                                      hidden_states, encoder_hidden_states,
                                                                      use_reentrant=False)
            else:
                hidden_states = resnet(hidden_states, temb)
                hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
                if temporal_attention and enable_temporal_attentions:
                    hidden_states = temporal_attention(hidden_states,
                                                       encoder_hidden_states=encoder_hidden_states)
            output_states += (hidden_states,)
        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
    def temporal_parameters(self) -> list:
        """Collect the parameters of just the temporal attention layers."""
        output = []
        for block in self.temporal_attentions:
            if block:
                output.extend(block.parameters())
        return output
class DownBlock3D(nn.Module):
    """UNet down block without spatial cross-attention.

    Per layer: ResnetBlock3D followed by a TransformerTemporal; optionally a
    trailing Downsample3D.
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor=1.0,
        add_downsample=True,
        downsample_padding=1,
    ):
        super().__init__()
        resnets = []
        temporal_attentions = []
        for i in range(num_layers):
            # Only the first resnet changes the channel count.
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock3D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            # Temporal attention: 8 heads, self-attention only.
            temporal_attentions.append(
                TransformerTemporal(
                    num_attention_heads=8,
                    attention_head_dim=out_channels // 8,
                    in_channels=out_channels,
                    cross_attention_dim=None
                )
            )
        self.resnets = nn.ModuleList(resnets)
        self.temporal_attentions = nn.ModuleList(temporal_attentions)
        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample3D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
                    )
                ]
            )
        else:
            self.downsamplers = None
        # Toggled externally; enables the checkpointed path in forward().
        self.gradient_checkpointing = False
    def forward(self, hidden_states, temb=None, encoder_hidden_states=None, enable_temporal_attentions: bool = True):
        """Run resnet + temporal attention per layer, then optional downsample.

        Returns (hidden_states, output_states) where output_states holds the
        per-layer (and downsampled) states for skip connections.
        """
        output_states = ()
        for resnet, temporal_attention in zip(self.resnets, self.temporal_attentions):
            if self.training and self.gradient_checkpointing:
                # Recompute activations during backward to save memory.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)
                    return custom_forward
                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb,
                                                                  use_reentrant=False)
                if enable_temporal_attentions and temporal_attention is not None:
                    hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(temporal_attention),
                                                                      hidden_states, encoder_hidden_states,
                                                                      use_reentrant=False)
            else:
                hidden_states = resnet(hidden_states, temb)
                if enable_temporal_attentions and temporal_attention:
                    hidden_states = temporal_attention(hidden_states, encoder_hidden_states=encoder_hidden_states)
            output_states += (hidden_states,)
        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
    def temporal_parameters(self) -> list:
        """Collect the parameters of just the temporal attention layers."""
        output = []
        for block in self.temporal_attentions:
            if block:
                output.extend(block.parameters())
        return output
def get_down_block(
    down_block_type,
    num_layers,
    in_channels,
    out_channels,
    temb_channels,
    add_downsample,
    resnet_eps,
    resnet_act_fn,
    transformer_layers_per_block=1,
    num_attention_heads=None,
    resnet_groups=None,
    cross_attention_dim=None,
    downsample_padding=None,
    dual_cross_attention=False,
    use_linear_projection=False,
    only_cross_attention=False,
    upcast_attention=False,
    resnet_time_scale_shift="default",
    resnet_skip_time_act=False,
    resnet_out_scale_factor=1.0,
    cross_attention_norm=None,
    attention_head_dim=None,
    downsample_type=None,
):
    """Factory for UNet3D down blocks.

    Strips an optional "UNetRes" prefix from `down_block_type` and constructs
    the matching block. Several parameters are accepted for config
    compatibility but are not forwarded by either branch.

    Raises:
        ValueError: for an unknown block type, or when `cross_attention_dim`
            is missing for the cross-attention variant.
    """
    # Fixes: strips dataset-dump residue fused onto the original final raise;
    # replaces the magic slice index 7 with len("UNetRes").
    # Configs may spell block types with a "UNetRes" prefix; normalize it away.
    if down_block_type.startswith("UNetRes"):
        down_block_type = down_block_type[len("UNetRes"):]
    if down_block_type == "DownBlock3D":
        return DownBlock3D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    if down_block_type == "CrossAttnDownBlock3D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
        return CrossAttnDownBlock3D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            transformer_layers_per_block=transformer_layers_per_block,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    raise ValueError(f"{down_block_type} does not exist.")
159,052 | import torch
from torch import nn
from .transformer_3d import Transformer3DModel
from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
from .transformer_temporal import TransformerTemporal
# NOTE(review): the two classes below survive only as signatures — the
# dataset extraction dropped every statement body, so this region is not
# valid Python as written. The implementations presumably mirror the
# CrossAttnDownBlock3D / DownBlock3D pair above (resnets + attention +
# temporal attention, with Upsample3D instead of Downsample3D); recover
# them from the original module before using this file.
class CrossAttnUpBlock3D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        prev_output_channel: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        transformer_layers_per_block: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads=1,
        cross_attention_dim=1280,
        output_scale_factor=1.0,
        add_upsample=True,
        dual_cross_attention=False,
        use_linear_projection=False,
        only_cross_attention=False,
        upcast_attention=False,
    ):
    def forward(
        self,
        hidden_states,
        res_hidden_states_tuple,
        temb=None,
        encoder_hidden_states=None,
        upsample_size=None,
        cross_attention_kwargs=None,
        attention_mask=None,
        enable_temporal_attentions: bool = True
    ):
    def create_custom_forward(module, return_dict=None):
    def custom_forward(*inputs):
    def temporal_parameters(self) -> list:
class UpBlock3D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        prev_output_channel: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor=1.0,
        add_upsample=True,
    ):
    def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None,
                enable_temporal_attentions: bool = True):
    def create_custom_forward(module):
    def custom_forward(*inputs):
    def temporal_parameters(self) -> list:
def get_up_block(
    up_block_type,
    num_layers,
    in_channels,
    out_channels,
    prev_output_channel,
    temb_channels,
    add_upsample,
    resnet_eps,
    resnet_act_fn,
    transformer_layers_per_block=1,
    num_attention_heads=None,
    resnet_groups=None,
    cross_attention_dim=None,
    dual_cross_attention=False,
    use_linear_projection=False,
    only_cross_attention=False,
    upcast_attention=False,
    resnet_time_scale_shift="default",
    resnet_skip_time_act=False,
    resnet_out_scale_factor=1.0,
    cross_attention_norm=None,
    attention_head_dim=None,
    upsample_type=None,
):
    """Factory for UNet3D up blocks.

    Strips an optional "UNetRes" prefix from `up_block_type` and constructs
    the matching block. Several parameters are accepted for config
    compatibility but are not forwarded by either branch.

    Raises:
        ValueError: for an unknown block type, or when `cross_attention_dim`
            is missing for the cross-attention variant.
    """
    # Fixes: strips dataset-dump residue fused onto the original final raise;
    # replaces the magic slice index 7 with len("UNetRes").
    if up_block_type.startswith("UNetRes"):
        up_block_type = up_block_type[len("UNetRes"):]
    if up_block_type == "UpBlock3D":
        return UpBlock3D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    if up_block_type == "CrossAttnUpBlock3D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
        return CrossAttnUpBlock3D(
            num_layers=num_layers,
            in_channels=in_channels,
            transformer_layers_per_block=transformer_layers_per_block,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    raise ValueError(f"{up_block_type} does not exist.")
159,053 | import sys
import os
import argparse
import torch
from hotshot_xl.pipelines.hotshot_xl_pipeline import HotshotXLPipeline
from hotshot_xl.pipelines.hotshot_xl_controlnet_pipeline import HotshotXLControlNetPipeline
from hotshot_xl.models.unet import UNet3DConditionModel
import torchvision.transforms as transforms
from einops import rearrange
from hotshot_xl.utils import save_as_gif, save_as_mp4, extract_gif_frames_from_midpoint, scale_aspect_fill
from torch import autocast
from diffusers import ControlNetModel
from contextlib import contextmanager
from diffusers.schedulers.scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from diffusers.schedulers.scheduling_euler_discrete import EulerDiscreteScheduler
def parse_args():
    """Parse command-line arguments for Hotshot-XL inference.

    Only --output is required; all other options default to the reference
    example (672x384 @ 8 frames, Euler-ancestral sampling).

    Returns:
        argparse.Namespace with the parsed options.
    """
    # Fix: the original return line carried dataset-dump residue after the value.
    parser = argparse.ArgumentParser(description="Hotshot-XL inference")
    parser.add_argument("--pretrained_path", type=str, default="hotshotco/Hotshot-XL")
    parser.add_argument("--xformers", action="store_true")
    parser.add_argument("--spatial_unet_base", type=str)
    parser.add_argument("--lora", type=str)
    parser.add_argument("--output", type=str, required=True)
    parser.add_argument("--steps", type=int, default=30)
    parser.add_argument("--prompt", type=str,
                        default="a bulldog in the captains chair of a spaceship, hd, high quality")
    parser.add_argument("--negative_prompt", type=str, default="blurry")
    parser.add_argument("--seed", type=int, default=455)
    parser.add_argument("--width", type=int, default=672)
    parser.add_argument("--height", type=int, default=384)
    parser.add_argument("--target_width", type=int, default=512)
    parser.add_argument("--target_height", type=int, default=512)
    parser.add_argument("--og_width", type=int, default=1920)
    parser.add_argument("--og_height", type=int, default=1080)
    parser.add_argument("--video_length", type=int, default=8)
    parser.add_argument("--video_duration", type=int, default=1000)
    parser.add_argument("--low_vram_mode", action="store_true")
    parser.add_argument('--scheduler', type=str, default='EulerAncestralDiscreteScheduler',
                        help='Name of the scheduler to use')
    parser.add_argument("--control_type", type=str, default=None, choices=["depth", "canny"])
    parser.add_argument("--controlnet_conditioning_scale", type=float, default=0.7)
    parser.add_argument("--control_guidance_start", type=float, default=0.0)
    parser.add_argument("--control_guidance_end", type=float, default=1.0)
    parser.add_argument("--gif", type=str, default=None)
    parser.add_argument("--precision", type=str, default='f16', choices=[
        'f16', 'f32', 'bf16'
    ])
    parser.add_argument("--autocast", type=str, default=None, choices=[
        'f16', 'bf16'
    ])
    return parser.parse_args()
159,054 | import sys
import os
import argparse
import torch
from hotshot_xl.pipelines.hotshot_xl_pipeline import HotshotXLPipeline
from hotshot_xl.pipelines.hotshot_xl_controlnet_pipeline import HotshotXLControlNetPipeline
from hotshot_xl.models.unet import UNet3DConditionModel
import torchvision.transforms as transforms
from einops import rearrange
from hotshot_xl.utils import save_as_gif, save_as_mp4, extract_gif_frames_from_midpoint, scale_aspect_fill
from torch import autocast
from diffusers import ControlNetModel
from contextlib import contextmanager
from diffusers.schedulers.scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from diffusers.schedulers.scheduling_euler_discrete import EulerDiscreteScheduler
# Shared torchvision transform used by to_pil_images to turn a frame tensor
# into a PIL image.
to_pil = transforms.ToPILImage()
def to_pil_images(video_frames: torch.Tensor, output_type='pil'):
    """Flatten batched videos into a flat list of frames.

    `video_frames` is laid out (batch, channels, frames, w, h); frames are
    emitted batch-by-batch in temporal order. When `output_type` is "pil"
    each frame is converted with the module-level `to_pil` transform,
    otherwise the raw (c, w, h) tensors are returned.
    """
    # Fix: the original return line carried dataset-dump residue after the value.
    video_frames = rearrange(video_frames, "b c f w h -> b f c w h")
    images = []
    for video in video_frames:
        for frame in video:
            images.append(to_pil(frame) if output_type == "pil" else frame)
    return images
159,055 | import sys
import os
import argparse
import torch
from hotshot_xl.pipelines.hotshot_xl_pipeline import HotshotXLPipeline
from hotshot_xl.pipelines.hotshot_xl_controlnet_pipeline import HotshotXLControlNetPipeline
from hotshot_xl.models.unet import UNet3DConditionModel
import torchvision.transforms as transforms
from einops import rearrange
from hotshot_xl.utils import save_as_gif, save_as_mp4, extract_gif_frames_from_midpoint, scale_aspect_fill
from torch import autocast
from diffusers import ControlNetModel
from contextlib import contextmanager
from diffusers.schedulers.scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from diffusers.schedulers.scheduling_euler_discrete import EulerDiscreteScheduler
def maybe_auto_cast(data_type):
    """Yield once, optionally inside a CUDA autocast region.

    With a truthy `data_type` the yield happens under
    `autocast("cuda", dtype=data_type)`; otherwise execution proceeds
    un-casted.
    NOTE(review): used as a `with` context by the surrounding script, so a
    @contextmanager decorator presumably applies at or above this definition
    — confirm against the original repository.
    """
    # Fix: the original final `yield` carried dataset-dump residue
    # (`yield | null`), which is a syntax error.
    if data_type:
        with autocast("cuda", dtype=data_type):
            yield
    else:
        yield
159,056 | import argparse
import math
import os
import traceback
from pathlib import Path
import time
import torch
import torch.utils.checkpoint
import torch.multiprocessing as mp
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import AutoencoderKL
from diffusers.optimization import get_scheduler
from diffusers import DDPMScheduler
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, CLIPTextModelWithProjection
import torch.nn.functional as F
import gc
from typing import Callable
from PIL import Image
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from hotshot_xl.models.unet import UNet3DConditionModel
from hotshot_xl.pipelines.hotshot_xl_pipeline import HotshotXLPipeline
from hotshot_xl.utils import get_crop_coordinates, res_to_aspect_map, scale_aspect_fill
from einops import rearrange
from torch.utils.data import Dataset, DataLoader
from datetime import timedelta
from accelerate.utils.dataclasses import InitProcessGroupKwargs
from diffusers.utils import is_wandb_available
# Training-resolution buckets: the 1024 table as-is, plus a 512 table with
# every width/height halved. NOTE(review): `aspect_ratio_to_1024_map` is not
# defined in this chunk — presumably imported or defined at module scope in
# the original script; confirm.
res_to_aspect_map = {
    1024: aspect_ratio_to_1024_map,
    512: {key: [value[0] // 2, value[1] // 2] for key, value in aspect_ratio_to_1024_map.items()},
}
def parse_args():
    """Parse command-line arguments for fine-tuning Hotshot-XL.

    Only --data_dir is required. Aspect-ratio choices come from the
    module-level `res_to_aspect_map` (512 table). Returns the populated
    argparse.Namespace.
    """
    # Fix: the original `return args` line carried dataset-dump residue
    # after the value.
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        default="hotshotco/Hotshot-XL",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--unet_resume_path",
        type=str,
        default=None,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--data_dir",
        type=str,
        required=True,
        help="Path to data to train.",
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="wandb",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
    )
    parser.add_argument("--run_validation_at_start", action="store_true")
    parser.add_argument("--max_vae_encode", type=int, default=None)
    parser.add_argument("--vae_b16", action="store_true")
    parser.add_argument("--disable_optimizer_restore", action="store_true")
    parser.add_argument(
        "--latent_nan_checking",
        action="store_true",
        help="Check if latents contain nans - important if vae is f16",
    )
    parser.add_argument(
        "--test_prompts",
        type=str,
        default=None,
    )
    parser.add_argument(
        "--project_name",
        type=str,
        default="fine-tune-hotshot-xl",
        help="the name of the run",
    )
    parser.add_argument(
        "--run_name",
        type=str,
        default="run-01",
        help="the name of the run",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="output",
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--noise_offset", type=float, default=0.05, help="The scale of noise offset.")
    parser.add_argument("--seed", type=int, default=111, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--aspect_ratio",
        type=str,
        default="1.75",
        choices=list(res_to_aspect_map[512].keys()),
        help="Aspect ratio to train at",
    )
    parser.add_argument("--xformers", action="store_true")
    parser.add_argument(
        "--train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument("--num_train_epochs", type=int, default=1)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=9999999,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--gradient_checkpointing",
        action="store_true",
        help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=5e-6,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        default=False,
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--logging_dir",
        type=str,
        default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default="no",
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose"
            "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
            "and an Nvidia Ampere GPU."
        ),
    )
    parser.add_argument(
        "--validate_every_steps",
        type=int,
        default=100,
        help="Run inference every",
    )
    parser.add_argument(
        "--save_n_steps",
        type=int,
        default=100,
        help="Save the model every n global_steps",
    )
    parser.add_argument(
        "--save_starting_step",
        type=int,
        default=100,
        help="The step from which it starts saving intermediary checkpoints",
    )
    parser.add_argument(
        "--nccl_timeout",
        type=int,
        help="nccl_timeout",
        default=3600
    )
    parser.add_argument("--snr_gamma", action="store_true")
    args = parser.parse_args()
    return args
159,057 | import argparse
import math
import os
import traceback
from pathlib import Path
import time
import torch
import torch.utils.checkpoint
import torch.multiprocessing as mp
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import AutoencoderKL
from diffusers.optimization import get_scheduler
from diffusers import DDPMScheduler
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, CLIPTextModelWithProjection
import torch.nn.functional as F
import gc
from typing import Callable
from PIL import Image
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from hotshot_xl.models.unet import UNet3DConditionModel
from hotshot_xl.pipelines.hotshot_xl_pipeline import HotshotXLPipeline
from hotshot_xl.utils import get_crop_coordinates, res_to_aspect_map, scale_aspect_fill
from einops import rearrange
from torch.utils.data import Dataset, DataLoader
from datetime import timedelta
from accelerate.utils.dataclasses import InitProcessGroupKwargs
from diffusers.utils import is_wandb_available
def add_time_ids(
        unet_config,
        unet_add_embedding,
        text_encoder_2: CLIPTextModelWithProjection,
        original_size: tuple,
        crops_coords_top_left: tuple,
        target_size: tuple,
        dtype: torch.dtype):
    """Assemble the SDXL additional time-embedding ids.

    Concatenates original_size + crops_coords_top_left + target_size into a
    (1, 6) tensor after validating that the UNet's add-embedding input layer
    was built for exactly this many conditioning values plus the pooled text
    projection.

    Raises:
        ValueError: if the UNet's expected embedding width does not match.
    """
    # Fix: the original return line carried dataset-dump residue after the value.
    add_time_ids = list(original_size + crops_coords_top_left + target_size)
    # Width the add-embedding MLP would need: one time-embed slot per
    # conditioning scalar plus the pooled text-encoder projection.
    passed_add_embed_dim = (
        unet_config.addition_time_embed_dim * len(add_time_ids) + text_encoder_2.config.projection_dim
    )
    expected_add_embed_dim = unet_add_embedding.linear_1.in_features
    if expected_add_embed_dim != passed_add_embed_dim:
        raise ValueError(
            f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
        )
    add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
    return add_time_ids
159,058 | import tensorflow as tf
import argparse
import numpy as np
import sys
import time
import math
from .utils import *
from .model import *
from .sampler import *
# Instantiate the selected model class for this run. NOTE(review):
# `model_dict`, `args`, `n_items` and `n_users` are defined earlier in the
# original script and are not visible in this chunk.
model = model_dict[args.model](args, n_items, n_users)
def evaluate(source, sess):
    """Average recall@k and NDCG@k over all batches yielded by `source`.

    Each batch is indexed as (users, inputs, positives) and fed to the
    module-level `model`'s placeholders; `model.recall_at_k` /
    `model.ndcg_at_k` are fetched and treated as per-batch sums, so dividing
    by the number of evaluated users yields the mean.
    Note: an empty `source` would divide by zero — callers pass at least one
    batch.

    Returns:
        [mean_recall, mean_ndcg]
    """
    # Fix: the original return line carried dataset-dump residue after the value.
    total_recall = 0.0
    total_ndcg = 0.0
    count = 0.0
    for batch in source:
        feed_dict = {model.inp: batch[1], model.user: batch[0], model.pos: batch[2]}
        recall, ndcg = sess.run([model.recall_at_k, model.ndcg_at_k], feed_dict=feed_dict)
        count += len(batch[0])
        total_recall += recall
        total_ndcg += ndcg
    val_recall = total_recall / count
    val_ndcg = total_ndcg / count
    return [val_recall, val_ndcg]
159,059 | import pandas as pd
import numpy as np
import random
import os
import json
import datetime as dt
from collections import Counter
# Root folder holding one subdirectory per dataset (used by data_generator).
data_path = 'data/'
# NOTE(review): the bodies of `Corpus.__init__` and `prepare_data` were lost
# in extraction — only their signatures survive, so this is not runnable as
# written. From their uses in data_generator, `Corpus` presumably builds an
# id<->index mapping (it exposes `dict.idx2item`) and `prepare_data` writes a
# '<dname>.json' split into `path_to_data`; recover the implementations from
# the original module.
class Corpus(object):
    def __init__(self, ItemId):
def prepare_data(corpus_item, corpus_user, data, dname, path_to_data):
def data_generator(args):
path_to_data= data_path + args.data + '/'
if not os.path.exists(path_to_data + args.data + '_train_tr.json'):
tr_df = pd.read_csv(path_to_data + args.data + '_train_tr.txt', sep='\t')
val_df = pd.read_csv(path_to_data + args.data + '_train_valid.txt', sep='\t')
test_df = pd.read_csv(path_to_data + args.data + '_test.txt', sep='\t')
corpus_item = Corpus(tr_df['ItemId'])
corpus_user = Corpus(tr_df['UserId'])
np.save(path_to_data + args.data + '_item_dict', np.asarray(corpus_item.dict.idx2item))
np.save(path_to_data + args.data + '_user_dict', np.asarray(corpus_user.dict.idx2item))
tr = tr_df.sort_values(['UserId', 'Time']).groupby('UserId')['ItemId'].apply(list).to_dict()
val = val_df.sort_values(['UserId', 'Time']).groupby('UserId')['ItemId'].apply(list).to_dict()
test = test_df.sort_values(['UserId', 'Time']).groupby('UserId')['ItemId'].apply(list).to_dict()
_ = prepare_data(corpus_item, corpus_user, tr, args.data + '_train_tr', path_to_data)
_ = prepare_data(corpus_item, corpus_user, val, args.data + '_train_valid',path_to_data)
_ = prepare_data(corpus_item, corpus_user, test, args.data + '_test', path_to_data)
with open(path_to_data + args.data + '_train_tr.json', 'r') as fp:
train_data = json.load(fp)
with open(path_to_data + args.data + '_train_valid.json', 'r') as fp:
val_data = json.load(fp)
with open(path_to_data + args.data + '_test.json', 'r') as fp:
test_data = json.load(fp)
item2idx = np.load(path_to_data + args.data + '_item_dict.npy')
user2idx = np.load(path_to_data + args.data + '_user_dict.npy')
n_items = item2idx.size
n_users = user2idx.size
return [train_data, val_data, test_data, n_items, n_users] | null |
159,060 | import pandas as pd
import numpy as np
import random
import os
import json
import datetime as dt
from collections import Counter
def prepare_eval_test(data, batch_size, max_test_len=100):
    """Build per-position evaluation batches from user->item-list `data`.

    For each user, every prefix of up to ``max_test_len`` items produces one
    sample: the prefix (left-zero-padded to ``max_test_len``) as input and the
    next item as the single target. (Dead commented-out experimental code
    from the original removed.)

    Args:
        data: dict mapping user-id strings to time-ordered item-id lists.
        batch_size: requested batch size; forced to at least 2.
        max_test_len: fixed input length and prefix cap.

    Returns:
        List of (user_ids, inputs, targets) tuples; a final smaller batch
        holds any remainder.
    """
    if batch_size < 2:
        batch_size = 2
    all_u = []
    all_inp = []
    all_pos = []
    for u in data.keys():
        itemids = data[u]
        # One sample per predictable position (the first item has no history).
        nb_test = min(max_test_len, len(itemids)) - 1
        all_u.extend([int(u)] * nb_test)
        for i in range(1, nb_test + 1):
            # Left-pad the i-item history with zeros to max_test_len.
            inp = np.zeros([max_test_len], dtype=np.int32)
            inp[max_test_len - i:] = itemids[:i]
            all_inp.append(inp)
            all_pos.append(itemids[i])
    num_batches = len(all_u) // batch_size
    batches = []
    for b in range(num_batches):
        lo, hi = b * batch_size, (b + 1) * batch_size
        batches.append((all_u[lo:hi], all_inp[lo:hi], all_pos[lo:hi]))
    # Emit the remainder as one final, smaller batch.
    if num_batches * batch_size < len(all_u):
        tail = num_batches * batch_size
        batches.append((all_u[tail:], all_inp[tail:], all_pos[tail:]))
    return batches
159,061 | import pandas as pd
import numpy as np
import random
import os
import json
import datetime as dt
from collections import Counter
data_path = 'data/'
def preprocess_session(dname):
    """Split a session dataset into train / validation / test files.

    Reads data/<dname>/<dname>.tsv (SessionId, ItemId, Time), drops short
    sessions and rare items, holds out sessions ending in the last two days
    as validation+test, and writes *_train_tr.txt, *_train_valid.txt and
    *_test.txt (tab-separated). Only 'tmall' is supported here.
    """
    data = pd.read_csv(data_path + dname + '/' + dname + '.tsv', sep='\t', header=None)
    if dname == 'tmall':
        data.columns = ['SessionId', 'ItemId', 'Time']
    else:
        raise NotImplementedError
    # Keep sessions longer than 2 events.
    session_lengths = data.groupby('SessionId').size()
    data = data[np.in1d(data.SessionId, session_lengths[session_lengths>2].index)]
    # Keep items occurring at least 10 times.
    item_supports = data.groupby('ItemId').size()
    data = data[np.in1d(data.ItemId, item_supports[item_supports>=10].index)]
    print('Unique items: {}'.format(data.ItemId.nunique()))
    session_lengths = data.groupby('SessionId').size()
    print('Average session length: {}'.format(session_lengths.mean()))
    # Re-filter: dropping items may have shortened some sessions.
    data = data[np.in1d(data.SessionId, session_lengths[session_lengths>2].index)]
    session_lengths = data.groupby('SessionId').size()
    print('Average session length after removing sessions with less than two event: {}'.format(session_lengths.mean()))
    # Sessions ending in the last 2 days (86400*2 — assumes Time is in unix
    # seconds) become the held-out set.
    session_max_times = data.groupby('SessionId').Time.max()
    tmax = data.Time.max()
    session_train = session_max_times[session_max_times < tmax-86400*2].index
    session_test = session_max_times[session_max_times >= tmax-86400*2].index
    train = data[np.in1d(data.SessionId, session_train)]
    test = data[np.in1d(data.SessionId, session_test)]
    # Evaluate only on items seen during training.
    test = test[np.in1d(test.ItemId, train.ItemId)]
    tslength = test.groupby('SessionId').size()
    test = test[np.in1d(test.SessionId, tslength[tslength>2].index)]
    # Random 50/50 split of held-out sessions into test and validation.
    test_session = test.SessionId.unique()
    test_session_ = np.random.choice(test_session, int(len(test_session) / 2), replace=False)
    test_ = test.loc[test['SessionId'].isin(test_session_)]
    val_ = test.loc[~test['SessionId'].isin(test_session_)]
    print('Train size: {}'.format(len(train)))
    print('Dev size: {}'.format(len(val_)))
    print('Test size: {}'.format(len(test_)))
    header = columns = ['SessionId', 'ItemId', 'Time']
    train.to_csv(data_path + dname + '/' + dname + '_train_tr.txt', sep='\t', columns=columns, header=header, index=False)
    test_.to_csv(data_path + dname + '/' + dname + '_test.txt', sep='\t',columns=columns, header=header, index=False)
    val_.to_csv(data_path + dname + '/' + dname + '_train_valid.txt', sep='\t', columns=columns, header=header, index=False)
159,062 | import pandas as pd
import numpy as np
import random
import os
import json
import datetime as dt
from collections import Counter
data_path = 'data/'
def preprocess_sequence(dname):
    """Split a sequential-recommendation dataset into train/valid/test files.

    Reads data/<dname>/<dname>.tsv, normalises per-dataset column layouts to
    (user, item, Time), drops inactive users and rare items, uses the last
    10% of the time span as the held-out period, and writes *_train_tr.txt,
    *_train_valid.txt and *_test.txt.
    """
    data = pd.read_csv(data_path + dname + '/' + dname + '.tsv', sep='\t', header=None)
    # Per-dataset column layouts; timestamps are parsed to unix seconds.
    if dname == 'delicious':
        data.columns = ['user', 'item', 'TimeStr']
        data['Time'] = data.TimeStr.apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%d').timestamp())
        del(data['TimeStr'])
    elif dname == 'googlelocal' or dname == 'elec' or dname == 'game' or dname == 'ml1m' or \
        dname == 'home' or dname == 'beauty' or dname == 'book' or dname == 'app' or dname == 'clothing':
        data.columns = ['user', 'item', 'Time']
    elif dname == 'gowalla':
        data.columns = ['user', 'TimeStr', 'lat', 'long', 'item']
        data['Time'] = data.TimeStr.apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%dT%H:%M:%SZ').timestamp())
        del(data['lat'])
        del(data['long'])
        del(data['TimeStr'])
    elif dname == 'brightkite':
        data.columns = ['user', 'item', 'TimeStr']
        data['Time'] = data.TimeStr.apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%dT%H:%M:%SZ').timestamp())
        del(data['TimeStr'])
    else:
        raise NotImplementedError
    event_lengths = data.groupby('user').size()
    print('Average check-ins per user: {}'.format(event_lengths.mean()))
    # Keep users with more than 10 events.
    data = data[np.in1d(data.user, event_lengths[event_lengths>10].index)]
    item_supports = data.groupby('item').size()
    # 50 for delicious, 10 for gowalla
    data = data[np.in1d(data.item, item_supports[item_supports>=10].index)]
    print('Unique items: {}'.format(data.item.nunique()))
    # Re-filter users: dropping items may have reduced their event counts.
    event_lengths = data.groupby('user').size()
    data = data[np.in1d(data.user, event_lengths[event_lengths>=10].index)]
    event_lengths = data.groupby('user').size()
    print('Average check-ins per user after removing sessions with one event: {}'.format(event_lengths.mean()))
    # Last 10% of the time span is held out for validation and test.
    tmin = data.Time.min()
    tmax = data.Time.max()
    pivot = (tmax-tmin) * 0.9 + tmin
    train = data.loc[data['Time'] < pivot]
    test = data.loc[data['Time'] >= pivot]
    tr_event_lengths = train.groupby('user').size()
    train = train[np.in1d(train.user, tr_event_lengths[tr_event_lengths>3].index)]
    print('Average (train) check-ins per user: {}'.format(tr_event_lengths.mean()))
    # Only predict users and items that appear in the training split.
    user_to_predict = train.user.unique()
    test = test[test['user'].isin(user_to_predict)]
    item_to_predict = train.item.unique()
    test = test[test['item'].isin(item_to_predict)]
    test_event_lengths = test.groupby('user').size()
    test = test[np.in1d(test.user, test_event_lengths[test_event_lengths>3].index)]
    print('Average (test) check-ins per user: {}'.format(test_event_lengths.mean()))
    # Random 50/50 user split of the held-out period into test and validation.
    test_user = test.user.unique()
    test_user_ = np.random.choice(test_user, int(len(test_user) / 2), replace=False)
    test_ = test.loc[test['user'].isin(test_user_)]
    val_ = test.loc[~test['user'].isin(test_user_)]
    print('Train size: {}'.format(len(train)))
    print('Dev size: {}'.format(len(val_)))
    print('Test size: {}'.format(len(test_)))
    header = columns = ['user', 'item', 'Time']
    # Written header names differ from the in-memory column names on purpose.
    header = ['UserId', 'ItemId', 'Time']
    train.to_csv(data_path + dname + '/' + dname + '_train_tr.txt', sep='\t', columns=columns, header=header, index=False)
    test_.to_csv(data_path + dname + '/' + dname + '_test.txt', sep='\t',columns=columns, header=header, index=False)
    val_.to_csv(data_path + dname + '/' + dname + '_train_valid.txt', sep='\t', columns=columns, header=header, index=False)
159,063 | import numpy as np
from multiprocessing import Process, Queue
def random_neg(pos, n, s):
    '''
    p: positive one
    n: number of items
    s: size of samples.
    '''
    # Rejection-sample s distinct item ids from [1, n], skipping anything
    # in `pos` and anything already drawn.
    sampled = set()
    while len(sampled) < s:
        candidate = np.random.randint(1, n + 1)
        if candidate not in pos and candidate not in sampled:
            sampled.add(candidate)
    return list(sampled)
The provided code snippet includes necessary dependencies for implementing the `sample_function` function. Write a Python function `def sample_function(data, n_items, n_users, batch_size, max_len, neg_size, result_queue, SEED, neg_method='rand')` to solve the following problem:
data: list of train data, key: user, value: a set of all user's clicks. tensors: list of train tensors, each element of list is also a list. masks: list of train masks, each element of list is also a list. batch_size: number of samples in a batch. neg_size: number of negative samples.
Here is the function:
def sample_function(data, n_items, n_users, batch_size, max_len, neg_size, result_queue, SEED, neg_method='rand'):
    '''
    data: list of train data, key: user, value: a set of all user's clicks.
    tensors: list of train tensors, each element of list is also a list.
    masks: list of train masks, each element of list is also a list.
    batch_size: number of samples in a batch.
    neg_size: number of negative samples.
    '''
    # Users are sampled proportionally to their number of interactions.
    num_samples = np.array([len(data[str(u)]) for u in range(1, n_users+1)])
    prob_ = num_samples / (1.0 * np.sum(num_samples))
    def sample():
        '''
        # sample a user based on behavior frequency.
        #TODO: more efficient non-uniform sampling method.
        Compute utility lists for non-uniform sampling from discrete distributions.
        Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
        for details
        '''
        user = np.random.choice(a=range(1,1+n_users), p=prob_)
        u = str(user)
        # sample a slice from user u randomly.
        idx = np.random.randint(1, len(data[u]))
        start = 0 if idx >= max_len else max_len - idx
        len_of_item = max_len - start
        # Assume max_len is set to 5, and we want to predict the 4-th entry in the sequence
        # Then the length of historical items is 3.
        # The following code will return the array like [0, 0, x, x, x]
        # i.e. the zero is padded to the left.
        seq = np.zeros([max_len], dtype=np.int32)
        seq[start:] = data[u][idx-len_of_item:idx]
        # Single next-item target; negatives drawn uniformly, excluding it.
        pos = data[u][idx]
        neg = np.zeros([neg_size], dtype=np.int32)
        if neg_method == 'rand':
            neg = random_neg([pos], n_items, neg_size)
        else:
            raise NotImplementedError
        return (user, seq, pos, neg)
    np.random.seed(SEED)
    # Producer loop: runs forever, pushing batches (transposed into
    # per-field lists of users/seqs/targets/negatives) onto the queue.
    while True:
        one_batch = []
        for i in range(batch_size):
            one_batch.append(sample())
        result_queue.put(list(zip(*one_batch)))
159,064 | import tensorflow as tf
import sys
import os
import numpy as np
def log2(x):
    """Elementwise base-2 logarithm via change of base (tf.log is the
    natural log in TF 1.x)."""
    numerator = tf.log(x)
    denominator = tf.log(tf.constant(2, dtype=numerator.dtype))
    return numerator / denominator
159,065 | import tensorflow as tf
import argparse
import numpy as np
import sys
import time
import math
from .utils import *
from .model import *
from .sampler import *
model = NeuralSeqRecommender(args, n_items, n_users)
def evaluate(source, sess):
    """Compute hit@k and NDCG@k over `source` batches using the global `model`.

    Dropout is fed as 0. for evaluation. Totals are normalised by the
    number of targets reported by the model (model.num_target).
    Returns [hit, ndcg].
    """
    total_hit_k = 0.0
    total_ndcg_k = 0.0
    count = 0.0
    for batch in source:
        # batch[1] = input sequences, batch[2] = target sequences
        feed_dict = {model.inp: batch[1], model.dropout: 0.}
        feed_dict[model.pos] = batch[2]
        hit, ndcg, n_target = sess.run([model.hit_at_k, model.ndcg_at_k, model.num_target], feed_dict=feed_dict)
        count += n_target
        total_hit_k += hit
        total_ndcg_k += ndcg
    val_hit = total_hit_k / count
    val_ndcg = total_ndcg_k / count
    return [val_hit, val_ndcg]
159,066 | import pandas as pd
import numpy as np
import random
import os
import json
import datetime as dt
from collections import Counter
data_path = 'data/'
class Corpus(object):
    """Vocabulary wrapper: builds a Dictionary over a column of raw ids."""
    def __init__(self, ItemId):
        # Register every raw id, then finalise the index mappings.
        vocab = Dictionary()
        for raw_id in ItemId:
            vocab.add_item(raw_id)
        vocab.prep_dict()
        self.dict = vocab
def prepare_data(corpus_item, corpus_user, data, dname, path_to_data):
    """Re-key `data` (user -> item-id list) from raw ids to integer indices.

    Uses the user/item vocabularies' item2idx maps, writes the remapped dict
    to `<path_to_data><dname>.json`, and returns it.
    """
    ret = {}
    user_str_ids = data.keys()
    for u in user_str_ids:
        u_int_id = corpus_user.dict.item2idx[u]
        i_int_ids = []
        item_str_ids = data[u]
        for i in item_str_ids:
            i_int_ids.append(corpus_item.dict.item2idx[i])
        ret[u_int_id] = i_int_ids
    # Cache the remapped interactions for later runs.
    with open(path_to_data + dname + '.json', 'w') as fp:
        json.dump(ret, fp)
    return ret
def data_generator(args):
    """Load cached train/valid/test dicts for `args.data`, building on first use.

    First run: reads the raw *_train_tr/_train_valid/_test .txt files, builds
    item/user vocabularies from the training split only, saves the id maps as
    .npy, and caches remapped user->item-list dicts as JSON via prepare_data.
    Returns [train_data, val_data, test_data, n_items, n_users].
    """
    path_to_data= data_path + args.data + '/'
    # Build the JSON caches only if they are missing.
    if not os.path.exists(path_to_data + args.data + '_train_tr.json'):
        tr_df = pd.read_csv(path_to_data + args.data + '_train_tr.txt', sep='\t')
        val_df = pd.read_csv(path_to_data + args.data + '_train_valid.txt', sep='\t')
        test_df = pd.read_csv(path_to_data + args.data + '_test.txt', sep='\t')
        # Vocabularies come from the training split only.
        corpus_item = Corpus(tr_df['ItemId'])
        corpus_user = Corpus(tr_df['UserId'])
        np.save(path_to_data + args.data + '_item_dict', np.asarray(corpus_item.dict.idx2item))
        np.save(path_to_data + args.data + '_user_dict', np.asarray(corpus_user.dict.idx2item))
        # Per-user, time-ordered item lists.
        tr = tr_df.sort_values(['UserId', 'Time']).groupby('UserId')['ItemId'].apply(list).to_dict()
        val = val_df.sort_values(['UserId', 'Time']).groupby('UserId')['ItemId'].apply(list).to_dict()
        test = test_df.sort_values(['UserId', 'Time']).groupby('UserId')['ItemId'].apply(list).to_dict()
        _ = prepare_data(corpus_item, corpus_user, tr, args.data + '_train_tr', path_to_data)
        _ = prepare_data(corpus_item, corpus_user, val, args.data + '_train_valid',path_to_data)
        _ = prepare_data(corpus_item, corpus_user, test, args.data + '_test', path_to_data)
    with open(path_to_data + args.data + '_train_tr.json', 'r') as fp:
        train_data = json.load(fp)
    with open(path_to_data + args.data + '_train_valid.json', 'r') as fp:
        val_data = json.load(fp)
    with open(path_to_data + args.data + '_test.json', 'r') as fp:
        test_data = json.load(fp)
    item2idx = np.load(path_to_data + args.data + '_item_dict.npy')
    user2idx = np.load(path_to_data + args.data + '_user_dict.npy')
    n_items = item2idx.size
    n_users = user2idx.size
    return [train_data, val_data, test_data, n_items, n_users]
159,067 | import pandas as pd
import numpy as np
import random
import os
import json
import datetime as dt
from collections import Counter
def prepare_eval_test(data, batch_size, max_test_len=100):
    """Build fixed-length evaluation batches, one sample per user.

    For each user the first ``max_test_len`` items become the input row and
    the same row shifted by one becomes the target row (both zero-padded on
    the right). Returns a list of (user_ids, inputs, targets) tuples, with a
    final smaller batch for any remainder.
    """
    if batch_size < 2:
        batch_size = 2
    users, inputs, targets = [], [], []
    for uid in data.keys():
        seq = data[uid]
        users.append(int(uid))
        length = min(max_test_len, len(seq))
        inp = np.zeros([max_test_len], dtype=np.int32)
        pos = np.zeros([max_test_len], dtype=np.int32)
        inp[:length] = seq[:length]
        pos[:length - 1] = seq[1:length]
        inputs.append(inp)
        targets.append(pos)
    full = len(users) // batch_size
    batches = []
    for b in range(full):
        sl = slice(b * batch_size, (b + 1) * batch_size)
        batches.append((users[sl], inputs[sl], targets[sl]))
    if full * batch_size < len(users):
        rest = slice(full * batch_size, None)
        batches.append((users[rest], inputs[rest], targets[rest]))
    return batches
159,068 | import pandas as pd
import numpy as np
import random
import os
import json
import datetime as dt
from collections import Counter
data_path = 'data/'
The provided code snippet includes necessary dependencies for implementing the `preprocess_session` function. Write a Python function `def preprocess_session(dname)` to solve the following problem:
The model can be applied for session-based recommendation, where a sequence is seen as a user's history. The data should contain three columns, i.e., SessionId, ItemId and Time with Tab as separator.
Here is the function:
def preprocess_session(dname):
    '''
    The model can be applied for session-based recommendation, where a sequence is seen as a user's history.
    The data should contain three columns, i.e., SessionId, ItemId and Time with Tab as separator.
    '''
    data = pd.read_csv(data_path + dname + '/' + dname + '.tsv', sep='\t', header=None)
    data.columns = ['SessionId', 'ItemId', 'Time']
    # Keep sessions longer than 2 events.
    session_lengths = data.groupby('SessionId').size()
    data = data[np.in1d(data.SessionId, session_lengths[session_lengths>2].index)]
    # Keep items occurring at least 10 times.
    item_supports = data.groupby('ItemId').size()
    data = data[np.in1d(data.ItemId, item_supports[item_supports>=10].index)]
    print('Unique items: {}'.format(data.ItemId.nunique()))
    session_lengths = data.groupby('SessionId').size()
    print('Average session length: {}'.format(session_lengths.mean()))
    # Re-filter: dropping items may have shortened some sessions.
    data = data[np.in1d(data.SessionId, session_lengths[session_lengths>2].index)]
    session_lengths = data.groupby('SessionId').size()
    print('Average session length after removing sessions with less than two event: {}'.format(session_lengths.mean()))
    session_max_times = data.groupby('SessionId').Time.max()
    tmax = data.Time.max()
    session_train = session_max_times[session_max_times < tmax-86400*2].index # We preserve sessions of last two days as validation and test data
    session_test = session_max_times[session_max_times >= tmax-86400*2].index
    train = data[np.in1d(data.SessionId, session_train)]
    test = data[np.in1d(data.SessionId, session_test)]
    # Evaluate only on items seen during training.
    test = test[np.in1d(test.ItemId, train.ItemId)]
    tslength = test.groupby('SessionId').size()
    test = test[np.in1d(test.SessionId, tslength[tslength>2].index)]
    # Random 50/50 split of held-out sessions into test and validation.
    test_session = test.SessionId.unique()
    test_session_ = np.random.choice(test_session, int(len(test_session) / 2), replace=False)
    test_ = test.loc[test['SessionId'].isin(test_session_)]
    val_ = test.loc[~test['SessionId'].isin(test_session_)]
    print('Train size: {}'.format(len(train)))
    print('Dev size: {}'.format(len(val_)))
    print('Test size: {}'.format(len(test_)))
    columns = ['SessionId', 'ItemId', 'Time']
    # Output headers are renamed so sessions are read back as "users".
    header = ['UserId', 'ItemId', 'Time']
    train.to_csv(data_path + dname + '/' + dname + '_train_tr.txt', sep='\t', columns=columns, header=header, index=False)
    test_.to_csv(data_path + dname + '/' + dname + '_test.txt', sep='\t',columns=columns, header=header, index=False)
    val_.to_csv(data_path + dname + '/' + dname + '_train_valid.txt', sep='\t', columns=columns, header=header, index=False)
159,069 | import pandas as pd
import numpy as np
import random
import os
import json
import datetime as dt
from collections import Counter
data_path = 'data/'
The provided code snippet includes necessary dependencies for implementing the `preprocess_sequence` function. Write a Python function `def preprocess_sequence(dname)` to solve the following problem:
For sequential recommendation. The data should contain three columns, i.e., user, item and Time with Tab as separator.
Here is the function:
def preprocess_sequence(dname):
    '''
    For sequential recommendation.
    The data should contain three columns, i.e., user, item and Time with Tab as separator.
    '''
    data = pd.read_csv(data_path + dname + '/' + dname + '.tsv', sep='\t', header=None)
    data.columns = ['user', 'item', 'Time']
    event_lengths = data.groupby('user').size()
    print('Average check-ins per user: {}'.format(event_lengths.mean()))
    # Keep users with more than 10 events.
    data = data[np.in1d(data.user, event_lengths[event_lengths>10].index)]
    item_supports = data.groupby('item').size()
    # 50 for delicious, 10 for gowalla
    data = data[np.in1d(data.item, item_supports[item_supports>=10].index)]
    print('Unique items: {}'.format(data.item.nunique()))
    # Re-filter users: dropping items may have reduced their counts.
    event_lengths = data.groupby('user').size()
    data = data[np.in1d(data.user, event_lengths[event_lengths>=10].index)]
    event_lengths = data.groupby('user').size()
    print('Average check-ins per user after removing sessions with one event: {}'.format(event_lengths.mean()))
    tmin = data.Time.min()
    tmax = data.Time.max()
    pivot = (tmax-tmin) * 0.9 + tmin # Preserve last 10% as validation and test data
    train = data.loc[data['Time'] < pivot]
    test = data.loc[data['Time'] >= pivot]
    tr_event_lengths = train.groupby('user').size()
    train = train[np.in1d(train.user, tr_event_lengths[tr_event_lengths>3].index)]
    print('Average (train) check-ins per user: {}'.format(tr_event_lengths.mean()))
    # Only predict users and items seen during training.
    user_to_predict = train.user.unique()
    test = test[test['user'].isin(user_to_predict)]
    item_to_predict = train.item.unique()
    test = test[test['item'].isin(item_to_predict)]
    test_event_lengths = test.groupby('user').size()
    test = test[np.in1d(test.user, test_event_lengths[test_event_lengths>3].index)]
    print('Average (test) check-ins per user: {}'.format(test_event_lengths.mean()))
    # Random 50/50 user split of the held-out period into test and validation.
    test_user = test.user.unique()
    test_user_ = np.random.choice(test_user, int(len(test_user) / 2), replace=False)
    test_ = test.loc[test['user'].isin(test_user_)]
    val_ = test.loc[~test['user'].isin(test_user_)]
    print('Train size: {}'.format(len(train)))
    print('Dev size: {}'.format(len(val_)))
    print('Test size: {}'.format(len(test_)))
    columns = ['user', 'item', 'Time']
    # Output headers use the canonical UserId/ItemId names.
    header = ['UserId', 'ItemId', 'Time']
    train.to_csv(data_path + dname + '/' + dname + '_train_tr.txt', sep='\t', columns=columns, header=header, index=False)
    test_.to_csv(data_path + dname + '/' + dname + '_test.txt', sep='\t',columns=columns, header=header, index=False)
    val_.to_csv(data_path + dname + '/' + dname + '_train_valid.txt', sep='\t', columns=columns, header=header, index=False)
159,070 | import numpy as np
from multiprocessing import Process, Queue
def random_neg(pos, n, s):
    '''
    p: positive one
    n: number of items
    s: size of samples.
    '''
    # Draw s distinct ids from [1, n], redrawing on collisions with the
    # positive set or with already-chosen negatives.
    excluded = set(pos)
    chosen = set()
    for _ in range(s):
        candidate = np.random.randint(1, n + 1)
        while candidate in excluded or candidate in chosen:
            candidate = np.random.randint(1, n + 1)
        chosen.add(candidate)
    return list(chosen)
The provided code snippet includes necessary dependencies for implementing the `sample_function` function. Write a Python function `def sample_function(data, n_items, n_users, batch_size, max_len, neg_size, result_queue, SEED, neg_method='rand')` to solve the following problem:
data: list of train data, key: user, value: a set of all user's clicks. tensors: list of train tensors, each element of list is also a list. masks: list of train masks, each element of list is also a list. batch_size: number of samples in a batch. neg_size: number of negative samples.
Here is the function:
def sample_function(data, n_items, n_users, batch_size, max_len, neg_size, result_queue, SEED, neg_method='rand'):
    '''
    data: list of train data, key: user, value: a set of all user's clicks.
    tensors: list of train tensors, each element of list is also a list.
    masks: list of train masks, each element of list is also a list.
    batch_size: number of samples in a batch.
    neg_size: number of negative samples.
    '''
    # Users are sampled proportionally to their number of interactions.
    num_samples = np.array([len(data[str(u)]) for u in range(1, n_users+1)])
    prob_ = num_samples / (1.0 * np.sum(num_samples))
    def sample():
        # sample a user based on behavior frequency.
        user = np.random.choice(a=range(1,1+n_users), p=prob_)
        u = str(user)
        # sample a slice from user u randomly.
        # Unlike the single-target variant above, this yields a full window:
        # seq holds up to max_len consecutive items (right-zero-padded) and
        # pos is the same window shifted by one, with per-position negatives.
        if len(data[u]) <= max_len:
            idx = 0
        else:
            idx = np.random.randint(0, len(data[u])-max_len+1)
        seq = np.zeros([max_len], dtype=np.int32)
        for i, itemid in enumerate(data[u][idx:idx+max_len]):
            seq[i] = itemid
        pos = np.zeros([max_len], dtype=np.int32)
        neg = np.zeros([max_len, neg_size], dtype=np.int32)
        # Number of positions that actually have a next item to predict.
        l = len(data[u]) - idx - 1
        l = min(l, max_len)
        for j in range(l):
            pos[j] = data[u][idx+1+j]
            if neg_method == 'rand':
                neg[j,:] = random_neg([pos[j]], n_items, neg_size)
            else: # Currently we only support random negative samples.
                raise NotImplementedError
        return (user, seq, pos, neg)
    np.random.seed(SEED)
    # Producer loop: runs forever, pushing transposed batches onto the queue.
    while True:
        one_batch = []
        for i in range(batch_size):
            one_batch.append(sample())
        result_queue.put(list(zip(*one_batch)))
159,071 | import tensorflow as tf
import sys
from .base import LSTMNet
from .base import TemporalConvNet
from .base import TransformerNet
def log2(x):
    """Elementwise base-2 logarithm via change of base (tf.log is the
    natural log in TF 1.x)."""
    numerator = tf.log(x)
    denominator = tf.log(tf.constant(2, dtype=numerator.dtype))
    return numerator / denominator
159,072 | import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
The provided code snippet includes necessary dependencies for implementing the `normalize` function. Write a Python function `def normalize(inputs, epsilon = 1e-8, scope="ln", reuse=None)` to solve the following problem:
Applies layer normalization. Args: inputs: A tensor with 2 or more dimensions, where the first dimension has `batch_size`. epsilon: A floating number. A very small number for preventing ZeroDivision Error. scope: Optional scope for `variable_scope`. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: A tensor with the same shape and data dtype as `inputs`.
Here is the function:
def normalize(inputs,
              epsilon = 1e-8,
              scope="ln",
              reuse=None):
    '''Applies layer normalization.
    Args:
      inputs: A tensor with 2 or more dimensions, where the first dimension has
        `batch_size`.
      epsilon: A floating number. A very small number for preventing ZeroDivision Error.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.
    Returns:
      A tensor with the same shape and data dtype as `inputs`.
    '''
    with tf.variable_scope(scope, reuse=reuse):
        inputs_shape = inputs.get_shape()
        params_shape = inputs_shape[-1:]
        # Normalise over the last (feature) axis only.
        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
        # Learnable shift (beta) and scale (gamma), one per feature.
        beta= tf.Variable(tf.zeros(params_shape))
        gamma = tf.Variable(tf.ones(params_shape))
        normalized = (inputs - mean) / ( (variance + epsilon) ** (.5) )
        outputs = gamma * normalized + beta
    return outputs
159,073 | import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
The provided code snippet includes necessary dependencies for implementing the `multihead_attention` function. Write a Python function `def multihead_attention(queries, keys, num_units=None, num_heads=8, dropout_keep_prob=1.0, causality=False, scope="multihead_attention", reuse=None, with_qk=False)` to solve the following problem:
Applies multihead attention. Args: queries: A 3d tensor with shape of [N, T_q, C_q]. keys: A 3d tensor with shape of [N, T_k, C_k]. num_units: A scalar. Attention size. dropout_rate: A floating point number. is_training: Boolean. Controller of mechanism for dropout. causality: Boolean. If true, units that reference the future are masked. num_heads: An int. Number of heads. scope: Optional scope for `variable_scope`. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns A 3d tensor with shape of (N, T_q, C)
Here is the function:
def multihead_attention(queries,
                        keys,
                        num_units=None,
                        num_heads=8,
                        dropout_keep_prob=1.0,
                        causality=False,
                        scope="multihead_attention",
                        reuse=None,
                        with_qk=False):
    '''
    Applies multihead attention.
    Args:
      queries: A 3d tensor with shape of [N, T_q, C_q].
      keys: A 3d tensor with shape of [N, T_k, C_k].
      num_units: A scalar. Attention size.
      dropout_keep_prob: A floating point keep probability for attention dropout.
      causality: Boolean. If true, units that reference the future are masked.
      num_heads: An int. Number of heads.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.
      with_qk: If True, return the (Q, K) projections instead of the output.
    Returns
      A 3d tensor with shape of (N, T_q, C)
    '''
    with tf.variable_scope(scope, reuse=reuse):
        # Set the fall back option for num_units
        if num_units is None:
            # BUGFIX: `as_list` is a method; the original indexed the bound
            # method itself (`as_list[-1]`), raising TypeError whenever
            # num_units was left as None.
            num_units = queries.get_shape().as_list()[-1]
        # Linear projections (no activation, as in SASRec).
        Q = tf.layers.dense(queries, num_units, activation=None) # (N, T_q, C)
        K = tf.layers.dense(keys, num_units, activation=None) # (N, T_k, C)
        V = tf.layers.dense(keys, num_units, activation=None) # (N, T_k, C)
        # Split into heads along the channel axis, stack along the batch axis.
        Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0) # (h*N, T_q, C/h)
        K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)
        V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)
        # Scaled dot-product attention logits.
        outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1])) # (h*N, T_q, T_k)
        outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5)
        # Key masking: all-zero key vectors are treated as padding.
        key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1))) # (N, T_k)
        key_masks = tf.tile(key_masks, [num_heads, 1]) # (h*N, T_k)
        key_masks = tf.tile(tf.expand_dims(key_masks, 1), [1, tf.shape(queries)[1], 1]) # (h*N, T_q, T_k)
        paddings = tf.ones_like(outputs)*(-2**32+1)
        outputs = tf.where(tf.equal(key_masks, 0), paddings, outputs) # (h*N, T_q, T_k)
        # Causality = Future blinding via a lower-triangular mask.
        if causality:
            diag_vals = tf.ones_like(outputs[0, :, :]) # (T_q, T_k)
            try:
                tril = tf.contrib.linalg.LinearOperatorLowerTriangular(diag_vals).to_dense() # (T_q, T_k)
            except:
                tril = tf.contrib.linalg.LinearOperatorTriL(diag_vals).to_dense() # (T_q, T_k)
            masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(outputs)[0], 1, 1]) # (h*N, T_q, T_k)
            paddings = tf.ones_like(masks)*(-2**32+1)
            outputs = tf.where(tf.equal(masks, 0), paddings, outputs) # (h*N, T_q, T_k)
        # Attention weights.
        outputs = tf.nn.softmax(outputs) # (h*N, T_q, T_k)
        # Query masking: zero out rows for all-zero (padding) query vectors.
        query_masks = tf.sign(tf.abs(tf.reduce_sum(queries, axis=-1))) # (N, T_q)
        query_masks = tf.tile(query_masks, [num_heads, 1]) # (h*N, T_q)
        query_masks = tf.tile(tf.expand_dims(query_masks, -1), [1, 1, tf.shape(keys)[1]]) # (h*N, T_q, T_k)
        outputs *= query_masks # broadcasting. (N, T_q, C)
        # Attention dropout.
        outputs = tf.nn.dropout(outputs, keep_prob=dropout_keep_prob)
        # Weighted sum of values.
        outputs = tf.matmul(outputs, V_) # ( h*N, T_q, C/h)
        # Restore shape: un-stack heads back into the channel axis.
        outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2 ) # (N, T_q, C)
        # Residual connection
        outputs += queries
        # Normalize
        #outputs = normalize(outputs) # (N, T_q, C)
    if with_qk:
        return Q, K
    else:
        return outputs
159,074 | import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
The provided code snippet includes necessary dependencies for implementing the `feedforward` function. Write a Python function `def feedforward(inputs, num_units=[2048, 512], scope="multihead_attention", dropout_keep_prob=1.0, reuse=None)` to solve the following problem:
Point-wise feed forward net. Args: inputs: A 3d tensor with shape of [N, T, C]. num_units: A list of two integers. scope: Optional scope for `variable_scope`. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: A 3d tensor with the same shape and dtype as inputs
Here is the function:
def feedforward(inputs,
                num_units=[2048, 512],
                scope="multihead_attention",
                dropout_keep_prob=1.0,
                reuse=None):
    '''
    Point-wise feed forward net: two 1x1 convolutions (ReLU between them)
    applied identically at every time step, followed by a residual add.
    Args:
      inputs: A 3d tensor with shape of [N, T, C].
      num_units: A list of two integers. num_units[1] must equal C for the
        residual connection (`outputs += inputs`) to broadcast correctly.
      scope: Optional scope for `variable_scope`.
        NOTE(review): the default "multihead_attention" looks copy-pasted from
        the attention block; renaming it would change variable names and break
        existing checkpoints, so it is deliberately left as-is.
      dropout_keep_prob: keep probability applied after each conv layer.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.
    Returns:
      A 3d tensor with the same shape and dtype as inputs
    '''
    with tf.variable_scope(scope, reuse=reuse):
        # Inner layer: expand C -> num_units[0] with a position-wise ReLU conv.
        params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1,
                  "activation": tf.nn.relu, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        outputs = tf.nn.dropout(outputs, keep_prob=dropout_keep_prob)
        #outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=tf.convert_to_tensor(is_training))
        # Readout layer: project back to num_units[1] (no activation).
        params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1,
                  "activation": None, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        outputs = tf.nn.dropout(outputs, keep_prob=dropout_keep_prob)
        #outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=tf.convert_to_tensor(is_training))
        # Residual connection
        outputs += inputs
        # Normalize
        #outputs = normalize(outputs)
    return outputs
159,075 | import math
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import make_scorer
from sklearn.model_selection import StratifiedKFold
from time import time
from .model import AutoInt
import argparse
import os
def str2bool(v):
    """argparse type: map common truthy/falsy strings to a bool (case-insensitive)."""
    truthy = {'yes', 'true', 't', 'y', '1'}
    falsy = {'no', 'false', 'f', 'n', '0'}
    lowered = v.lower()
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    raise argparse.ArgumentTypeError('Unsupported value encountered.')
159,076 | import math
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import make_scorer
from sklearn.model_selection import StratifiedKFold
from time import time
from .model import AutoInt
import argparse
import os
def str2list(v):
    """argparse type: parse strings like '[16,16]' or '16,16' into a list of ints."""
    return [int(token.strip('[]')) for token in v.split(',')]
def str2list2(v):
    """argparse type: parse strings like '[1,1,0.5]' into a list of floats."""
    return [float(token.strip('[]')) for token in v.split(',')]
def parse_args():
    """Build and parse the AutoInt training CLI (reads sys.argv).

    Returns the argparse.Namespace consumed by AutoInt.__init__ and _run_.
    NOTE: list-valued defaults (block_shape, dropout_keep_prob) bypass the
    str2list/str2list2 type converters — those only apply to CLI-given values.
    """
    parser = argparse.ArgumentParser()
    # Behaviour flags
    parser.add_argument('--is_save', action='store_true')
    parser.add_argument('--greater_is_better', action='store_true', help='early stop criterion')
    parser.add_argument('--has_residual', action='store_true', help='add residual')
    # Model architecture
    parser.add_argument('--blocks', type=int, default=2, help='#blocks')
    parser.add_argument('--block_shape', type=str2list, default=[16,16], help='output shape of each block')
    parser.add_argument('--heads', type=int, default=2, help='#heads')
    parser.add_argument('--embedding_size', type=int, default=16)
    # Training hyper-parameters
    parser.add_argument('--dropout_keep_prob', type=str2list2, default=[1, 1, 0.5])
    parser.add_argument('--epoch', type=int, default=2)
    parser.add_argument('--batch_size', type=int, default=1024)
    parser.add_argument('--learning_rate', type=float, default=0.001)
    parser.add_argument('--optimizer_type', type=str, default='adam')
    parser.add_argument('--l2_reg', type=float, default=0.0)
    parser.add_argument('--random_seed', type=int, default=2018)
    parser.add_argument('--save_path', type=str, default='./model/')
    parser.add_argument('--field_size', type=int, default=23, help='#fields')
    parser.add_argument('--loss_type', type=str, default='logloss')
    parser.add_argument('--verbose', type=int, default=1)
    parser.add_argument('--run_times', type=int, default=3,help='run multiple times to eliminate error')
    # Optional joint-training DNN
    parser.add_argument('--deep_layers', type=str2list, default=None, help='config for dnn in joint train')
    parser.add_argument('--batch_norm', type=int, default=0)
    parser.add_argument('--batch_norm_decay', type=float, default=0.995)
    #parser.add_argument('--data', type=str, help='data name')
    parser.add_argument('--data_path', type=str, help='root path for all the data')
    return parser.parse_args()
159,077 | import math
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import make_scorer
from sklearn.model_selection import StratifiedKFold
from time import time
from .model import AutoInt
import argparse
import os
class AutoInt():
    """TF1 (graph-mode) AutoInt CTR model — MovieLens variant.

    Handles M-1 single-value fields plus one multi-value "genre" field whose
    item embeddings are mean-pooled into a single extra field, giving M fields
    total. Interaction is modelled by stacked multi-head self-attention blocks
    over the field embeddings, optionally joint-trained with a plain DNN.
    """
    def __init__(self, args, feature_size, run_cnt):
        # Hyper-parameters are copied verbatim from the argparse namespace.
        self.feature_size = feature_size # denote as n, dimension of concatenated features
        self.field_size = args.field_size # denote as M, number of total feature fields
        self.embedding_size = args.embedding_size # denote as d, size of the feature embedding
        self.blocks = args.blocks # number of the blocks
        self.heads = args.heads # number of the heads
        self.block_shape = args.block_shape
        self.output_size = args.block_shape[-1]
        self.has_residual = args.has_residual
        self.deep_layers = args.deep_layers # whether to joint train with deep networks as described in paper
        self.batch_norm = args.batch_norm
        self.batch_norm_decay = args.batch_norm_decay
        self.drop_keep_prob = args.dropout_keep_prob
        self.l2_reg = args.l2_reg
        self.epoch = args.epoch
        self.batch_size = args.batch_size
        self.learning_rate = args.learning_rate
        self.optimizer_type = args.optimizer_type
        # Each run gets its own checkpoint sub-directory.
        self.save_path = args.save_path + str(run_cnt) + '/'
        self.is_save = args.is_save
        if (args.is_save == True and os.path.exists(self.save_path) == False):
            os.makedirs(self.save_path)
        self.verbose = args.verbose
        self.random_seed = args.random_seed
        self.loss_type = args.loss_type
        self.eval_metric = roc_auc_score
        # best_loss starts at 1.0; first valid logloss below it triggers a save.
        self.best_loss = 1.0
        self.greater_is_better = args.greater_is_better
        self.train_result, self.valid_result = [], []
        self.train_loss, self.valid_loss = [], []
        self._init_graph()

    def _init_graph(self):
        """Build the whole TF graph (placeholders, model, loss, optimizer, session)."""
        self.graph = tf.Graph()
        with self.graph.as_default():
            tf.set_random_seed(self.random_seed)
            # placeholder for single-value field.
            self.feat_index = tf.placeholder(tf.int32, shape=[None, None],
                                                 name="feat_index") # None * M-1
            self.feat_value = tf.placeholder(tf.float32, shape=[None, None],
                                                 name="feat_value") # None * M-1
            # placeholder for multi-value field. (movielens dataset genre field)
            self.genre_index = tf.placeholder(tf.int32, shape=[None, None],
                                                 name="genre_index") # None * 6
            self.genre_value = tf.placeholder(tf.float32, shape=[None, None],
                                                 name="genre_value") # None * 6
            self.label = tf.placeholder(tf.float32, shape=[None, 1], name="label")  # None * 1
            # In our implementation, the shape of dropout_keep_prob is [3], used in 3 different places.
            self.dropout_keep_prob = tf.placeholder(tf.float32, shape=[None], name="dropout_keep_prob")
            self.train_phase = tf.placeholder(tf.bool, name="train_phase")
            self.weights = self._initialize_weights()
            # model: look up and weight the single-value field embeddings.
            self.embeddings = tf.nn.embedding_lookup(self.weights["feature_embeddings"],
                                                             self.feat_index) # None * M-1 * d
            feat_value = tf.reshape(self.feat_value, shape=[-1, self.field_size-1, 1])
            self.embeddings = tf.multiply(self.embeddings, feat_value) # None * M-1 * d
            # for multi-value field: weighted mean-pool the genre embeddings into one field.
            self.embeddings_m = tf.nn.embedding_lookup(self.weights["feature_embeddings"],
                                                             self.genre_index) # None * 6 * d
            genre_value = tf.reshape(self.genre_value, shape=[-1, 6, 1])
            self.embeddings_m = tf.multiply(self.embeddings_m, genre_value)
            self.embeddings_m = tf.reduce_sum(self.embeddings_m, axis=1) # None * d
            self.embeddings_m = tf.div(self.embeddings_m, tf.reduce_sum(self.genre_value, axis=1, keep_dims=True)) # None * d
            #concatenate single-value field with multi-value field
            self.embeddings = tf.concat([self.embeddings, tf.expand_dims(self.embeddings_m, 1)], 1) # None * M * d
            self.embeddings = tf.nn.dropout(self.embeddings, self.dropout_keep_prob[1]) # None * M * d
            # joint training with feedforward nn (only when --deep_layers is given)
            if self.deep_layers != None:
                self.y_dense = tf.reshape(self.embeddings, shape=[-1, self.field_size * self.embedding_size])
                for i in range(0, len(self.deep_layers)):
                    self.y_dense = tf.add(tf.matmul(self.y_dense, self.weights["layer_%d" %i]), self.weights["bias_%d"%i]) # None * layer[i]
                    if self.batch_norm:
                        self.y_dense = self.batch_norm_layer(self.y_dense, train_phase=self.train_phase, scope_bn="bn_%d" %i)
                    self.y_dense = tf.nn.relu(self.y_dense)
                    self.y_dense = tf.nn.dropout(self.y_dense, self.dropout_keep_prob[2])
                self.y_dense = tf.add(tf.matmul(self.y_dense, self.weights["prediction_dense"]),
                                    self.weights["prediction_bias_dense"], name='logits_dense') # None * 1
            # ---------- main part of AutoInt-------------------
            # Stacked self-attention blocks over the field axis.
            self.y_deep = self.embeddings # None * M * d
            for i in range(self.blocks):
                self.y_deep = multihead_attention(queries=self.y_deep,
                                                  keys=self.y_deep,
                                                  values=self.y_deep,
                                                  num_units=self.block_shape[i],
                                                  num_heads=self.heads,
                                                  dropout_keep_prob=self.dropout_keep_prob[0],
                                                  is_training=self.train_phase,
                                                  has_residual=self.has_residual)
            self.flat = tf.reshape(self.y_deep,
                                   shape=[-1, self.output_size * self.field_size])
            self.out = tf.add(tf.matmul(self.flat, self.weights["prediction"]),
                              self.weights["prediction_bias"], name='logits') # None * 1
            if self.deep_layers != None:
                self.out += self.y_dense
            # ---------- Compute the loss ----------
            # loss
            if self.loss_type == "logloss":
                self.out = tf.nn.sigmoid(self.out, name='pred')
                self.loss = tf.losses.log_loss(self.label, self.out)
            elif self.loss_type == "mse":
                self.loss = tf.nn.l2_loss(tf.subtract(self.label, self.out))
            # l2 regularization on weights (dense layers only)
            if self.l2_reg > 0:
                if self.deep_layers != None:
                    for i in range(len(self.deep_layers)):
                        self.loss += tf.contrib.layers.l2_regularizer(
                            self.l2_reg)(self.weights["layer_%d"%i])
            self.global_step = tf.Variable(0, name="global_step", trainable=False)
            # NOTE(review): var1/var2 split out a 'feature_bias' variable but are
            # never passed to the optimizer below — presumably leftover from a
            # two-group training scheme; confirm before removing.
            self.var1 = [v for v in tf.trainable_variables() if v.name != 'feature_bias:0']
            self.var2 = [tf.trainable_variables()[1]] # self.var2 = [feature_bias]
            if self.optimizer_type == "adam":
                self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate,
                                                        beta1=0.9, beta2=0.999, epsilon=1e-8).\
                                                        minimize(self.loss, global_step=self.global_step)
            elif self.optimizer_type == "adagrad":
                self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate,
                                                           initial_accumulator_value=1e-8).\
                                                           minimize(self.loss, global_step=self.global_step)
            elif self.optimizer_type == "gd":
                self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).\
                                                           minimize(self.loss, global_step=self.global_step)
            elif self.optimizer_type == "momentum":
                self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.learning_rate, momentum=0.95).\
                                                           minimize(self.loss, global_step=self.global_step)
            # init
            self.saver = tf.train.Saver(max_to_keep=5)
            init = tf.global_variables_initializer()
            self.sess = self._init_session()
            self.sess.run(init)
            self.count_param()

    def count_param(self):
        """Print total trainable parameters and the count excluding the embedding table."""
        k = (np.sum([np.prod(v.get_shape().as_list())
                     for v in tf.trainable_variables()]))
        print("total parameters :%d" % k)
        print("extra parameters : %d" % (k - self.feature_size * self.embedding_size))

    def _init_session(self):
        """Create a session with GPU memory growth enabled."""
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        return tf.Session(config=config)

    def _initialize_weights(self):
        """Create all trainable variables (embeddings, optional DNN, output heads)."""
        weights = dict()
        # embeddings
        weights["feature_embeddings"] = tf.Variable(
            tf.random_normal([self.feature_size, self.embedding_size], 0.0, 0.01),
            name="feature_embeddings")  # feature_size(n) * d
        input_size = self.output_size * self.field_size
        # dense layers (Glorot-style init via numpy)
        if self.deep_layers != None:
            num_layer = len(self.deep_layers)
            layer0_size = self.field_size * self.embedding_size
            glorot = np.sqrt(2.0 / (layer0_size + self.deep_layers[0]))
            weights["layer_0"] = tf.Variable(
                np.random.normal(loc=0, scale=glorot, size=(layer0_size, self.deep_layers[0])), dtype=np.float32)
            weights["bias_0"] = tf.Variable(np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[0])),
                                            dtype=np.float32)  # 1 * layers[0]
            for i in range(1, num_layer):
                glorot = np.sqrt(2.0 / (self.deep_layers[i-1] + self.deep_layers[i]))
                weights["layer_%d" % i] = tf.Variable(
                    np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[i-1], self.deep_layers[i])),
                    dtype=np.float32)  # layers[i-1] * layers[i]
                weights["bias_%d" % i] = tf.Variable(
                    np.random.normal(loc=0, scale=glorot, size=(1, self.deep_layers[i])),
                    dtype=np.float32)  # 1 * layer[i]
            glorot = np.sqrt(2.0 / (self.deep_layers[-1] + 1))
            weights["prediction_dense"] = tf.Variable(
                            np.random.normal(loc=0, scale=glorot, size=(self.deep_layers[-1], 1)),
                            dtype=np.float32, name="prediction_dense")
            weights["prediction_bias_dense"] = tf.Variable(
                            np.random.normal(), dtype=np.float32, name="prediction_bias_dense")
        #---------- prediction weight ------------------#
        glorot = np.sqrt(2.0 / (input_size + 1))
        weights["prediction"] = tf.Variable(
                        np.random.normal(loc=0, scale=glorot, size=(input_size, 1)),
                        dtype=np.float32, name="prediction")
        weights["prediction_bias"] = tf.Variable(
                        np.random.normal(), dtype=np.float32, name="prediction_bias")
        return weights

    def batch_norm_layer(self, x, train_phase, scope_bn):
        """Batch norm that switches between train/inference statistics via train_phase."""
        bn_train = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
                              is_training=True, reuse=None, trainable=True, scope=scope_bn)
        bn_inference = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
                                  is_training=False, reuse=True, trainable=True, scope=scope_bn)
        z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
        return z

    def get_batch(self, Xi, Xv, Xi_genre, Xv_genre, y, batch_size, index):
        """Slice out batch `index`; labels are wrapped as column vectors ([[y], ...])."""
        start = index * batch_size
        end = (index+1) * batch_size
        end = end if end < len(y) else len(y)
        return Xi[start:end], Xv[start:end], Xi_genre[start:end], Xv_genre[start:end], [[y_] for y_ in y[start:end]]

    # shuffle five lists simultaneously (same permutation for each)
    def shuffle_in_unison_scary(self, a, b, c, d, e):
        rng_state = np.random.get_state()
        np.random.shuffle(a)
        np.random.set_state(rng_state)
        np.random.shuffle(b)
        np.random.set_state(rng_state)
        np.random.shuffle(c)
        np.random.set_state(rng_state)
        np.random.shuffle(d)
        np.random.set_state(rng_state)
        np.random.shuffle(e)

    def fit_on_batch(self, Xi, Xv, Xi_genre, Xv_genre, y):
        """Run one optimizer step; returns (global_step, batch loss)."""
        feed_dict = {self.feat_index: Xi,
                     self.feat_value: Xv,
                     self.genre_index: Xi_genre,
                     self.genre_value: Xv_genre,
                     self.label: y,
                     self.dropout_keep_prob: self.drop_keep_prob,
                     self.train_phase: True}
        step, loss, opt = self.sess.run((self.global_step, self.loss, self.optimizer), feed_dict=feed_dict)
        return step, loss

    # Since the train data is very large, they can not be fit into the memory at the same time.
    # We separate the whole train data into several files and call "fit_once" for each file.
    def fit_once(self, Xi_train, Xv_train, Xi_train_genre, Xv_train_genre, y_train,
                 epoch, Xi_valid=None,
                 Xv_valid=None, Xi_valid_genre=None, Xv_valid_genre=None, y_valid=None,
                 early_stopping=False):
        """Train one pass over the given arrays; returns False when early stopping fires.

        Side effects: shuffles the five training arrays IN PLACE, appends to the
        train/valid history lists, and checkpoints whenever valid loss improves.
        """
        has_valid = Xv_valid is not None
        last_step = 0
        t1 = time()
        self.shuffle_in_unison_scary(Xi_train, Xv_train, Xi_train_genre, Xv_train_genre, y_train)
        # NOTE(review): int() truncation drops the final partial batch.
        total_batch = int(len(y_train) / self.batch_size)
        for i in range(total_batch):
            Xi_batch, Xv_batch, Xi_batch_genre, Xv_batch_genre, y_batch = self.get_batch(Xi_train, Xv_train, Xi_train_genre, Xv_train_genre, y_train, self.batch_size, i)
            step, loss = self.fit_on_batch(Xi_batch, Xv_batch, Xi_batch_genre, Xv_batch_genre, y_batch)
            last_step = step
        # evaluate training and validation datasets
        train_result, train_loss = self.evaluate(Xi_train, Xv_train, Xi_train_genre, Xv_train_genre, y_train)
        self.train_result.append(train_result)
        self.train_loss.append(train_loss)
        if has_valid:
            valid_result, valid_loss = self.evaluate(Xi_valid, Xv_valid, Xi_valid_genre, Xv_valid_genre, y_valid)
            self.valid_result.append(valid_result)
            self.valid_loss.append(valid_loss)
            if valid_loss < self.best_loss and self.is_save == True:
                old_loss = self.best_loss
                self.best_loss = valid_loss
                self.saver.save(self.sess, self.save_path + 'model.ckpt',global_step=last_step)
                print("[%d] model saved!. Valid loss is improved from %.4f to %.4f"
                      % (epoch, old_loss, self.best_loss))
        # NOTE(review): the ((epoch-1)*9) % verbose guard looks like a leftover
        # from the multi-file variant (9 training files per epoch) — confirm.
        if self.verbose > 0 and ((epoch-1)*9) % self.verbose == 0:
            if has_valid:
                print("[%d] train-result=%.4f, train-logloss=%.4f, valid-result=%.4f, valid-logloss=%.4f [%.1f s]" % (epoch, train_result, train_loss, valid_result, valid_loss, time() - t1))
            else:
                print("[%d] train-result=%.4f [%.1f s]" \
                    % (epoch, train_result, time() - t1))
        if has_valid and early_stopping and self.training_termination(self.valid_loss):
            return False
        else:
            return True

    def training_termination(self, valid_result):
        """Stop after 4 consecutive non-improving validation epochs."""
        if len(valid_result) > 5:
            if self.greater_is_better:
                if valid_result[-1] < valid_result[-2] and \
                    valid_result[-2] < valid_result[-3] and \
                    valid_result[-3] < valid_result[-4] and \
                    valid_result[-4] < valid_result[-5]:
                    return True
            else:
                if valid_result[-1] > valid_result[-2] and \
                    valid_result[-2] > valid_result[-3] and \
                    valid_result[-3] > valid_result[-4] and \
                    valid_result[-4] > valid_result[-5]:
                    return True
        return False

    def predict(self, Xi, Xv, Xi_genre, Xv_genre):
        """
        :param Xi: list of list of feature indices of each sample in the dataset
        :param Xv: list of list of feature values of each sample in the dataset
        :return: predicted probability of each sample
        """
        # dummy y: labels are required by the feed_dict but unused for inference.
        dummy_y = [1] * len(Xi)
        batch_index = 0
        Xi_batch, Xv_batch, Xi_batch_genre, Xv_batch_genre, y_batch = self.get_batch(Xi, Xv, Xi_genre, Xv_genre, dummy_y, self.batch_size, batch_index)
        y_pred = None
        while len(Xi_batch) > 0:
            num_batch = len(y_batch)
            # Dropout disabled at inference: all keep probabilities forced to 1.0.
            feed_dict = {self.feat_index: Xi_batch,
                         self.feat_value: Xv_batch,
                         self.genre_index: Xi_batch_genre,
                         self.genre_value: Xv_batch_genre,
                         self.label: y_batch,
                         self.dropout_keep_prob: [1.0] * len(self.drop_keep_prob),
                         self.train_phase: False}
            batch_out = self.sess.run(self.out, feed_dict=feed_dict)
            if batch_index == 0:
                y_pred = np.reshape(batch_out, (num_batch,))
            else:
                y_pred = np.concatenate((y_pred, np.reshape(batch_out, (num_batch,))))
            batch_index += 1
            Xi_batch, Xv_batch, Xi_batch_genre, Xv_batch_genre, y_batch = self.get_batch(Xi, Xv, Xi_genre, Xv_genre, dummy_y, self.batch_size, batch_index)
        return y_pred

    def evaluate(self, Xi, Xv, Xi_genre, Xv_genre, y):
        """
        :param Xi: list of list of feature indices of each sample in the dataset
        :param Xv: list of list of feature values of each sample in the dataset
        :param y: label of each sample in the dataset
        :return: metric of the evaluation
        """
        y_pred = self.predict(Xi, Xv, Xi_genre, Xv_genre)
        # Clip away exact 0/1 so log_loss stays finite.
        y_pred = np.clip(y_pred,1e-6,1-1e-6)
        return self.eval_metric(y, y_pred), log_loss(y, y_pred)

    def restore(self, save_path=None):
        """Restore the latest checkpoint from save_path (defaults to self.save_path)."""
        if (save_path == None):
            save_path = self.save_path
        ckpt = tf.train.get_checkpoint_state(save_path)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            if self.verbose > 0:
                print ("restored from %s" % (save_path))
def _run_(args, run_cnt):
    """One full train/validate/test cycle for the MovieLens-style data layout.

    Loads pre-split .npy arrays from args.data_path, trains for model.epoch
    passes with early stopping, then restores the best checkpoint and returns
    (test_metric, test_logloss).
    """
    path_prefix = args.data_path
    #feature_size = np.load(path_prefix + '/feature_size.npy')[0]
    # NOTE(review): feature_size is hard-coded instead of read from
    # feature_size.npy (commented out above) — confirm 3600 matches the data.
    feature_size = 3600
    # test: file1, valid: file2, train: file3-10
    model = AutoInt(args=args, feature_size=feature_size, run_cnt=run_cnt)

    Xi_valid = np.load(path_prefix + '/valid_i_other.npy')
    Xv_valid = np.load(path_prefix + '/valid_x_other.npy')
    Xi_valid_genre = np.load(path_prefix + '/valid_i_genre.npy')
    Xv_valid_genre = np.load(path_prefix + '/valid_x_genre.npy')
    y_valid = np.load(path_prefix + '/valid_y.npy')

    is_continue = True
    for k in range(model.epoch):
        if not is_continue:
            print('early stopping at epoch %d' % (k+1))
            break
        time_epoch = 0
        # Single training file here (range(1)); the structure mirrors the
        # multi-file variant used for larger datasets.
        for j in range(1):
            if not is_continue:
                print('early stopping at epoch %d' % (k+1))
                break
            Xi_train = np.load(path_prefix + '/train_i_other.npy')
            Xv_train = np.load(path_prefix + '/train_x_other.npy')
            Xi_train_genre = np.load(path_prefix + '/train_i_genre.npy')
            Xv_train_genre = np.load(path_prefix + '/train_x_genre.npy')
            y_train = np.load(path_prefix + '/train_y.npy')
            t1 = time()
            is_continue = model.fit_once(Xi_train, Xv_train, Xi_train_genre, Xv_train_genre, y_train, k+1,
                           Xi_valid, Xv_valid, Xi_valid_genre, Xv_valid_genre, y_valid, early_stopping=True)
            time_epoch += time() - t1
        print("epoch %d, time %d" % (k+1, time_epoch))

    print('start testing!...')
    Xi_test = np.load(path_prefix + '/test_i_other.npy')
    Xv_test = np.load(path_prefix + '/test_x_other.npy')
    Xi_test_genre = np.load(path_prefix + '/test_i_genre.npy')
    Xv_test_genre = np.load(path_prefix + '/test_x_genre.npy')
    y_test = np.load(path_prefix + '/test_y.npy')
    # Evaluate with the best (lowest valid-loss) checkpoint, not the last step.
    model.restore()
    test_result, test_loss = model.evaluate(Xi_test, Xv_test, Xi_test_genre, Xv_test_genre, y_test)
    print("test-result = %.4lf, test-logloss = %.4lf" % (test_result, test_loss))
    return test_result, test_loss
159,078 | import os
import numpy as np
import tensorflow as tf
from time import time
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import roc_auc_score, log_loss
from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm
def normalize(inputs, epsilon=1e-8):
    '''
    Layer normalization over the last axis.
    Args:
      inputs: A tensor with 2 or more dimensions
      epsilon: A floating number to prevent Zero Division
    Returns:
      A tensor with the same shape and data dtype, scaled by a learned
      gamma and shifted by a learned beta (fresh tf.Variables per call).
    '''
    params_shape = inputs.get_shape()[-1:]
    mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
    beta = tf.Variable(tf.zeros(params_shape))
    gamma = tf.Variable(tf.ones(params_shape))
    std = (variance + epsilon) ** (.5)
    return gamma * ((inputs - mean) / std) + beta
def multihead_attention(queries,
                        keys,
                        values,
                        num_units=None,
                        num_heads=1,
                        dropout_keep_prob=1,
                        is_training=True,
                        has_residual=True):
    """Multi-head self-attention block used by each AutoInt interacting layer.

    Args:
      queries, keys, values: 3d tensors [N, T, C_in].
      num_units: attention size C; defaults to the last dim of `queries`.
        Must be evenly divisible by num_heads (tf.split requirement).
      num_heads: number of attention heads.
      dropout_keep_prob: keep probability on the attention weights.
      is_training: bool (or bool tensor) controlling dropout.
      has_residual: if True, add a learned linear+ReLU projection of `values`.
    Returns:
      A 3d tensor [N, T, num_units].
    """
    if num_units is None:
        # BUG FIX: as_list is a method — the original `as_list[-1]` indexed the
        # bound method object and raised TypeError whenever num_units was None.
        num_units = queries.get_shape().as_list()[-1]
    # Linear projections
    Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu)
    K = tf.layers.dense(keys, num_units, activation=tf.nn.relu)
    V = tf.layers.dense(values, num_units, activation=tf.nn.relu)
    if has_residual:
        V_res = tf.layers.dense(values, num_units, activation=tf.nn.relu)

    # Split and concat: [N, T, C] -> [h*N, T, C/h]
    Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0)
    K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0)
    V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0)

    # Multiplication: scaled dot-product attention scores [h*N, T, T]
    weights = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))
    # Scale
    weights = weights / (K_.get_shape().as_list()[-1] ** 0.5)
    # Activation
    weights = tf.nn.softmax(weights)
    # Dropouts
    weights = tf.layers.dropout(weights, rate=1-dropout_keep_prob,
                                training=tf.convert_to_tensor(is_training))
    # Weighted sum
    outputs = tf.matmul(weights, V_)
    # Restore shape: [h*N, T, C/h] -> [N, T, C]
    outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2)
    # Residual connection
    if has_residual:
        outputs += V_res
    outputs = tf.nn.relu(outputs)
    # Normalize
    outputs = normalize(outputs)
    return outputs
159,079 | import numpy as np
import config
import os
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn import preprocessing
def _load_data(_nrows=None, debug = False):
    """Load training features and labels from the paths in `config`.

    Args:
      _nrows: optional row cap passed straight to pandas (None = all rows).
      debug: unused; kept for interface compatibility with callers.
    Returns:
      (train_x, train_y): float ndarray of features and flat int32 label array.
    Raises:
      AssertionError: if the feature and label files have different row counts.
    """
    # BUG FIX: the `np.float` alias was removed in NumPy 1.24; np.float64 is
    # the exact type the alias resolved to, so behavior is unchanged.
    train_x = pd.read_csv(config.TRAIN_X, header=None, sep=' ', nrows=_nrows, dtype=np.float64)
    train_y = pd.read_csv(config.TRAIN_Y, header=None, sep=' ', nrows=_nrows, dtype=np.int32)
    train_x = train_x.values
    train_y = train_y.values.reshape([-1])

    print('data loading done!')
    print('training data : %d' % train_y.shape[0])
    assert train_x.shape[0] == train_y.shape[0], \
        "feature/label row mismatch: %d vs %d" % (train_x.shape[0], train_y.shape[0])
    return train_x, train_y
159,080 | import numpy as np
import config
import os
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn import preprocessing
train_x_name = "train_x.npy"
train_y_name = "train_y.npy"
def save_x_y(fold_index, train_x, train_y):
    """Save each fold's slice of (train_x, train_y) under config.DATA_PATH/part<i>/.

    Args:
      fold_index: list of index lists, one per fold.
      train_x, train_y: full feature / label sequences (indexable).
    Side effects:
      Creates part<i>/ directories as needed and writes the arrays using the
      module-level file names train_x_name / train_y_name.
    """
    def _take(seq, idx):
        # Fancy-index a plain Python sequence.
        return [seq[i] for i in idx]

    for i, part_index in enumerate(fold_index):
        print("now part %d" % (i+1))
        Xv_train_, y_train_ = _take(train_x, part_index), _take(train_y, part_index)
        # Both files go to the same per-part folder (the original computed the
        # identical path twice as save_dir_Xv / save_dir_y).
        save_dir = config.DATA_PATH + "part" + str(i+1) + "/"
        # exist_ok avoids the exists()-then-makedirs() race of the original.
        os.makedirs(save_dir, exist_ok=True)
        np.save(save_dir + train_x_name, Xv_train_)
        np.save(save_dir + train_y_name, y_train_)
159,081 | import numpy as np
import config
import os
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn import preprocessing
def save_i(fold_index):
    """Save the global feature count and each fold's slice of the index matrix."""
    frame = pd.read_csv(config.TRAIN_I, header=None, sep=' ', nrows=None, dtype=np.int32)
    indices = frame.values
    # Feature ids are dense, so max id + 1 is the vocabulary size.
    feature_size = indices.max() + 1
    print("feature_size = %d" % feature_size)
    np.save(config.DATA_PATH + "feature_size.npy", np.array([feature_size]))
    print("train_i size: %d" % len(indices))
    for part_no, part_index in enumerate(fold_index, start=1):
        print("now part %d" % part_no)
        fold_rows = [indices[i] for i in part_index]
        np.save(config.DATA_PATH + "part" + str(part_no) + '/train_i.npy', fold_rows)
159,082 | import math
import config
import numpy as np
def scale(x):
    """Squash large numeric values: anything above 2 becomes int(ln(x)**2);
    values <= 2 pass through unchanged."""
    if x <= 2:
        return x
    return int(math.log(float(x)) ** 2)
def scale_each_fold():
    """Apply `scale` to the first 13 columns of every fold's train_x array.

    NOTE: `part` is a NumPy view into `data`, so writing scaled rows into
    `part` mutates `data` in place — the array saved as train_x2.npy therefore
    contains the scaled numeric columns plus the untouched remaining columns.
    """
    for i in range(1,11):
        print('now part %d' % i)
        data = np.load(config.DATA_PATH + 'part'+str(i)+'/train_x.npy')
        part = data[:,0:13]  # numeric columns only
        for j in range(part.shape[0]):
            if j % 100000 ==0:
                print(j)  # progress marker every 100k rows
            part[j] = list(map(scale, part[j]))
        np.save(config.DATA_PATH + 'part' + str(i) + '/train_x2.npy', data)
159,083 | import math
import config
import numpy as np
# NOTE(review): extraction artifact — `def scale(x):` below has lost its body,
# which makes this fragment syntactically invalid Python. The complete `scale`
# and an identical `scale_each_fold` appear earlier in this file.
def scale(x):
def scale_each_fold():
    for i in range(1,11):
        print('now part %d' % i)
        data = np.load(config.DATA_PATH + 'part'+str(i)+'/train_x.npy')
        part = data[:,0:13]
        for j in range(part.shape[0]):
            if j % 100000 ==0:
                print(j)
            part[j] = list(map(scale, part[j]))
        np.save(config.DATA_PATH + 'part' + str(i) + '/train_x2.npy', data)
159,085 | import math
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import make_scorer
from sklearn.model_selection import StratifiedKFold
from time import time
from .model import AutoInt
import argparse
import os
def str2list(v):
    """argparse type: parse '[16,16]'-style strings into a list of ints."""
    return [int(piece.strip('[]')) for piece in v.split(',')]
def str2list2(v):
    """argparse type: parse '[1,1,0.5]'-style strings into a list of floats."""
    return [float(piece.strip('[]')) for piece in v.split(',')]
def parse_args():
    """Build and parse the AutoInt training CLI (multi-dataset variant; reads sys.argv).

    Unlike the earlier copy, this variant exposes `--data` to select the
    dataset sub-directory under --data_path.
    """
    parser = argparse.ArgumentParser()
    # Behaviour flags
    parser.add_argument('--is_save', action='store_true')
    parser.add_argument('--greater_is_better', action='store_true', help='early stop criterion')
    parser.add_argument('--has_residual', action='store_true', help='add residual')
    # Model architecture
    parser.add_argument('--blocks', type=int, default=2, help='#blocks')
    parser.add_argument('--block_shape', type=str2list, default=[16,16], help='output shape of each block')
    parser.add_argument('--heads', type=int, default=2, help='#heads')
    parser.add_argument('--embedding_size', type=int, default=16)
    # Training hyper-parameters
    parser.add_argument('--dropout_keep_prob', type=str2list2, default=[1, 1, 0.5])
    parser.add_argument('--epoch', type=int, default=2)
    parser.add_argument('--batch_size', type=int, default=1024)
    parser.add_argument('--learning_rate', type=float, default=0.001)
    parser.add_argument('--optimizer_type', type=str, default='adam')
    parser.add_argument('--l2_reg', type=float, default=0.0)
    parser.add_argument('--random_seed', type=int, default=2018)
    parser.add_argument('--save_path', type=str, default='./model/')
    parser.add_argument('--field_size', type=int, default=23, help='#fields')
    parser.add_argument('--loss_type', type=str, default='logloss')
    parser.add_argument('--verbose', type=int, default=1)
    parser.add_argument('--run_times', type=int, default=3,help='run multiple times to eliminate error')
    # Optional joint-training DNN
    parser.add_argument('--deep_layers', type=str2list, default=None, help='config for dnn in joint train')
    parser.add_argument('--batch_norm', type=int, default=0)
    parser.add_argument('--batch_norm_decay', type=float, default=0.995)
    # Dataset selection
    parser.add_argument('--data', type=str, help='data name')
    parser.add_argument('--data_path', type=str, help='root path for all the data')
    return parser.parse_args()
159,086 | import math
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import make_scorer
from sklearn.model_selection import StratifiedKFold
from time import time
from .model import AutoInt
import argparse
import os
# NOTE(review): extraction artifact — this class skeleton keeps only the
# method signatures (no bodies), so the fragment is syntactically invalid
# Python. Judging by get_batch's (Xi, Xv, y) signature this was the
# plain-fields variant of AutoInt; the complete genre-field implementation
# appears earlier in this file.
class AutoInt():
    def __init__(self, args, feature_size, run_cnt):
    def _init_graph(self):
    def count_param(self):
    def _init_session(self):
    def _initialize_weights(self):
    def batch_norm_layer(self, x, train_phase, scope_bn):
    def get_batch(self, Xi, Xv, y, batch_size, index):
    def shuffle_in_unison_scary(self, a, b, c):
    def fit_on_batch(self, Xi, Xv, y):
    def fit_once(self, Xi_train, Xv_train, y_train,
                epoch, file_count, Xi_valid=None,
                Xv_valid=None, y_valid=None,
                early_stopping=False):
    def training_termination(self, valid_result):
    def predict(self, Xi, Xv):
    def evaluate(self, Xi, Xv, y):
    def restore(self, save_path=None):
def _run_(args, file_name, run_cnt):
    """One train/validate/test cycle for the sharded (10-part) data layout.

    Args:
      args: parsed CLI namespace (must include data_path and data).
      file_name: 3-tuple of array file names (indices, values, labels)
        present inside every part<k>/ directory.
      run_cnt: run index, used to separate checkpoint directories.
    Returns:
      (test_metric, test_logloss) evaluated with the best checkpoint.
    """
    path_prefix = os.path.join(args.data_path, args.data)
    feature_size = np.load(path_prefix + '/feature_size.npy')[0]
    # test: file1, valid: file2, train: file3-10
    model = AutoInt(args=args, feature_size=feature_size, run_cnt=run_cnt)

    Xi_valid = np.load(path_prefix + '/part2/' + file_name[0])
    Xv_valid = np.load(path_prefix + '/part2/' + file_name[1])
    y_valid = np.load(path_prefix + '/part2/' + file_name[2])

    is_continue = True
    for k in range(model.epoch):
        if not is_continue:
            print('early stopping at epoch %d' % (k+1))
            break
        file_count = 0
        time_epoch = 0
        # Stream the 8 training shards (parts 3..10) through fit_once so the
        # full training set never has to fit in memory at once.
        for j in range(3, 11):
            if not is_continue:
                print('early stopping at epoch %d file %d' % (k+1, j))
                break
            file_count += 1
            Xi_train = np.load(path_prefix + '/part' + str(j) + '/' + file_name[0])
            Xv_train = np.load(path_prefix + '/part' + str(j) + '/' + file_name[1])
            y_train = np.load(path_prefix + '/part' + str(j) + '/' + file_name[2])
            print("epoch %d, file %d" %(k+1, j))
            t1 = time()
            is_continue = model.fit_once(Xi_train, Xv_train, y_train, k+1, file_count,
                           Xi_valid, Xv_valid, y_valid, early_stopping=True)
            time_epoch += time() - t1
        print("epoch %d, time %d" % (k+1, time_epoch))

    print('start testing!...')
    Xi_test = np.load(path_prefix + '/part1/' + file_name[0])
    Xv_test = np.load(path_prefix + '/part1/' + file_name[1])
    y_test = np.load(path_prefix + '/part1/' + file_name[2])
    # Evaluate with the best (lowest valid-loss) checkpoint, not the last step.
    model.restore()
    test_result, test_loss = model.evaluate(Xi_test, Xv_test, y_test)
    print("test-result = %.4lf, test-logloss = %.4lf" % (test_result, test_loss))
    return test_result, test_loss
159,088 | import pandas as pd
import numpy as np
import math
import argparse
import random
from collections import Counter
max_length = 30
def process_rating(day=7): # segment session in every $day days.
    """Load ratings, window them by timestamp, and derive session ids.

    Each user's activity is bucketed into `day`-day windows (TimeId);
    SessionId is "<UserId>_<TimeId>". Prints summary statistics and returns
    the filtered DataFrame.

    NOTE(review): RATING_FILE is not defined in this block's visible scope —
    presumably a module-level constant; confirm. Also, `inclusive=True` for
    Series.between was removed in pandas 2.0 (use inclusive='both'); this code
    assumes pandas < 2.
    """
    df = pd.read_csv(RATING_FILE, sep='\t', dtype={0:str, 1:str, 2:np.int32, 3: np.float32})
    df = df[df['Rating'].between(1,6,inclusive=True)]
    # Fixed epoch window: roughly 2008-01 .. 2017-01 (unix seconds).
    span_left = 1.2e9
    span_right = 1.485e9
    df = df[df['Timestamp'].between(span_left, span_right, inclusive=True)]
    min_timestamp = df['Timestamp'].min()
    # Integer index of the day-window each rating falls into.
    time_id = [int(math.floor((t-min_timestamp) / (86400*day))) for t in df['Timestamp']]
    df['TimeId'] = time_id
    session_id = [str(uid)+'_'+str(tid) for uid, tid in zip(df['UserId'], df['TimeId'])]
    df['SessionId'] = session_id
    print('Statistics of user ratings:')
    print('\tNumber of total ratings: {}'.format(len(df)))
    print('\tNumber of users: {}'.format(df.UserId.nunique()))
    print('\tNumber of items: {}'.format(df.ItemId.nunique()))
    print('\tAverage ratings per user:{}'.format(df.groupby('UserId').size().mean()))
    return df
def process_social(): # read in social network.
    """Load the follower->followee edge list (``SOCIAL_NETWORK_FILE``, defined
    elsewhere in this module — TODO confirm), drop duplicate edges, print
    basic statistics, and return the edge DataFrame."""
    edges = pd.read_csv(SOCIAL_NETWORK_FILE, sep='\t', dtype={0: str, 1: str})
    edges.drop_duplicates(subset=['Follower', 'Followee'], inplace=True)
    friend_size = edges.groupby('Follower').size()  # kept for the optional filter below
    #net = net[np.in1d(net.Follower, friend_size[friend_size>=5].index)]
    print('Statistics of social network:')
    print('\tTotal user in social network:{}.\n\tTotal edges(links) in social network:{}.'.format(\
        edges.Follower.nunique(), len(edges)))
    print('\tAverage number of friends for users: {}'.format(edges.groupby('Follower').size().mean()))
    return edges
def reset_id(data, id_map, column_name='UserId'):
    """Replace raw ids in *column_name* with mapped ids (in place).

    When user ids are remapped, SessionId ("<user>_<time>") is rebuilt from
    the new UserId and the existing TimeId. Returns *data* for convenience.
    """
    data[column_name] = data[column_name].map(id_map)
    if column_name == 'UserId':
        data['SessionId'] = [
            '{}_{}'.format(uid, tid)
            for uid, tid in zip(data['UserId'], data['TimeId'])
        ]
    return data
def split_data(day): #split data for training/validation/testing.
    """Build train/valid/test splits plus the filtered social graph.

    Reads ratings (bucketed into *day*-day sessions) and the social network,
    keeps only users that appear in both, splits sessions by time (the last
    27 time buckets are held out and halved into valid/test), remaps user ids
    to [0, n_users) and item ids to [1, n_items] (0 is reserved for padding),
    and writes train/valid/test/adj TSVs, the id maps, and
    ``latest_sessions.txt`` into the working directory.

    Fix: the original mixed Python 2-only constructs (``dict.iteritems()``,
    ``xrange``, ``print`` statements) with the ``print()`` calls used in the
    rest of this module; under Python 3 it crashed. Replaced with the
    Python-3-compatible equivalents — output is unchanged.
    """
    df_data = process_rating(day)
    df_net = process_social()
    # Keep only edges among rating users, and only users that follow someone.
    df_net = df_net.loc[df_net['Follower'].isin(df_data['UserId'].unique())]
    df_net = df_net.loc[df_net['Followee'].isin(df_data['UserId'].unique())]
    df_data = df_data.loc[df_data['UserId'].isin(df_net.Follower.unique())]
    #restrict session length in [2, max_length]. We set a max_length because too long sequence may come from a fake user.
    df_data = df_data[df_data['SessionId'].groupby(df_data['SessionId']).transform('size')>1]
    df_data = df_data[df_data['SessionId'].groupby(df_data['SessionId']).transform('size')<=max_length]
    #length_supports = df_data.groupby('SessionId').size()
    #df_data = df_data[np.in1d(df_data.SessionId, length_supports[length_supports<=max_length].index)]
    # split train, test, valid: sessions ending in the last 27 buckets are held out.
    tmax = df_data.TimeId.max()
    session_max_times = df_data.groupby('SessionId').TimeId.max()
    session_train = session_max_times[session_max_times < tmax - 26].index
    session_holdout = session_max_times[session_max_times >= tmax - 26].index
    train_tr = df_data[df_data['SessionId'].isin(session_train)]
    holdout_data = df_data[df_data['SessionId'].isin(session_holdout)]
    print('Number of train/test: {}/{}'.format(len(train_tr), len(holdout_data)))
    # Drop rare items (<20 train occurrences) and sessions left with <2 events.
    train_tr = train_tr[train_tr['ItemId'].groupby(train_tr['ItemId']).transform('size')>=20]
    train_tr = train_tr[train_tr['SessionId'].groupby(train_tr['SessionId']).transform('size')>1]
    print('Item size in train data: {}'.format(train_tr['ItemId'].nunique()))
    # Only items with >=50 train occurrences are eligible as prediction targets.
    train_item_counter = Counter(train_tr.ItemId)
    to_predict = Counter(el for el in train_item_counter.elements() if train_item_counter[el] >= 50).keys()
    print('Size of to predict: {}'.format(len(to_predict)))
    # split holdout to valid and test.
    holdout_cn = holdout_data.SessionId.nunique()
    holdout_ids = holdout_data.SessionId.unique()
    np.random.shuffle(holdout_ids)
    valid_cn = int(holdout_cn * 0.5)
    session_valid = holdout_ids[0: valid_cn]
    session_test = holdout_ids[valid_cn: ]
    valid = holdout_data[holdout_data['SessionId'].isin(session_valid)]
    test = holdout_data[holdout_data['SessionId'].isin(session_test)]
    valid = valid[valid['ItemId'].isin(to_predict)]
    valid = valid[valid['SessionId'].groupby(valid['SessionId']).transform('size')>1]
    test = test[test['ItemId'].isin(to_predict)]
    test = test[test['SessionId'].groupby(test['SessionId']).transform('size')>1]
    total_df = pd.concat([train_tr, valid, test])
    df_net = df_net.loc[df_net['Follower'].isin(total_df['UserId'].unique())]
    df_net = df_net.loc[df_net['Followee'].isin(total_df['UserId'].unique())]
    # Remap ids: users -> [0, n_users); items -> [1, n_items], 0 reserved for padding.
    user_map = dict(zip(total_df.UserId.unique(), range(total_df.UserId.nunique())))
    item_map = dict(zip(total_df.ItemId.unique(), range(1, 1+total_df.ItemId.nunique())))
    with open('user_id_map.tsv', 'w') as fout:
        for k, v in user_map.items():  # fix: dict.iteritems() is Python 2 only
            fout.write(str(k) + '\t' + str(v) + '\n')
    with open('item_id_map.tsv', 'w') as fout:
        for k, v in item_map.items():
            fout.write(str(k) + '\t' + str(v) + '\n')
    num_users = len(user_map)
    num_items = len(item_map)
    reset_id(total_df, user_map)
    reset_id(train_tr, user_map)
    reset_id(valid, user_map)
    reset_id(test, user_map)
    reset_id(df_net, user_map, 'Follower')
    reset_id(df_net, user_map, 'Followee')
    reset_id(total_df, item_map, 'ItemId')
    reset_id(train_tr, item_map, 'ItemId')
    reset_id(valid, item_map, 'ItemId')
    reset_id(test, item_map, 'ItemId')
    # fix: Python 2 print statements -> print() calls (format strings unchanged).
    print('Train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tAvg length: {}'.format(len(train_tr), train_tr.SessionId.nunique(), train_tr.ItemId.nunique(), train_tr.groupby('SessionId').size().mean()))
    print('Valid set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tAvg length: {}'.format(len(valid), valid.SessionId.nunique(), valid.ItemId.nunique(), valid.groupby('SessionId').size().mean()))
    print('Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tAvg length: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique(), test.groupby('SessionId').size().mean()))
    # For every user, the id of her most recent session at each time step
    # ('NULL' until the first one exists).
    user2sessions = total_df.groupby('UserId')['SessionId'].apply(set).to_dict()
    user_latest_session = []
    for idx in range(num_users):  # fix: xrange is Python 2 only
        sessions = user2sessions[idx]
        latest = []
        for t in range(tmax+1):
            if t == 0:
                latest.append('NULL')
            else:
                sess_id_tmp = str(idx) + '_' + str(t-1)
                if sess_id_tmp in sessions:
                    latest.append(sess_id_tmp)
                else:
                    latest.append(latest[t-1])
        user_latest_session.append(latest)
    train_tr.to_csv('train.tsv', sep='\t', index=False)
    valid.to_csv('valid.tsv', sep='\t', index=False)
    test.to_csv('test.tsv', sep='\t', index=False)
    df_net.to_csv('adj.tsv', sep='\t', index=False)
    with open('latest_sessions.txt', 'w') as fout:
        for idx in range(num_users):
            fout.write(','.join(user_latest_session[idx]) + '\n')
159,089 | import tensorflow as tf
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `uniform` function. Write a Python function `def uniform(shape, scale=0.05, name=None)` to solve the following problem:
Uniform init.
Here is the function:
def uniform(shape, scale=0.05, name=None):
    """Trainable variable initialized from U(-scale, scale)."""
    values = tf.random_uniform(shape, minval=-scale, maxval=scale, dtype=tf.float32)
    return tf.Variable(values, name=name)
159,090 | import tensorflow as tf
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `glorot` function. Write a Python function `def glorot(shape, name=None)` to solve the following problem:
Glorot & Bengio (AISTATS 2010) init.
Here is the function:
def glorot(shape, name=None):
    """Trainable variable with Glorot & Bengio (AISTATS 2010) uniform init:
    U(-limit, limit) with limit = sqrt(6 / (fan_in + fan_out))."""
    limit = np.sqrt(6.0 / (shape[0] + shape[1]))
    values = tf.random_uniform(shape, minval=-limit, maxval=limit, dtype=tf.float32)
    return tf.Variable(values, name=name)
159,091 | import tensorflow as tf
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `zeros` function. Write a Python function `def zeros(shape, name=None)` to solve the following problem:
All zeros.
Here is the function:
def zeros(shape, name=None):
    """Trainable variable filled with zeros."""
    return tf.Variable(tf.zeros(shape, dtype=tf.float32), name=name)
159,092 | import tensorflow as tf
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `ones` function. Write a Python function `def ones(shape, name=None)` to solve the following problem:
All ones.
Here is the function:
def ones(shape, name=None):
    """Trainable variable filled with ones."""
    return tf.Variable(tf.ones(shape, dtype=tf.float32), name=name)
159,093 | from __future__ import division
from __future__ import print_function
import os, sys
import argparse
import tensorflow as tf
import numpy as np
import time
from .utils import *
from .minibatch import MinibatchIterator
from .model import DGRec
# Fix NumPy and TF RNG seeds for reproducibility. `seed` is not defined in
# this file; presumably it comes from the `from .utils import *` wildcard
# import above — TODO confirm.
np.random.seed(seed)
tf.set_random_seed(seed)
def evaluate(sess, model, minibatch, val_or_test='val'):
    """Run one full pass over the validation or test split.

    Returns (mean batch loss, recall@20, NDCG), where recall/NDCG are summed
    over batches and normalized by the total number of valid positions.
    """
    losses = []
    recalls = []
    ndcgs = []
    point_counts = []
    while not minibatch.end_val(val_or_test):
        feed_dict = minibatch.next_val_minibatch_feed_dict(val_or_test)
        loss, recall, ndcg, points = sess.run(
            [model.loss, model.sum_recall, model.sum_ndcg, model.point_count],
            feed_dict=feed_dict)
        losses.append(loss)
        recalls.append(recall)
        ndcgs.append(ndcg)
        point_counts.append(points)
    total_points = np.sum(point_counts)
    return np.mean(losses), np.sum(recalls) / total_points, np.sum(ndcgs) / total_points
def construct_placeholders(args):
    """Build the TF feed placeholders for the target sessions plus the two
    layers of sampled friends' sessions (nodes, sessions, and lengths)."""
    n_layer1 = args.batch_size * args.samples_1 * args.samples_2
    n_layer2 = args.batch_size * args.samples_2
    session_shape = (args.batch_size, args.max_length)
    return {
        'input_x': tf.placeholder(tf.int32, shape=session_shape, name='input_session'),
        'input_y': tf.placeholder(tf.int32, shape=session_shape, name='output_session'),
        # TF name 'mask_x' kept as-is even though the key is mask_y (historical quirk).
        'mask_y': tf.placeholder(tf.float32, shape=session_shape, name='mask_x'),
        'support_nodes_layer1': tf.placeholder(tf.int32, shape=(n_layer1), name='support_nodes_layer1'),
        'support_nodes_layer2': tf.placeholder(tf.int32, shape=(n_layer2), name='support_nodes_layer2'),
        'support_sessions_layer1': tf.placeholder(tf.int32, shape=(n_layer1, args.max_length), name='support_sessions_layer1'),
        'support_sessions_layer2': tf.placeholder(tf.int32, shape=(n_layer2, args.max_length), name='support_sessions_layer2'),
        'support_lengths_layer1': tf.placeholder(tf.int32, shape=(n_layer1), name='support_lengths_layer1'),
        'support_lengths_layer2': tf.placeholder(tf.int32, shape=(n_layer2), name='support_lengths_layer2'),
    }
class MinibatchIterator(object):
    """Mini-batch feeder for DGRec.

    Holds the train/valid/test rating frames plus the social graph, samples
    each target user's friends (two layers), and builds TF feed dicts that
    pair the target session with the friends' most recent sessions.
    """
    def __init__(self,
                adj_info, # in pandas dataframe
                latest_sessions,
                data, # data list, either [train, valid] or [train, valid, test].
                placeholders,
                batch_size,
                max_degree,
                num_nodes,
                max_length=20,
                samples_1_2=[10,5], # NOTE(review): mutable default argument, shared across calls
                training=True):
        self.num_layers = 2 # Currently, only 2 layer is supported.
        self.adj_info = adj_info
        self.latest_sessions = latest_sessions
        self.training = training
        self.train_df, self.valid_df, self.test_df = data
        self.all_data = pd.concat(data)
        self.placeholders = placeholders
        self.batch_size = batch_size
        self.max_degree = max_degree
        self.num_nodes = num_nodes
        self.max_length = max_length
        self.samples_1_2 = samples_1_2
        # Support nodes per target at each hop: 1 target, samples_2 first-hop
        # friends, samples_1 * samples_2 second-hop friends.
        self.sizes = [1, samples_1_2[1], samples_1_2[1]*samples_1_2[0]]
        self.visible_time = self.user_visible_time()
        self.test_adj, self.test_deg = self.construct_test_adj()
        if self.training:
            self.adj, self.deg = self.construct_adj()
            self.train_session_ids = self._remove_infoless(self.train_df, self.adj, self.deg)
            self.valid_session_ids = self._remove_infoless(self.valid_df, self.test_adj, self.test_deg)
            self.sampler = UniformNeighborSampler(self.adj, self.visible_time, self.deg)
        self.test_session_ids = self._remove_infoless(self.test_df, self.test_adj, self.test_deg)
        self.padded_data, self.mask = self._padding_sessions(self.all_data)
        self.test_sampler = UniformNeighborSampler(self.test_adj, self.visible_time, self.test_deg)
        # Batch cursors for train / valid / test passes.
        self.batch_num = 0
        self.batch_num_val = 0
        self.batch_num_test = 0
    def user_visible_time(self):
        '''
        Find out when each user is 'visible' to her friends, i.e., every user's first click/watching time.
        '''
        visible_time = []
        for l in self.latest_sessions:
            # First time bucket after the trailing 'NULL' prefix in the
            # latest-session list = the user's first active bucket.
            timeid = max(loc for loc, val in enumerate(l) if val == 'NULL') + 1
            visible_time.append(timeid)
            assert timeid > 0 and timeid < len(l), 'Wrong when create visible time {}'.format(timeid)
        return visible_time
    def _remove_infoless(self, data, adj, deg):
        '''
        Remove users who have no sufficient friends.
        '''
        # Keep only rows whose user has at least one neighbor at all.
        data = data.loc[deg[data['UserId']] != 0]
        reserved_session_ids = []
        print('sessions: {}\tratings: {}'.format(data.SessionId.nunique(), len(data)))
        for sessid in data.SessionId.unique():
            userid, timeid = sessid.split('_')
            userid, timeid = int(userid), int(timeid)
            cn_1 = 0
            # A session is kept iff some first-order neighbor is already
            # visible at this time, has friends itself, and in turn has a
            # visible second-order neighbor; the breaks below encode that
            # (an early break leaves the counters < max_degree).
            for neighbor in adj[userid, : ]:
                if self.visible_time[neighbor] <= timeid and deg[neighbor] > 0:
                    cn_2 = 0
                    for second_neighbor in adj[neighbor, : ]:
                        if self.visible_time[second_neighbor] <= timeid:
                            break
                        cn_2 += 1
                    if cn_2 < self.max_degree:
                        break
                cn_1 += 1
            if cn_1 < self.max_degree:
                reserved_session_ids.append(sessid)
        return reserved_session_ids
    def _padding_sessions(self, data):
        '''
        Pad zeros at the end of each session to length self.max_length for batch training.
        '''
        # Per session: item list sorted by TimeId, then split into model input
        # x = items[:-1] and target y = items[1:], both zero-padded.
        data = data.sort_values(by=['TimeId']).groupby('SessionId')['ItemId'].apply(list).to_dict()
        new_data = {}
        data_mask = {}
        for k, v in data.items():
            mask = np.ones(self.max_length, dtype=np.float32)
            x = v[:-1]
            y = v[1: ]
            assert len(x) > 0
            padded_len = self.max_length - len(x)
            if padded_len > 0:
                x.extend([0] * padded_len)
                y.extend([0] * padded_len)
                mask[-padded_len: ] = 0.  # padded positions excluded from loss/metrics
            v.extend([0] * (self.max_length - len(v)))
            x = x[:self.max_length]
            y = y[:self.max_length]
            v = v[:self.max_length]
            # [input ids, target ids, full padded session] per session id.
            new_data[k] = [np.array(x, dtype=np.int32), np.array(y, dtype=np.int32), np.array(v, dtype=np.int32)]
            data_mask[k] = np.array(mask, dtype=bool)
        return new_data, data_mask
    def _batch_feed_dict(self, current_batch):
        '''
        Construct batch inputs.
        '''
        current_batch_sess_ids, samples, support_sizes = current_batch
        feed_dict = {}
        input_x = []
        input_y = []
        mask_y = []
        timeids = []
        for sessid in current_batch_sess_ids:
            nodeid, timeid = sessid.split('_')
            timeids.append(int(timeid))
            x, y, _ = self.padded_data[sessid]
            mask = self.mask[sessid]
            input_x.append(x)
            input_y.append(y)
            mask_y.append(mask)
        feed_dict.update({self.placeholders['input_x']: input_x})
        feed_dict.update({self.placeholders['input_y']: input_y})
        feed_dict.update({self.placeholders['mask_y']: mask_y})
        # samples[2]: second-hop friends; samples[1]: first-hop friends.
        feed_dict.update({self.placeholders['support_nodes_layer1']: samples[2]})
        feed_dict.update({self.placeholders['support_nodes_layer2']: samples[1]})
        #prepare sopportive user's recent sessions.
        support_layers_session = []
        support_layers_length = []
        for layer in range(self.num_layers):
            start = 0
            t = self.num_layers - layer
            support_sessions = []
            support_lengths = []
            for batch in range(self.batch_size):
                timeid = timeids[batch]
                support_nodes = samples[t][start: start + support_sizes[t]]
                for support_node in support_nodes:
                    # The friend's most recent (padded) session at this time step.
                    support_session_id = str(self.latest_sessions[support_node][timeid])
                    support_session = self.padded_data[support_session_id][2]
                    #print(support_session)
                    length = np.count_nonzero(support_session)
                    support_sessions.append(support_session)
                    support_lengths.append(length)
                start += support_sizes[t]
            support_layers_session.append(support_sessions)
            support_layers_length.append(support_lengths)
        feed_dict.update({self.placeholders['support_sessions_layer1']:support_layers_session[0]})
        feed_dict.update({self.placeholders['support_sessions_layer2']:support_layers_session[1]})
        feed_dict.update({self.placeholders['support_lengths_layer1']:support_layers_length[0]})
        feed_dict.update({self.placeholders['support_lengths_layer2']:support_layers_length[1]})
        return feed_dict
    def sample(self, nodeids, timeids, sampler):
        '''
        Sample neighbors recursively. First-order, then second-order, ...
        '''
        samples = [nodeids]
        support_size = 1
        support_sizes = [support_size]
        first_or_second = ['second', 'first']
        for k in range(self.num_layers):
            t = self.num_layers - k - 1
            node = sampler([samples[k], self.samples_1_2[t], timeids, first_or_second[t], support_size])
            support_size *= self.samples_1_2[t]
            samples.append(np.reshape(node, [support_size * self.batch_size,]))
            support_sizes.append(support_size)
        return samples, support_sizes
    def next_val_minibatch_feed_dict(self, val_or_test='val'):
        '''
        Construct evaluation or test inputs.
        '''
        if val_or_test == 'val':
            start = self.batch_num_val * self.batch_size
            self.batch_num_val += 1
            data = self.valid_session_ids
        elif val_or_test == 'test':
            start = self.batch_num_test * self.batch_size
            self.batch_num_test += 1
            data = self.test_session_ids
        else:
            raise NotImplementedError
        current_batch_sessions = data[start: start + self.batch_size]
        nodes = [int(sessionid.split('_')[0]) for sessionid in current_batch_sessions]
        timeids = [int(sessionid.split('_')[1]) for sessionid in current_batch_sessions]
        # Evaluation always samples from the full (test) adjacency.
        samples, support_sizes = self.sample(nodes, timeids, self.test_sampler)
        return self._batch_feed_dict([current_batch_sessions, samples, support_sizes])
    def next_train_minibatch_feed_dict(self):
        '''
        Generate next training batch data.
        '''
        start = self.batch_num * self.batch_size
        self.batch_num += 1
        current_batch_sessions = self.train_session_ids[start: start + self.batch_size]
        nodes = [int(sessionid.split('_')[0]) for sessionid in current_batch_sessions]
        timeids = [int(sessionid.split('_')[1]) for sessionid in current_batch_sessions]
        samples, support_sizes = self.sample(nodes, timeids, self.sampler)
        return self._batch_feed_dict([current_batch_sessions, samples, support_sizes])
    def construct_adj(self):
        '''
        Construct adj table used during training.
        '''
        # Row i holds max_degree neighbor ids for user i (sampled with/without
        # replacement as needed); unused rows keep the sentinel id num_nodes.
        adj = self.num_nodes*np.ones((self.num_nodes+1, self.max_degree), dtype=np.int32)
        deg = np.zeros((self.num_nodes,))
        missed = 0
        for nodeid in self.train_df.UserId.unique():
            neighbors = np.array([neighbor for neighbor in
                self.adj_info.loc[self.adj_info['Follower']==nodeid].Followee.unique()], dtype=np.int32)
            deg[nodeid] = len(neighbors)
            if len(neighbors) == 0:
                missed += 1
                continue
            if len(neighbors) > self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=False)
            elif len(neighbors) < self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=True)
            adj[nodeid, :] = neighbors
        #print('Unexpected missing during constructing adj list: {}'.format(missed))
        return adj, deg
    def construct_test_adj(self):
        '''
        Construct adj table used during evaluation or testing.
        '''
        # Same layout as construct_adj() but built over all splits.
        adj = self.num_nodes*np.ones((self.num_nodes+1, self.max_degree), dtype=np.int32)
        deg = np.zeros((self.num_nodes,))
        missed = 0
        data = self.all_data
        for nodeid in data.UserId.unique():
            neighbors = np.array([neighbor for neighbor in
                self.adj_info.loc[self.adj_info['Follower']==nodeid].Followee.unique()], dtype=np.int32)
            deg[nodeid] = len(neighbors)
            if len(neighbors) == 0:
                missed += 1
                continue
            if len(neighbors) > self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=False)
            elif len(neighbors) < self.max_degree:
                neighbors = np.random.choice(neighbors, self.max_degree, replace=True)
            adj[nodeid, :] = neighbors
        #print('Unexpected missing during constructing adj list: {}'.format(missed))
        return adj, deg
    def end(self):
        '''
        Indicate whether we finish a pass over all training samples.
        '''
        return self.batch_num * self.batch_size > len(self.train_session_ids) - self.batch_size
    def end_val(self, val_or_test='val'):
        '''
        Indicate whether we finish a pass over all testing or evaluation samples.
        '''
        batch_num = self.batch_num_val if val_or_test == 'val' else self.batch_num_test
        data = self.valid_session_ids if val_or_test == 'val' else self.test_session_ids
        end = batch_num * self.batch_size > len(data) - self.batch_size
        if end:
            if val_or_test == 'val':
                self.batch_num_val = 0
            elif val_or_test == 'test':
                self.batch_num_test = 0
            else:
                raise NotImplementedError
        if end:
            # NOTE(review): redundant with the reset above for 'val'; for
            # 'test' it additionally clears the validation cursor.
            self.batch_num_val = 0
        return end
    def shuffle(self):
        '''
        Shuffle training data.
        '''
        self.train_session_ids = np.random.permutation(self.train_session_ids)
        self.batch_num = 0
class DGRec(object):
    """Dynamic-graph session-based recommender (TF1 graph model).

    Encodes the target session with an LSTM, aggregates friends'
    ("global" long-term user embedding and/or "local" recent-session)
    representations through a 2-layer graph aggregator, and predicts the
    next item at every time step. Builds loss, recall@20 and NDCG ops.
    """
    def __init__(self, args, support_sizes, placeholders):
        self.support_sizes = support_sizes
        # Choose the neighbor-aggregator class; the classes themselves are
        # defined elsewhere (imported via the wildcard utils import — TODO confirm).
        if args.aggregator_type == "mean":
            self.aggregator_cls = MeanAggregator
        elif args.aggregator_type == "seq":
            self.aggregator_cls = SeqAggregator
        elif args.aggregator_type == "maxpool":
            self.aggregator_cls = MaxPoolingAggregator
        elif args.aggregator_type == "meanpool":
            self.aggregator_cls = MeanPoolingAggregator
        elif args.aggregator_type == "gcn":
            self.aggregator_cls = GCNAggregator
        elif args.aggregator_type == "attn":
            self.aggregator_cls = AttentionAggregator
        else:
            raise Exception("Unknown aggregator: ", self.aggregator_cls)
        self.input_x = placeholders['input_x']
        self.input_y = placeholders['input_y']
        self.mask_y = placeholders['mask_y']
        self.mask = tf.cast(self.mask_y, dtype=tf.float32)
        # Total number of valid (non-padded) positions in the batch.
        self.point_count = tf.reduce_sum(self.mask)
        self.support_nodes_layer1 = placeholders['support_nodes_layer1']
        self.support_nodes_layer2 = placeholders['support_nodes_layer2']
        self.support_sessions_layer1 = placeholders['support_sessions_layer1']
        self.support_sessions_layer2 = placeholders['support_sessions_layer2']
        self.support_lengths_layer1 = placeholders['support_lengths_layer1']
        self.support_lengths_layer2 = placeholders['support_lengths_layer2']
        self.training = args.training
        self.concat = args.concat
        if args.act == 'linear':
            self.act = lambda x:x
        elif args.act == 'relu':
            self.act = tf.nn.relu
        elif args.act == 'elu':
            self.act = tf.nn.elu
        else:
            raise NotImplementedError
        self.batch_size = args.batch_size
        self.hidden_size = args.hidden_size
        self.samples_1 = args.samples_1
        self.samples_2 = args.samples_2
        self.num_samples = [self.samples_1, self.samples_2]
        self.n_items = args.num_items
        self.n_users = args.num_users
        self.emb_item = args.embedding_size
        self.emb_user = args.emb_user
        self.max_length = args.max_length
        self.model_size = args.model_size
        self.dropout = args.dropout
        self.dim1 = args.dim1
        self.dim2 = args.dim2
        self.weight_decay = args.weight_decay
        self.global_only = args.global_only
        self.local_only = args.local_only
        self.dims = [self.hidden_size, args.dim1, args.dim2]
        self.dense_layers = []
        self.loss = 0
        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        # Exponentially decayed learning rate, floored at 1e-5.
        self.lr = tf.maximum(1e-5, tf.train.exponential_decay(args.learning_rate,
                                        self.global_step,
                                        args.decay_steps,
                                        args.decay_rate,
                                        staircase=True))
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
        self.build()
    def global_features(self):
        """Friends' long-term interest: learned user embeddings passed through
        a shared dense layer. Returns [layer2 features, layer1 features]."""
        self.user_embedding = tf.get_variable('user_embedding', [self.n_users, self.emb_user],\
                initializer=tf.glorot_uniform_initializer())
        feature_layer1 = tf.nn.embedding_lookup(self.user_embedding, self.support_nodes_layer1)
        feature_layer2 = tf.nn.embedding_lookup(self.user_embedding, self.support_nodes_layer2)
        # Half-width output when combined with local features later.
        dense_layer = Dense(self.emb_user,
                            self.hidden_size if self.global_only else self.hidden_size // 2,
                            act=tf.nn.relu,
                            dropout=self.dropout if self.training else 0.)
        self.dense_layers.append(dense_layer)
        feature_layer1 = dense_layer(feature_layer1)
        feature_layer2 = dense_layer(feature_layer2)
        return [feature_layer2, feature_layer1]
    def local_features(self):
        '''
        Use the same rnn in decode function
        '''
        # Friends' short-term interest: final LSTM state of their most recent
        # session (the LSTM cell is shared with decode()).
        initial_state_layer1 = self.lstm_cell.zero_state(self.batch_size*self.samples_1*self.samples_2, dtype=tf.float32)
        initial_state_layer2 = self.lstm_cell.zero_state(self.batch_size*self.samples_2, dtype=tf.float32)
        inputs_1 = tf.nn.embedding_lookup(self.embedding, self.support_sessions_layer1)
        inputs_2 = tf.nn.embedding_lookup(self.embedding, self.support_sessions_layer2)
        outputs1, states1 = tf.nn.dynamic_rnn(cell=self.lstm_cell,
                                            inputs=inputs_1,
                                            sequence_length=self.support_lengths_layer1,
                                            initial_state=initial_state_layer1,
                                            dtype=tf.float32)
        outputs2, states2 = tf.nn.dynamic_rnn(cell=self.lstm_cell,
                                            inputs=inputs_2,
                                            sequence_length=self.support_lengths_layer2,
                                            initial_state=initial_state_layer2,
                                            dtype=tf.float32)
        # outputs: shape[batch_size, max_time, depth]
        local_layer1 = states1.h
        local_layer2 = states2.h
        dense_layer = Dense(self.hidden_size,
                            self.hidden_size if self.local_only else self.hidden_size // 2,
                            act=tf.nn.relu,
                            dropout=self.dropout if self.training else 0.)
        self.dense_layers.append(dense_layer)
        local_layer1 = dense_layer(local_layer1)
        local_layer2 = dense_layer(local_layer2)
        return [local_layer2, local_layer1]
    def global_and_local_features(self):
        """Concatenate the global (user-embedding) and local (recent-session)
        features per support layer."""
        #global features
        global_feature_layer2, global_feature_layer1 = self.global_features()
        local_feature_layer2, local_feature_layer1 = self.local_features()
        global_local_layer2 = tf.concat([global_feature_layer2, local_feature_layer2], -1)
        global_local_layer1 = tf.concat([global_feature_layer1, local_feature_layer1], -1)
        return [global_local_layer2, global_local_layer1]
    def aggregate(self, hidden, dims, num_samples, support_sizes,
            aggregators=None, name=None, concat=False, model_size="small"):
        """ At each layer, aggregate hidden representations of neighbors to compute the hidden representations
            at next layer.
        Args:
            samples: a list of samples of variable hops away for convolving at each layer of the
                network. Length is the number of layers + 1. Each is a vector of node indices.
            input_features: the input features for each sample of various hops away.
            dims: a list of dimensions of the hidden representations from the input layer to the
                final layer. Length is the number of layers + 1.
            num_samples: list of number of samples for each layer.
            support_sizes: the number of nodes to gather information from for each layer.
            batch_size: the number of inputs (different for batch inputs and negative samples).
        Returns:
            The hidden representation at the final layer for all nodes in batch
        """
        # length: number of layers + 1
        hidden = hidden
        new_agg = aggregators is None
        if new_agg:
            aggregators = []
        for layer in range(len(num_samples)):
            if new_agg:
                dim_mult = 2 if concat and (layer != 0) else 1
                # aggregator at current layer
                if layer == len(num_samples) - 1:
                    aggregator = self.aggregator_cls(dim_mult*dims[layer], dims[layer+1], act=lambda x : x,
                            dropout=self.dropout if self.training else 0.,
                            name=name, concat=concat, model_size=model_size)
                else:
                    aggregator = self.aggregator_cls(dim_mult*dims[layer], dims[layer+1], act=self.act,
                            dropout=self.dropout if self.training else 0.,
                            name=name, concat=concat, model_size=model_size)
                aggregators.append(aggregator)
            else:
                aggregator = aggregators[layer]
            # hidden representation at current layer for all support nodes that are various hops away
            next_hidden = []
            # as layer increases, the number of support nodes needed decreases
            for hop in range(len(num_samples) - layer):
                dim_mult = 2 if concat and (layer != 0) else 1
                neigh_dims = [self.batch_size * support_sizes[hop],
                              num_samples[len(num_samples) - hop - 1],
                              dim_mult*dims[layer]]
                h = aggregator((hidden[hop],
                                tf.reshape(hidden[hop + 1], neigh_dims)))
                next_hidden.append(h)
            hidden = next_hidden
        return hidden[0], aggregators
    def decode(self):
        """Encode the target session with an LSTM; return a list of
        per-time-step hidden states (length max_length)."""
        self.lstm_cell = lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.hidden_size)
        initial_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)
        time_major_x = tf.transpose(self.input_x)
        inputs = tf.nn.embedding_lookup(self.embedding, time_major_x)
        outputs, state = tf.nn.dynamic_rnn(cell=lstm_cell,
                                        inputs=inputs,
                                        initial_state=initial_state,
                                        time_major=True,
                                        dtype=tf.float32,
                                        scope='decode_rnn')
        # outputs: shape[max_time, batch_size, depth]
        slices = tf.split(outputs, num_or_size_splits=self.max_length, axis=0)
        return [tf.squeeze(t,[0]) for t in slices]
    def step_by_step(self, features_0, features_1_2, dims, num_samples, support_sizes,
            aggregators=None, name=None, concat=False, model_size="small"):
        """Run the graph aggregation once per time step, reusing the same
        aggregators; returns a [max_time, batch, dim2] tensor."""
        self.aggregators = None
        outputs = []
        for feature0 in features_0:
            hidden = [feature0, features_1_2[0], features_1_2[1]]
            output1, self.aggregators = self.aggregate(hidden, dims, num_samples, support_sizes,
                    aggregators=self.aggregators, concat=concat, model_size=self.model_size)
            outputs.append(output1)
        return tf.stack(outputs, axis=0)
    def build(self):
        """Assemble the full graph: features, aggregation, loss, metrics, and
        the gradient-clipped Adam train op."""
        self.embedding = embedding = tf.get_variable('item_embedding', [self.n_items, self.emb_item],\
                initializer=tf.glorot_uniform_initializer())
        features_0 = self.decode() # features of zero layer nodes.
        #outputs with shape [max_time, batch_size, dim2]
        if self.global_only:
            features_1_2 = self.global_features()
        elif self.local_only:
            features_1_2 = self.local_features()
        else:
            features_1_2 = self.global_and_local_features()
        outputs = self.step_by_step(features_0, features_1_2, self.dims, self.num_samples, self.support_sizes,
                concat=self.concat)
        # Concatenate the raw LSTM states with the aggregated social context.
        concat_self = tf.concat([features_0, outputs], axis=-1)
        # exchange first two dimensions.
        self.transposed_outputs = tf.transpose(concat_self, [1,0,2])
        self.loss = self._loss()
        self.sum_recall = self._recall()
        self.sum_ndcg = self._ndcg()
        grads_and_vars = self.optimizer.compute_gradients(self.loss)
        clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is not None else None, var)
                for grad, var in grads_and_vars]
        self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars, global_step=self.global_step)
    def _loss(self):
        """Masked softmax cross-entropy over all items, plus L2 regularization
        on the dense and aggregator weights."""
        reg_loss = 0.
        xe_loss = 0.
        fc_layer = Dense(self.dim2 + self.hidden_size, self.emb_item, act=lambda x:x, dropout=self.dropout if self.training else 0.)
        self.dense_layers.append(fc_layer)
        # Score every item via dot product with the (shared) item embeddings.
        self.logits = logits = tf.matmul(fc_layer(tf.reshape(self.transposed_outputs, [-1, self.dim2+self.hidden_size])), self.embedding, transpose_b=True)
        for dense_layer in self.dense_layers:
            for var in dense_layer.vars.values():
                reg_loss += self.weight_decay * tf.nn.l2_loss(var)
        for aggregator in self.aggregators:
            for var in aggregator.vars.values():
                reg_loss += self.weight_decay * tf.nn.l2_loss(var)
        reshaped_logits = tf.reshape(logits, [self.batch_size, self.max_length, self.n_items])
        xe_loss += tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_y,
                logits=reshaped_logits,
                name='softmax_loss')
        xe_loss *= self.mask  # padded positions contribute nothing
        return tf.reduce_sum(xe_loss) / self.point_count + reg_loss
    def _ndcg(self):
        """Sum of per-position NDCG (masked); rank = 1 + number of items that
        score above the target. Column 0 is excluded from the competitors —
        presumably the padding item, since item ids start at 1 — TODO confirm."""
        predictions = tf.transpose(self.logits)
        targets = tf.reshape(self.input_y, [-1])
        pred_values = tf.expand_dims(tf.diag_part(tf.nn.embedding_lookup(predictions, targets)), -1)
        tile_pred_values = tf.tile(pred_values, [1, self.n_items-1])
        ranks = tf.reduce_sum(tf.cast(self.logits[:,1:] > tile_pred_values, dtype=tf.float32), -1) + 1
        # log2 is not defined in this file; presumably from the utils wildcard import.
        ndcg = 1. / (log2(1.0 + ranks))
        mask = tf.reshape(self.mask, [-1])
        ndcg *= mask
        return tf.reduce_sum(ndcg)
    def _recall(self):
        """Sum of masked hits where the target item is in the top-20 scores."""
        predictions = self.logits
        targets = tf.reshape(self.input_y, [-1])
        recall_at_k = tf.nn.in_top_k(predictions, targets, k=20)
        recall_at_k = tf.cast(recall_at_k, dtype=tf.float32)
        mask = tf.reshape(self.mask, [-1])
        recall_at_k *= mask
        return tf.reduce_sum(recall_at_k)
def train(args, data):
    """Train DGRec with early stopping on validation recall@20, then test.

    *data* is the tuple produced by the loading step:
    (adj_info, latest_sessions, user_id_map, item_id_map, train_df, valid_df,
    test_df). Checkpoints the best model to args.ckpt_dir.
    """
    adj_info = data[0]
    latest_per_user_by_time = data[1]
    user_id_map = data[2]
    item_id_map = data[3]
    train_df = data[4]
    valid_df = data[5]
    test_df = data[6]
    # +1 reserves item id 0 for padding.
    args.num_items = len(item_id_map) + 1
    args.num_users = len(user_id_map)
    placeholders = construct_placeholders(args)
    if not os.path.exists(args.ckpt_dir):
        os.makedirs(args.ckpt_dir)
    ckpt_path = os.path.join(args.ckpt_dir, 'model.ckpt')
    minibatch = MinibatchIterator(adj_info,
                latest_per_user_by_time,
                [train_df, valid_df, test_df],
                placeholders,
                batch_size=args.batch_size,
                max_degree=args.max_degree,
                num_nodes=len(user_id_map),
                max_length=args.max_length,
                samples_1_2=[args.samples_1, args.samples_2])
    dgrec = DGRec(args, minibatch.sizes, placeholders)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)
    total_steps = 0
    avg_time = 0.
    patience = 10      # number of non-improving validations before stopping
    inc = 0            # consecutive validations without recall improvement
    early_stopping = False
    highest_val_recall = -1.0
    start_time = time.time()
    for epoch in range(args.epochs):
        minibatch.shuffle()
        iter_cn = 0
        print('Epoch: %04d' % (epoch + 1))
        epoch_val_cost = []
        epoch_val_recall = []
        epoch_val_ndcg = []
        epoch_train_cost = []
        epoch_train_recall = []
        epoch_train_ndcg = []
        epoch_train_point = []
        while not minibatch.end() and not early_stopping:
            t = time.time()
            feed_dict = minibatch.next_train_minibatch_feed_dict()
            outs = sess.run([dgrec.opt_op, dgrec.loss, dgrec.sum_recall, dgrec.sum_ndcg, dgrec.point_count], feed_dict=feed_dict)
            train_cost = outs[1]
            epoch_train_cost.append(train_cost)
            epoch_train_recall.append(outs[2])
            epoch_train_ndcg.append(outs[3])
            epoch_train_point.append(outs[4])
            # Print results
            avg_time = (avg_time * total_steps + time.time() - t) / (total_steps + 1)
            if iter_cn % args.val_every == 0:
                # Periodic validation; checkpoint whenever recall@20 improves.
                ret = evaluate(sess, dgrec, minibatch)
                epoch_val_cost.append(ret[0])
                epoch_val_recall.append(ret[1])
                epoch_val_ndcg.append(ret[2])
                if ret[1] >= highest_val_recall:
                    saver.save(sess, ckpt_path, global_step=total_steps)
                    highest_val_recall = ret[1]
                    inc = 0
                    print("Iter:", '%d' % iter_cn,
                            "val_loss=", "{:.5f}".format(epoch_val_cost[-1]),
                            "val_recall@20=", "{:.5f}".format(epoch_val_recall[-1]),
                            "val_ndcg=", "{:.5f}".format(epoch_val_ndcg[-1]),
                            "dump model!"
                            )
                else:
                    inc += 1
                if inc >= patience:
                    early_stopping = True
                    break
            if total_steps % args.print_every == 0:
                print("Iter:", '%d' % iter_cn,
                        "train_loss=", "{:.5f}".format(np.mean(epoch_train_cost)),
                        "train_recall@20=", "{:.5f}".format(np.sum(epoch_train_recall)/np.sum(epoch_train_point)),
                        "train_ndcg=", "{:.5f}".format(np.sum(epoch_train_ndcg)/np.sum(epoch_train_point)),
                        "val_loss=", "{:.5f}".format(epoch_val_cost[-1]),
                        "val_recall@20=", "{:.5f}".format(epoch_val_recall[-1]),
                        "val_ndcg=", "{:.5f}".format(epoch_val_ndcg[-1]),
                        "time=", "{:.5f}s".format(avg_time))
                sys.stdout.flush()
            total_steps += 1
            iter_cn += 1
        if early_stopping:
            print('Early stop at epoch: {}, total training steps: {}'.format(epoch, total_steps))
            break
    end_time = time.time()
    print('-----------{} seconds per batch iteration-------------'.format((end_time - start_time) / total_steps))
    print('Parameter settings: {}'.format(args.ckpt_dir))
    print('Optimization finished!\tStart testing...')
    # NOTE(review): tests the in-memory weights from the last step, not the
    # best checkpoint saved above — confirm this is intended.
    ret = evaluate(sess, dgrec, minibatch, 'test')
    print('Test results:',
            '\tLoss:{}'.format(ret[0]),
            '\tRecall@20:{}'.format(ret[1]),
            '\tNDCG:{}'.format(ret[2]))
159,094 | from __future__ import division
from __future__ import print_function
import os, sys
import argparse
import tensorflow as tf
import numpy as np
import time
from .utils import *
from .minibatch import MinibatchIterator
from .model import DGRec
class Args():
    """Default hyper-parameters for DGRec training.

    ``parseArgs`` instantiates this class and overwrites many of the fields
    from command-line flags, so the values here act only as defaults.
    Fields without an "overridden by" note are fixed at these values.
    """
    training = True            # train vs. inference mode; not overridden by any flag
    global_only = False        # overridden by --glb
    local_only = False         # overridden by --local
    epochs = 20                # number of training epochs
    aggregator_type='attn'     # neighbor aggregator variant; overridden by --model
    act='relu'                 # activation-function name; overridden by --act
    batch_size = 200           # overridden by --batch
    max_degree = 50            # overridden by --degree
    num_users = -1             # -1 presumably means "derive from data" -- TODO confirm
    num_items = 100
    concat=False
    learning_rate=0.001        # overridden by --lr (note: the flag's own default is 0.002)
    hidden_size = 100          # overridden by --hidden
    embedding_size = 50        # item embedding size; overridden by --embi
    emb_user = 50              # user embedding size; overridden by --embu
    max_length=20
    samples_1=10               # first-hop sample count; overridden by --samples1
    samples_2=5                # second-hop sample count; overridden by --samples2
    dim1 = 100                 # overridden by --dim1
    dim2 = 100                 # overridden by --dim2
    model_size = 'small'
    dropout = 0.               # overridden by --dropout
    weight_decay = 0.          # L2 regularization strength; overridden by --l2
    print_every = 100          # training-log interval, in steps (see training loop)
    val_every = 500            # validation interval, in steps (see training loop)
    ckpt_dir = 'save/'         # checkpoint prefix; parseArgs appends all settings to it
def parseArgs():
args = Args()
parser = argparse.ArgumentParser(description='DGRec args')
parser.add_argument('--batch', default=200, type=int)
parser.add_argument('--model', default='attn', type=str)
parser.add_argument('--act', default='relu', type=str)
parser.add_argument('--degree', default=50, type=int)
parser.add_argument('--lr', default=0.002, type=float)
parser.add_argument('--hidden', default=100, type=int)
parser.add_argument('--embi', default=50, type=int)
parser.add_argument('--embu', default=50, type=int)
parser.add_argument('--samples1', default=10, type=int)
parser.add_argument('--samples2', default=5, type=int)
parser.add_argument('--dim1', default=100, type=int)
parser.add_argument('--dim2', default=100, type=int)
parser.add_argument('--dropout', default=0., type=float)
parser.add_argument('--l2', default=0., type=float)
parser.add_argument('--decay_steps', default=400, type=int)
parser.add_argument('--decay_rate', default=0.98, type=float)
parser.add_argument('--local', default=0, type=int)
parser.add_argument('--glb', default=0, type=int)
new_args = parser.parse_args()
args.batch_size = new_args.batch
args.aggregator_type = new_args.model
args.act = new_args.act
args.max_degree = new_args.degree
args.learning_rate = new_args.lr
args.hidden_size = new_args.hidden
args.embedding_size = new_args.embi
args.emb_user = new_args.embu
args.samples_1 = new_args.samples1
args.samples_2 = new_args.samples2
args.dim1 = new_args.dim1
args.dim2 = new_args.dim2
args.dropout = new_args.dropout
args.weight_decay = new_args.l2
args.decay_steps = new_args.decay_steps
args.decay_rate = new_args.decay_rate
args.local_only = new_args.local
args.global_only = new_args.glb
args.ckpt_dir = args.ckpt_dir + 'dgrec_batch{}'.format(args.batch_size)
args.ckpt_dir = args.ckpt_dir + '_model{}'.format(args.aggregator_type)
args.ckpt_dir = args.ckpt_dir + '_act{}'.format(args.act)
args.ckpt_dir = args.ckpt_dir + '_maxdegree{}'.format(args.max_degree)
args.ckpt_dir = args.ckpt_dir + '_lr{}'.format(args.learning_rate)
args.ckpt_dir = args.ckpt_dir + '_hidden{}'.format(args.hidden_size)
args.ckpt_dir = args.ckpt_dir + '_embi{}'.format(args.embedding_size)
args.ckpt_dir = args.ckpt_dir + '_embu{}'.format(args.emb_user)
args.ckpt_dir = args.ckpt_dir + '_samples1st{}'.format(args.samples_1)
args.ckpt_dir = args.ckpt_dir + '_samples2nd{}'.format(args.samples_2)
args.ckpt_dir = args.ckpt_dir + '_dim1st{}'.format(args.dim1)
args.ckpt_dir = args.ckpt_dir + '_dim2nd{}'.format(args.dim2)
args.ckpt_dir = args.ckpt_dir + '_dropout{}'.format(args.dropout)
args.ckpt_dir = args.ckpt_dir + '_l2reg{}'.format(args.weight_decay)
args.ckpt_dir = args.ckpt_dir + '_decaysteps{}'.format(args.decay_steps)
args.ckpt_dir = args.ckpt_dir + '_decayrate{}'.format(args.decay_rate)
args.ckpt_dir = args.ckpt_dir + '_global{}'.format(new_args.glb)
args.ckpt_dir = args.ckpt_dir + '_local{}'.format(new_args.local)
return args | null |
159,095 | from __future__ import print_function
import numpy as np
import pandas as pd
import random
def load_adj(data_path):
    """Read the social-graph adjacency table from ``<data_path>/adj.tsv``.

    Both columns are forced to int32 so they match the integer id dtypes
    used by the rest of the pipeline.

    Args:
        data_path (str): directory containing ``adj.tsv``.

    Returns:
        pandas.DataFrame: the adjacency table, one edge per row.
    """
    adj_path = data_path + '/adj.tsv'
    col_types = {0: np.int32, 1: np.int32}
    return pd.read_csv(adj_path, sep='\t', dtype=col_types)
def load_latest_session(data_path):
    """Load per-user latest-session ids from ``<data_path>/latest_sessions.txt``.

    Each line of the file holds comma-separated values; one entry (a list of
    strings) is returned per line, preserving file order.

    Args:
        data_path (str): directory containing ``latest_sessions.txt``.

    Returns:
        list[list[str]]: the parsed rows.
    """
    ret = []
    # 'with' guarantees the file handle is closed; the original iterated an
    # anonymous open() and leaked the handle.
    with open(data_path + '/latest_sessions.txt') as f:
        for line in f:
            ret.append(line.strip().split(','))
    return ret
def load_map(data_path, name='user'):
    """Load an id-mapping table (original id -> remapped id, both as str).

    Args:
        data_path (str): directory containing the ``*_id_map.tsv`` files.
        name (str): which map to load, ``'user'`` or ``'item'``.

    Returns:
        dict[str, str]: mapping from original id to remapped id.

    Raises:
        NotImplementedError: if ``name`` is neither ``'user'`` nor ``'item'``.
    """
    if name == 'user':
        file_path = data_path + '/user_id_map.tsv'
    elif name == 'item':
        file_path = data_path + '/item_id_map.tsv'
    else:
        raise NotImplementedError
    id_map = {}
    # 'with' guarantees the file handle is closed; the original leaked it.
    with open(file_path) as f:
        for line in f:
            k, v = line.strip().split('\t')
            id_map[k] = str(v)  # str() kept for parity; v is already a str
    return id_map
def load_data(data_path):
adj = load_adj(data_path)
latest_sessions = load_latest_session(data_path)
user_id_map = load_map(data_path, 'user')
item_id_map = load_map(data_path, 'item')
train = pd.read_csv(data_path + '/train.tsv', sep='\t', dtype={0:np.int32, 1:np.int32, 3:np.float32})
valid = pd.read_csv(data_path + '/valid.tsv', sep='\t', dtype={0:np.int32, 1:np.int32, 3:np.float32})
test = pd.read_csv(data_path + '/test.tsv', sep='\t', dtype={0:np.int32, 1:np.int32, 3:np.float32})
return [adj, latest_sessions, user_id_map, item_id_map, train, valid, test] | null |
159,096 | import tensorflow as tf
import numpy as np
from .aggregators import *
from .layers import Dense
def log2(x):
numerator = tf.log(x)
denominator = tf.log(tf.constant(2, dtype=numerator.dtype))
return numerator / denominator | null |
159,097 | from __future__ import division
from __future__ import print_function
import tensorflow as tf
from .inits import zeros
_LAYER_UIDS = {}
The provided code snippet includes necessary dependencies for implementing the `get_layer_uid` function. Write a Python function `def get_layer_uid(layer_name='')` to solve the following problem:
Helper function, assigns unique layer IDs.
Here is the function:
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name] | Helper function, assigns unique layer IDs. |
159,100 | import os
import os.path as osp
import platform
import shutil
import sys
import warnings
from setuptools import find_packages, setup
The provided code snippet includes necessary dependencies for implementing the `parse_requirements` function. Write a Python function `def parse_requirements(fname='requirements.txt', with_version=True)` to solve the following problem:
Parse the package dependencies listed in a requirements file but strips specific versioning information. Args: fname (str): path to requirements file with_version (bool, default=False): if True include version specs Returns: List[str]: list of requirements items CommandLine: python -c "import setup; print(setup.parse_requirements())"
Here is the function:
def parse_requirements(fname='requirements.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=False): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import re
import sys
from os.path import exists
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
elif '@git+' in line:
info['package'] = line
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
if ON_COLAB and info['package'] == 'xtcocotools':
# Due to an incompatibility between the Colab platform and the
# pre-built xtcocotools PyPI package, it is necessary to
# compile xtcocotools from source on Colab.
info = dict(
line=info['line'],
package='xtcocotools@'
'git+https://github.com/jin-s13/xtcocoapi')
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages | Parse the package dependencies listed in a requirements file but strips specific versioning information. Args: fname (str): path to requirements file with_version (bool, default=False): if True include version specs Returns: List[str]: list of requirements items CommandLine: python -c "import setup; print(setup.parse_requirements())" |
159,101 | import os
import os.path as osp
import platform
import shutil
import sys
import warnings
from setuptools import find_packages, setup
The provided code snippet includes necessary dependencies for implementing the `add_mim_extension` function. Write a Python function `def add_mim_extension()` to solve the following problem:
Add extra files that are required to support MIM into the package. These files will be added by creating a symlink to the originals if the package is installed in `editable` mode (e.g. pip install -e .), or by copying from the originals otherwise.
Here is the function:
def add_mim_extension():
"""Add extra files that are required to support MIM into the package.
These files will be added by creating a symlink to the originals if the
package is installed in `editable` mode (e.g. pip install -e .), or by
copying from the originals otherwise.
"""
# parse installment mode
if 'develop' in sys.argv:
# installed by `pip install -e .`
if platform.system() == 'Windows':
mode = 'copy'
else:
mode = 'symlink'
elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
# installed by `pip install .`
# or create source distribution by `python setup.py sdist`
mode = 'copy'
else:
return
filenames = [
'tools', 'configs', 'demo', 'model-index.yml', 'dataset-index.yml'
]
repo_path = osp.dirname(__file__)
mim_path = osp.join(repo_path, 'mmpose', '.mim')
os.makedirs(mim_path, exist_ok=True)
for filename in filenames:
if osp.exists(filename):
src_path = osp.join(repo_path, filename)
tar_path = osp.join(mim_path, filename)
if osp.isfile(tar_path) or osp.islink(tar_path):
os.remove(tar_path)
elif osp.isdir(tar_path):
shutil.rmtree(tar_path)
if mode == 'symlink':
src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
os.symlink(src_relpath, tar_path)
elif mode == 'copy':
if osp.isfile(src_path):
shutil.copyfile(src_path, tar_path)
elif osp.isdir(src_path):
shutil.copytree(src_path, tar_path)
else:
warnings.warn(f'Cannot copy file {src_path}.')
else:
raise ValueError(f'Invalid mode {mode}') | Add extra files that are required to support MIM into the package. These files will be added by creating a symlink to the originals if the package is installed in `editable` mode (e.g. pip install -e .), or by copying from the originals otherwise. |
159,103 | import os
import os.path as osp
import re
from glob import glob
The provided code snippet includes necessary dependencies for implementing the `_get_project_docs` function. Write a Python function `def _get_project_docs()` to solve the following problem:
Get all project document files. Returns: list[str]: file paths
Here is the function:
def _get_project_docs():
"""Get all project document files.
Returns:
list[str]: file paths
"""
project_root = osp.join('..', '..', 'projects')
pattern = osp.sep.join(['*'] * 2) + '.md'
docs = glob(osp.join(project_root, pattern))
docs = [
doc for doc in docs
if 'example_project' not in doc and '_CN' not in doc
]
return docs | Get all project document files. Returns: list[str]: file paths |
159,104 | import os
import os.path as osp
import re
from glob import glob
The provided code snippet includes necessary dependencies for implementing the `_parse_project_doc_path` function. Write a Python function `def _parse_project_doc_path(fn)` to solve the following problem:
Get project name and banner from a project reference file. Returns: tuple: - project_name (str) - project_banner (str)
Here is the function:
def _parse_project_doc_path(fn):
"""Get project name and banner from a project reference file.
Returns:
tuple:
- project_name (str)
- project_banner (str)
"""
project_banner, project_name = None, None
with open(fn, 'r', encoding='utf-8') as f:
for line in f.readlines():
if re.match('^( )*<img', line) and not project_banner:
project_banner = line
if line.startswith('# ') and not project_name:
project_name = line
if project_name and project_banner:
break
if project_name is None or project_banner is None:
raise ValueError(f'Invalid paper reference file {fn}')
project_name = re.sub(r'^\# ', '', project_name).strip()
project_banner = project_banner.strip()
return project_name, project_banner | Get project name and banner from a project reference file. Returns: tuple: - project_name (str) - project_banner (str) |
159,105 | import os
import os.path as osp
import re
from glob import glob
def _get_project_intro_doc():
project_intro_doc = []
with open(
osp.join('..', '..', 'projects', 'README.md'), 'r',
encoding='utf-8') as f:
for line in f.readlines():
if line.startswith('# Welcome'):
continue
if './faq.md' in line:
line = line.replace('./faq.md', '#faq')
if './' in line:
line = line.replace('./', '/projects/')
project_intro_doc.append(line)
if line.startswith('## Project List'):
break
return project_intro_doc | null |
159,106 | import os
import os.path as osp
import re
from glob import glob
def _get_faq_doc():
faq_doc = ['\n']
with open(
osp.join('..', '..', 'projects', 'faq.md'), 'r',
encoding='utf-8') as f:
for line in f.readlines():
if '#' in line:
line = re.sub(r'^\#', '##', line)
faq_doc.append(line)
return faq_doc | null |
159,107 | import os
import os.path as osp
import re
from collections import defaultdict
from glob import glob
from addict import Addict
from titlecase import titlecase
The provided code snippet includes necessary dependencies for implementing the `_get_model_docs` function. Write a Python function `def _get_model_docs()` to solve the following problem:
Get all model document files. Returns: list[str]: file paths
Here is the function:
def _get_model_docs():
"""Get all model document files.
Returns:
list[str]: file paths
"""
config_root = osp.join('..', '..', 'configs')
pattern = osp.sep.join(['*'] * 4) + '.md'
docs = glob(osp.join(config_root, pattern))
docs = [doc for doc in docs if '_base_' not in doc]
return docs | Get all model document files. Returns: list[str]: file paths |
159,108 | import os
import os.path as osp
import re
from collections import defaultdict
from glob import glob
from addict import Addict
from titlecase import titlecase
The provided code snippet includes necessary dependencies for implementing the `_parse_model_doc_path` function. Write a Python function `def _parse_model_doc_path(path)` to solve the following problem:
Parse doc file path. Typical path would be like: configs/<task>/<algorithm>/<dataset>/<setting>.md An example is: "configs/animal_2d_keypoint/topdown_heatmap/ animalpose/resnet_animalpose.md" Returns: tuple: - task (str): e.g. ``'Animal 2D Keypoint'`` - dataset (str): e.g. ``'animalpose'`` - keywords (tuple): e.g. ``('topdown heatmap', 'resnet')``
Here is the function:
def _parse_model_doc_path(path):
"""Parse doc file path.
Typical path would be like:
configs/<task>/<algorithm>/<dataset>/<setting>.md
An example is:
"configs/animal_2d_keypoint/topdown_heatmap/
animalpose/resnet_animalpose.md"
Returns:
tuple:
- task (str): e.g. ``'Animal 2D Keypoint'``
- dataset (str): e.g. ``'animalpose'``
- keywords (tuple): e.g. ``('topdown heatmap', 'resnet')``
"""
_path = path.split(osp.sep)
_rel_path = _path[_path.index('configs'):]
# get task
def _titlecase_callback(word, **kwargs):
if word == '2d':
return '2D'
if word == '3d':
return '3D'
task = titlecase(
_rel_path[1].replace('_', ' '), callback=_titlecase_callback)
# get dataset
dataset = _rel_path[3]
# get keywords
keywords_algo = (_rel_path[2], )
keywords_setting = tuple(_rel_path[4][:-3].split('_'))
keywords = keywords_algo + keywords_setting
return task, dataset, keywords | Parse doc file path. Typical path would be like: configs/<task>/<algorithm>/<dataset>/<setting>.md An example is: "configs/animal_2d_keypoint/topdown_heatmap/ animalpose/resnet_animalpose.md" Returns: tuple: - task (str): e.g. ``'Animal 2D Keypoint'`` - dataset (str): e.g. ``'animalpose'`` - keywords (tuple): e.g. ``('topdown heatmap', 'resnet')`` |
159,109 | import os
import os.path as osp
import re
from collections import defaultdict
from glob import glob
from addict import Addict
from titlecase import titlecase
The provided code snippet includes necessary dependencies for implementing the `_get_paper_refs` function. Write a Python function `def _get_paper_refs()` to solve the following problem:
Get all paper references. Returns: Dict[str, List[str]]: keys are paper categories and values are lists of paper paths.
Here is the function:
def _get_paper_refs():
"""Get all paper references.
Returns:
Dict[str, List[str]]: keys are paper categories and values are lists
of paper paths.
"""
papers = glob('../src/papers/*/*.md')
paper_refs = defaultdict(list)
for fn in papers:
category = fn.split(osp.sep)[3]
paper_refs[category].append(fn)
return paper_refs | Get all paper references. Returns: Dict[str, List[str]]: keys are paper categories and values are lists of paper paths. |
159,110 | import os
import os.path as osp
import re
from collections import defaultdict
from glob import glob
from addict import Addict
from titlecase import titlecase
The provided code snippet includes necessary dependencies for implementing the `_parse_paper_ref` function. Write a Python function `def _parse_paper_ref(fn)` to solve the following problem:
Get paper name and indicator pattern from a paper reference file. Returns: tuple: - paper_name (str) - paper_indicator (str)
Here is the function:
def _parse_paper_ref(fn):
"""Get paper name and indicator pattern from a paper reference file.
Returns:
tuple:
- paper_name (str)
- paper_indicator (str)
"""
indicator = None
with open(fn, 'r', encoding='utf-8') as f:
for line in f.readlines():
if line.startswith('<summary'):
indicator = line
break
if indicator is None:
raise ValueError(f'Invalid paper reference file {fn}')
paper_name = re.sub(r'\<.*?\>', '', indicator).strip()
return paper_name, indicator | Get paper name and indicator pattern from a paper reference file. Returns: tuple: - paper_name (str) - paper_indicator (str) |
159,112 | import os
import subprocess
import sys
import pytorch_sphinx_theme
def builder_inited_handler(app):
    """Regenerate the model zoo / project docs before the Sphinx build runs.

    Invoked on the 'builder-inited' Sphinx event; runs the collection and
    merge scripts in order. The ``app`` argument (the Sphinx application)
    is required by the event signature but unused here.
    """
    commands = (
        ['python', './collect_modelzoo.py'],
        ['python', './collect_projects.py'],
        ['sh', './merge_docs.sh'],
        ['python', './stats.py'],
    )
    for command in commands:
        subprocess.run(command)
def setup(app):
app.connect('builder-inited', builder_inited_handler) | null |
159,116 | import os
import os.path as osp
import re
from glob import glob
def _get_project_intro_doc():
project_intro_doc = []
with open(
osp.join('..', '..', 'projects', 'README.md'), 'r',
encoding='utf-8') as f:
for line in f.readlines():
if line.startswith('# Welcome'):
continue
if './faq.md' in line:
line = line.replace('./faq.md', '#faq')
if 'example_project' in line:
line = line.replace(
'./', 'https://github.com/open-mmlab/mmpose/'
'tree/dev-1.x/projects/')
project_intro_doc.append(line)
if line.startswith('## Project List'):
break
return project_intro_doc | null |
159,117 | import os
import os.path as osp
import re
from glob import glob
def _get_faq_doc():
faq_doc = []
with open(
osp.join('..', '..', 'projects', 'faq.md'), 'r',
encoding='utf-8') as f:
for line in f.readlines():
if '#' in line:
line = re.sub(r'^(\#+)', r'\g<1>#', line)
faq_doc.append(line)
return faq_doc | null |
159,124 | import argparse
import time
from typing import List, Tuple
import cv2
import loguru
import numpy as np
import onnxruntime as ort
def parse_args():
parser = argparse.ArgumentParser(
description='RTMPose ONNX inference demo.')
parser.add_argument('onnx_file', help='ONNX file path')
parser.add_argument('image_file', help='Input image file path')
parser.add_argument(
'--device', help='device type for inference', default='cpu')
parser.add_argument(
'--save-path',
help='path to save the output image',
default='output.jpg')
args = parser.parse_args()
return args | null |
159,125 | import argparse
import time
from typing import List, Tuple
import cv2
import loguru
import numpy as np
import onnxruntime as ort
def bbox_xyxy2cs(bbox: np.ndarray,
                 padding: float = 1.) -> Tuple[np.ndarray, np.ndarray]:
    """Transform bbox(es) from (x1, y1, x2, y2) format into (center, scale).

    (The original docstring claimed the input format was "(x,y,w,h)", which
    contradicted both the function name and the parameter description; the
    math below confirms corner-format input.)

    Args:
        bbox (ndarray): Bounding box(es) in shape (4,) or (n, 4), formatted
            as (left, top, right, bottom)
        padding (float): BBox padding factor that will be multiplied to scale.
            Default: 1.0

    Returns:
        tuple: A tuple containing center and scale.

        - np.ndarray[float32]: Center (x, y) of the bbox in shape (2,) or
            (n, 2)
        - np.ndarray[float32]: Scale (w, h) of the bbox in shape (2,) or
            (n, 2)
    """
    # Promote a single bbox (4,) to a batch of one (1, 4) so the math below
    # is uniformly vectorized; the result is squeezed back before returning.
    dim = bbox.ndim
    if dim == 1:
        bbox = bbox[None, :]

    # Split columns into x1, y1, x2, y2, each of shape (n, 1).
    x1, y1, x2, y2 = np.hsplit(bbox, [1, 2, 3])
    center = np.hstack([x1 + x2, y1 + y2]) * 0.5
    scale = np.hstack([x2 - x1, y2 - y1]) * padding

    if dim == 1:
        center = center[0]
        scale = scale[0]

    return center, scale
def top_down_affine(input_size: Tuple[int, int], bbox_scale: np.ndarray,
                    bbox_center: np.ndarray,
                    img: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Get the bbox image as the model input by affine transform.

    NOTE(review): the original annotations declared ``input_size``,
    ``bbox_scale`` and ``bbox_center`` as ``dict``; the body unpacks
    ``input_size`` as a (w, h) pair and the caller (``preprocess``) passes
    ndarrays for scale/center, so the annotations are corrected here.

    Args:
        input_size (tuple): The input size (w, h) of the model.
        bbox_scale (np.ndarray): The bbox scale (w, h) of the img.
        bbox_center (np.ndarray): The bbox center (x, y) of the img.
        img (np.ndarray): The original image.

    Returns:
        tuple: A tuple containing the warped image and the adjusted scale.

        - np.ndarray[float32]: img after affine transform.
        - np.ndarray[float32]: bbox scale after aspect-ratio adjustment.
    """
    w, h = input_size
    warp_size = (int(w), int(h))

    # reshape bbox to fixed aspect ratio (so the warp does not distort content)
    bbox_scale = _fix_aspect_ratio(bbox_scale, aspect_ratio=w / h)

    # get the affine matrix mapping (center, scale, rotation=0) to the output plane
    center = bbox_center
    scale = bbox_scale
    rot = 0
    warp_mat = get_warp_matrix(center, scale, rot, output_size=(w, h))

    # do affine transform
    img = cv2.warpAffine(img, warp_mat, warp_size, flags=cv2.INTER_LINEAR)

    return img, bbox_scale
The provided code snippet includes necessary dependencies for implementing the `preprocess` function. Write a Python function `def preprocess( img: np.ndarray, input_size: Tuple[int, int] = (192, 256) ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]` to solve the following problem:
Do preprocessing for RTMPose model inference. Args: img (np.ndarray): Input image in shape. input_size (tuple): Input image size in shape (w, h). Returns: tuple: - resized_img (np.ndarray): Preprocessed image. - center (np.ndarray): Center of image. - scale (np.ndarray): Scale of image.
Here is the function:
def preprocess(
img: np.ndarray, input_size: Tuple[int, int] = (192, 256)
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Do preprocessing for RTMPose model inference.
Args:
img (np.ndarray): Input image in shape.
input_size (tuple): Input image size in shape (w, h).
Returns:
tuple:
- resized_img (np.ndarray): Preprocessed image.
- center (np.ndarray): Center of image.
- scale (np.ndarray): Scale of image.
"""
# get shape of image
img_shape = img.shape[:2]
bbox = np.array([0, 0, img_shape[1], img_shape[0]])
# get center and scale
center, scale = bbox_xyxy2cs(bbox, padding=1.25)
# do affine transformation
resized_img, scale = top_down_affine(input_size, scale, center, img)
# normalize image
mean = np.array([123.675, 116.28, 103.53])
std = np.array([58.395, 57.12, 57.375])
resized_img = (resized_img - mean) / std
return resized_img, center, scale | Do preprocessing for RTMPose model inference. Args: img (np.ndarray): Input image in shape. input_size (tuple): Input image size in shape (w, h). Returns: tuple: - resized_img (np.ndarray): Preprocessed image. - center (np.ndarray): Center of image. - scale (np.ndarray): Scale of image. |
159,126 | import argparse
import time
from typing import List, Tuple
import cv2
import loguru
import numpy as np
import onnxruntime as ort
The provided code snippet includes necessary dependencies for implementing the `build_session` function. Write a Python function `def build_session(onnx_file: str, device: str = 'cpu') -> ort.InferenceSession` to solve the following problem:
Build onnxruntime session. Args: onnx_file (str): ONNX file path. device (str): Device type for inference. Returns: sess (ort.InferenceSession): ONNXRuntime session.
Here is the function:
def build_session(onnx_file: str, device: str = 'cpu') -> ort.InferenceSession:
"""Build onnxruntime session.
Args:
onnx_file (str): ONNX file path.
device (str): Device type for inference.
Returns:
sess (ort.InferenceSession): ONNXRuntime session.
"""
providers = ['CPUExecutionProvider'
] if device == 'cpu' else ['CUDAExecutionProvider']
sess = ort.InferenceSession(path_or_bytes=onnx_file, providers=providers)
return sess | Build onnxruntime session. Args: onnx_file (str): ONNX file path. device (str): Device type for inference. Returns: sess (ort.InferenceSession): ONNXRuntime session. |
159,127 | import argparse
import time
from typing import List, Tuple
import cv2
import loguru
import numpy as np
import onnxruntime as ort
The provided code snippet includes necessary dependencies for implementing the `inference` function. Write a Python function `def inference(sess: ort.InferenceSession, img: np.ndarray) -> np.ndarray` to solve the following problem:
Inference RTMPose model. Args: sess (ort.InferenceSession): ONNXRuntime session. img (np.ndarray): Input image in shape. Returns: outputs (np.ndarray): Output of RTMPose model.
Here is the function:
def inference(sess: ort.InferenceSession, img: np.ndarray) -> np.ndarray:
"""Inference RTMPose model.
Args:
sess (ort.InferenceSession): ONNXRuntime session.
img (np.ndarray): Input image in shape.
Returns:
outputs (np.ndarray): Output of RTMPose model.
"""
# build input
input = [img.transpose(2, 0, 1)]
# build output
sess_input = {sess.get_inputs()[0].name: input}
sess_output = []
for out in sess.get_outputs():
sess_output.append(out.name)
# run model
outputs = sess.run(sess_output, sess_input)
return outputs | Inference RTMPose model. Args: sess (ort.InferenceSession): ONNXRuntime session. img (np.ndarray): Input image in shape. Returns: outputs (np.ndarray): Output of RTMPose model. |
159,128 | import argparse
import time
from typing import List, Tuple
import cv2
import loguru
import numpy as np
import onnxruntime as ort
def decode(simcc_x: np.ndarray, simcc_y: np.ndarray,
           simcc_split_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
    """Decode SimCC x/y distributions into keypoint locations and scores.

    (The original summary, "Modulate simcc distribution with Gaussian", did
    not match the body: it only takes the per-axis maximum via
    ``get_simcc_maximum`` and rescales the indices by the split ratio.)

    Args:
        simcc_x (np.ndarray[K, Wx]): model predicted simcc in x.
        simcc_y (np.ndarray[K, Wy]): model predicted simcc in y.
        simcc_split_ratio (float): The split ratio of simcc (callers pass
            2.0 by default).

    Returns:
        tuple: A tuple containing keypoints and scores.

        - np.ndarray[float32]: keypoints in shape (K, 2) or (n, K, 2)
        - np.ndarray[float32]: scores in shape (K,) or (n, K)
    """
    keypoints, scores = get_simcc_maximum(simcc_x, simcc_y)
    # Dividing by the split ratio maps SimCC bin indices back to
    # model-input pixel coordinates.
    keypoints /= simcc_split_ratio

    return keypoints, scores
The provided code snippet includes necessary dependencies for implementing the `postprocess` function. Write a Python function `def postprocess(outputs: List[np.ndarray], model_input_size: Tuple[int, int], center: Tuple[int, int], scale: Tuple[int, int], simcc_split_ratio: float = 2.0 ) -> Tuple[np.ndarray, np.ndarray]` to solve the following problem:
Postprocess for RTMPose model output. Args: outputs (np.ndarray): Output of RTMPose model. model_input_size (tuple): RTMPose model Input image size. center (tuple): Center of bbox in shape (x, y). scale (tuple): Scale of bbox in shape (w, h). simcc_split_ratio (float): Split ratio of simcc. Returns: tuple: - keypoints (np.ndarray): Rescaled keypoints. - scores (np.ndarray): Model predict scores.
Here is the function:
def postprocess(outputs: List[np.ndarray],
model_input_size: Tuple[int, int],
center: Tuple[int, int],
scale: Tuple[int, int],
simcc_split_ratio: float = 2.0
) -> Tuple[np.ndarray, np.ndarray]:
"""Postprocess for RTMPose model output.
Args:
outputs (np.ndarray): Output of RTMPose model.
model_input_size (tuple): RTMPose model Input image size.
center (tuple): Center of bbox in shape (x, y).
scale (tuple): Scale of bbox in shape (w, h).
simcc_split_ratio (float): Split ratio of simcc.
Returns:
tuple:
- keypoints (np.ndarray): Rescaled keypoints.
- scores (np.ndarray): Model predict scores.
"""
# use simcc to decode
simcc_x, simcc_y = outputs
keypoints, scores = decode(simcc_x, simcc_y, simcc_split_ratio)
# rescale keypoints
keypoints = keypoints / model_input_size * scale + center - scale / 2
return keypoints, scores | Postprocess for RTMPose model output. Args: outputs (np.ndarray): Output of RTMPose model. model_input_size (tuple): RTMPose model Input image size. center (tuple): Center of bbox in shape (x, y). scale (tuple): Scale of bbox in shape (w, h). simcc_split_ratio (float): Split ratio of simcc. Returns: tuple: - keypoints (np.ndarray): Rescaled keypoints. - scores (np.ndarray): Model predict scores. |
159,129 | import argparse
import time
from typing import List, Tuple
import cv2
import loguru
import numpy as np
import onnxruntime as ort
The provided code snippet includes necessary dependencies for implementing the `visualize` function. Write a Python function `def visualize(img: np.ndarray, keypoints: np.ndarray, scores: np.ndarray, filename: str = 'output.jpg', thr=0.3) -> np.ndarray` to solve the following problem:
Visualize the keypoints and skeleton on image. Args: img (np.ndarray): Input image in shape. keypoints (np.ndarray): Keypoints in image. scores (np.ndarray): Model predict scores. thr (float): Threshold for visualize. Returns: img (np.ndarray): Visualized image.
Here is the function:
def visualize(img: np.ndarray,
keypoints: np.ndarray,
scores: np.ndarray,
filename: str = 'output.jpg',
thr=0.3) -> np.ndarray:
"""Visualize the keypoints and skeleton on image.
Args:
img (np.ndarray): Input image in shape.
keypoints (np.ndarray): Keypoints in image.
scores (np.ndarray): Model predict scores.
thr (float): Threshold for visualize.
Returns:
img (np.ndarray): Visualized image.
"""
# default color
skeleton = [(15, 13), (13, 11), (16, 14), (14, 12), (11, 12), (5, 11),
(6, 12), (5, 6), (5, 7), (6, 8), (7, 9), (8, 10), (1, 2),
(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6), (15, 17),
(15, 18), (15, 19), (16, 20), (16, 21), (16, 22), (91, 92),
(92, 93), (93, 94), (94, 95), (91, 96), (96, 97), (97, 98),
(98, 99), (91, 100), (100, 101), (101, 102), (102, 103),
(91, 104), (104, 105), (105, 106), (106, 107), (91, 108),
(108, 109), (109, 110), (110, 111), (112, 113), (113, 114),
(114, 115), (115, 116), (112, 117), (117, 118), (118, 119),
(119, 120), (112, 121), (121, 122), (122, 123), (123, 124),
(112, 125), (125, 126), (126, 127), (127, 128), (112, 129),
(129, 130), (130, 131), (131, 132)]
palette = [[51, 153, 255], [0, 255, 0], [255, 128, 0], [255, 255, 255],
[255, 153, 255], [102, 178, 255], [255, 51, 51]]
link_color = [
1, 1, 2, 2, 0, 0, 0, 0, 1, 2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2,
2, 2, 2, 2, 2, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1, 2, 2, 2,
2, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1
]
point_color = [
0, 0, 0, 0, 0, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 2, 2, 2, 2, 2, 2, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2,
4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1, 3, 2, 2, 2, 2, 4, 4, 4,
4, 5, 5, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1
]
# draw keypoints and skeleton
for kpts, score in zip(keypoints, scores):
for kpt, color in zip(kpts, point_color):
cv2.circle(img, tuple(kpt.astype(np.int32)), 1, palette[color], 1,
cv2.LINE_AA)
for (u, v), color in zip(skeleton, link_color):
if score[u] > thr and score[v] > thr:
cv2.line(img, tuple(kpts[u].astype(np.int32)),
tuple(kpts[v].astype(np.int32)), palette[color], 2,
cv2.LINE_AA)
# save to local
cv2.imwrite(filename, img)
return img | Visualize the keypoints and skeleton on image. Args: img (np.ndarray): Input image in shape. keypoints (np.ndarray): Keypoints in image. scores (np.ndarray): Model predict scores. thr (float): Threshold for visualize. Returns: img (np.ndarray): Visualized image. |
159,130 | import argparse
import torch
import torch.nn.functional as F
from mmdet.apis import init_detector
from torch import nn
def build_model_from_cfg(config_path: str, checkpoint_path: str, device):
    """Construct an mmdet detector from a config/checkpoint pair.

    The model is loaded onto *device* and switched to eval mode before
    being returned, so it is ready for inference.
    """
    detector = init_detector(config_path, checkpoint_path, device=device)
    detector.eval()
    return detector
159,131 | import argparse
import torch
import torch.nn.functional as F
from mmdet.apis import init_detector
from torch import nn
def parse_args():
    """Parse command-line options for the RTMDet-to-ONNX export script."""
    ap = argparse.ArgumentParser(description='convert rtmdet model to ONNX.')
    add = ap.add_argument
    add('--config', type=str, help='rtmdet config file path from mmdetection.')
    add('--checkpoint',
        type=str,
        help='rtmdet checkpoint path from mmdetection.')
    add('--output', type=str, help='output filename.')
    add('--device', type=str, default='cuda:0',
        help='Device used for inference')
    add('--input-name', type=str, default='image', help='ONNX input name.')
    add('--output-name', type=str, default='output', help='ONNX output name.')
    add('--opset', type=int, default=11, help='ONNX opset version.')
    return ap.parse_args()
159,132 | import math
import mimetypes
import os
from argparse import ArgumentParser
from itertools import product
import cv2
import mmcv
import numpy as np
from mmengine.registry import init_default_scope
from mmpose.apis import inference_topdown
from mmpose.apis import init_model as init_pose_estimator
from mmpose.evaluation.functional import nms
from mmpose.structures import merge_data_samples
# OpenPose 18-keypoint limb connections, as pairs of 1-based keypoint
# indices (converted to 0-based with `- 1` where they are used).
limb_seq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
            [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17],
            [1, 16], [16, 18]]
# Color triplets of the standard OpenPose palette, one per keypoint/limb.
# NOTE(review): drawn on an RGB canvas below, so these read as RGB here —
# confirm against the final channel swap on save.
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255,
                                                     0], [170, 255, 0],
          [85, 255, 0], [0, 255, 0],
          [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255],
          [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0,
                                                    255], [255, 0, 255],
          [255, 0, 170], [255, 0, 85]]
# Half-height (px) of the filled ellipse used to draw each limb.
stickwidth = 4
# OpenPose uses 18 keypoints (COCO's 17 plus a synthesized neck joint).
num_openpose_kpt = 18
num_link = len(limb_seq)
The provided code snippet includes necessary dependencies for implementing the `mmpose_to_openpose_visualization` function. Write a Python function `def mmpose_to_openpose_visualization(args, img_path, detector, pose_estimator)` to solve the following problem:
Visualize predicted keypoints of one image in openpose format.
Here is the function:
def mmpose_to_openpose_visualization(args, img_path, detector, pose_estimator):
    """Visualize predicted keypoints of one image in openpose format.

    Runs person detection, then top-down pose estimation, converts the
    17-keypoint COCO predictions to the 18-keypoint OpenPose layout
    (adding a synthetic neck joint), and renders the skeleton on a black
    canvas saved as ``openpose_<name>.png``.

    Args:
        args: Parsed CLI options; uses ``det_cat_id``, ``bbox_thr``,
            ``nms_thr`` and ``kpt_thr``.
        img_path (str): Path of the input image.
        detector: mmdet detection model.
        pose_estimator: mmpose top-down pose model.
    """
    # predict bbox
    scope = detector.cfg.get('default_scope', 'mmdet')
    if scope is not None:
        init_default_scope(scope)
    # NOTE(review): `inference_detector` is not among the visible imports —
    # presumably it comes from mmdet.apis elsewhere in the file; verify.
    det_result = inference_detector(detector, img_path)
    pred_instance = det_result.pred_instances.cpu().numpy()
    bboxes = np.concatenate(
        (pred_instance.bboxes, pred_instance.scores[:, None]), axis=1)
    # keep only the requested category above the score threshold
    bboxes = bboxes[np.logical_and(pred_instance.labels == args.det_cat_id,
                                   pred_instance.scores > args.bbox_thr)]
    bboxes = bboxes[nms(bboxes, args.nms_thr), :4]
    # predict keypoints
    pose_results = inference_topdown(pose_estimator, img_path, bboxes)
    data_samples = merge_data_samples(pose_results)
    # concatenate scores and keypoints -> (num_instance, 17, 3) as (x, y, conf)
    keypoints = np.concatenate(
        (data_samples.pred_instances.keypoints,
         data_samples.pred_instances.keypoint_scores.reshape(-1, 17, 1)),
        axis=-1)
    # compute neck joint as the midpoint of the two shoulders (kpts 5 and 6)
    neck = (keypoints[:, 5] + keypoints[:, 6]) / 2
    # BUGFIX: the original `if keypoints[:, 5, 2] < args.kpt_thr` evaluated a
    # whole array in a boolean context, raising ValueError for more than one
    # detected person. Zero the neck confidence per instance instead.
    low_conf = np.logical_or(keypoints[:, 5, 2] < args.kpt_thr,
                             keypoints[:, 6, 2] < args.kpt_thr)
    neck[low_conf, 2] = 0
    # 17 keypoints to 18 keypoints
    new_keypoints = np.insert(keypoints, 17, neck, axis=1)
    # mmpose format to openpose format
    openpose_idx = [15, 14, 17, 16, 2, 6, 3, 7, 4, 8, 12, 9, 13, 10, 1]
    mmpose_idx = [1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17]
    new_keypoints[:, openpose_idx, :] = new_keypoints[:, mmpose_idx, :]
    # show the results
    img = mmcv.imread(img_path, channel_order='rgb')
    # black background of the same size as the input image
    black_img = np.zeros_like(img)
    num_instance = new_keypoints.shape[0]
    # draw keypoints
    for i, j in product(range(num_instance), range(num_openpose_kpt)):
        x, y, conf = new_keypoints[i][j]
        if conf > args.kpt_thr:
            cv2.circle(black_img, (int(x), int(y)), 4, colors[j], thickness=-1)
    # draw links as filled rotated ellipses, blended onto the canvas
    cur_black_img = black_img.copy()
    for i, link_idx in product(range(num_instance), range(num_link)):
        conf = new_keypoints[i][np.array(limb_seq[link_idx]) - 1, 2]
        # draw the limb only if BOTH endpoints pass the threshold
        if np.sum(conf > args.kpt_thr) == 2:
            Y = new_keypoints[i][np.array(limb_seq[link_idx]) - 1, 0]
            X = new_keypoints[i][np.array(limb_seq[link_idx]) - 1, 1]
            mX = np.mean(X)
            mY = np.mean(Y)
            length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5
            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
            polygon = cv2.ellipse2Poly(
                (int(mY), int(mX)), (int(length / 2), stickwidth), int(angle),
                0, 360, 1)
            cv2.fillConvexPoly(cur_black_img, polygon, colors[link_idx])
    black_img = cv2.addWeighted(black_img, 0.4, cur_black_img, 0.6, 0)
    # save image (canvas is RGB; reverse channels to BGR for cv2.imwrite)
    out_file = 'openpose_' + os.path.splitext(
        os.path.basename(img_path))[0] + '.png'
    cv2.imwrite(out_file, black_img[:, :, [2, 1, 0]])
159,133 | import torch
import argparse
from collections import OrderedDict
def change_model(args):
    """Extract a plain model checkpoint from a distillation checkpoint.

    ``state_dict`` keys are filtered and their 8-character
    ``"student."``/``"teacher."`` prefix is stripped:

    * ``args.two_dis`` truthy: keep ``teacher.backbone*`` and
      ``student.head*`` entries (two-model distillation layout).
    * otherwise: keep every ``student.*`` entry.

    The ``optimizer`` entry, if present, is dropped, and the result is
    saved to ``args.output_path``.
    """
    dis_model = torch.load(args.dis_path)
    if args.two_dis:
        # distilled backbone comes from the teacher, head from the student
        wanted = ('teacher.backbone', 'student.head')
    else:
        wanted = ('student.',)
    # both "teacher." and "student." prefixes are 8 characters long
    kept = [(name[8:], v) for name, v in dis_model['state_dict'].items()
            if name.startswith(wanted)]
    dis_model['state_dict'] = OrderedDict(kept)
    if 'optimizer' in dis_model:
        dis_model.pop('optimizer')
    torch.save(dis_model, args.output_path)
159,134 | from argparse import ArgumentParser
from mmcv.image import imread
from mmpose.apis import inference_topdown, init_model
from mmpose.registry import VISUALIZERS
from mmpose.structures import merge_data_samples
def parse_args():
    """Parse command-line options for the top-down pose demo."""
    ap = ArgumentParser()
    add = ap.add_argument
    add('img', help='Image file')
    add('config', help='Config file')
    add('checkpoint', help='Checkpoint file')
    add('--out-file', default=None, help='Path to output file')
    add('--device', default='cuda:0', help='Device used for inference')
    add('--draw-heatmap', action='store_true',
        help='Visualize the predicted heatmap')
    add('--show-kpt-idx', action='store_true', default=False,
        help='Whether to show the index of keypoints')
    add('--skeleton-style', default='mmpose', type=str,
        choices=['mmpose', 'openpose'], help='Skeleton style selection')
    add('--kpt-thr', type=float, default=0.3,
        help='Visualizing keypoint thresholds')
    add('--radius', type=int, default=3,
        help='Keypoint radius for visualization')
    add('--thickness', type=int, default=1,
        help='Link thickness for visualization')
    add('--alpha', type=float, default=0.8,
        help='The transparency of bboxes')
    add('--show', action='store_true', default=False,
        help='whether to show img')
    return ap.parse_args()
159,135 | import mimetypes
import os
import time
from argparse import ArgumentParser
import cv2
import json_tricks as json
import mmcv
import mmengine
import numpy as np
from mmpose.apis import inference_bottomup, init_model
from mmpose.registry import VISUALIZERS
from mmpose.structures import split_instances
The provided code snippet includes necessary dependencies for implementing the `process_one_image` function. Write a Python function `def process_one_image(args, img, pose_estimator, visualizer=None, show_interval=0)` to solve the following problem:
Visualize predicted keypoints (and heatmaps) of one image.
Here is the function:
def process_one_image(args,
                      img,
                      pose_estimator,
                      visualizer=None,
                      show_interval=0):
    """Visualize predicted keypoints (and heatmaps) of one image.

    Runs bottom-up pose inference on ``img`` (path or ndarray), optionally
    draws the result with ``visualizer``, and returns the predicted
    instances of the single result sample.
    """
    # bottom-up inference on a single image; take the only result
    result = inference_bottomup(pose_estimator, img)[0]

    # normalise the input to an RGB ndarray for visualisation
    if isinstance(img, str):
        img = mmcv.imread(img, channel_order='rgb')
    elif isinstance(img, np.ndarray):
        img = mmcv.bgr2rgb(img)

    if visualizer is not None:
        visualizer.add_datasample(
            'result',
            img,
            data_sample=result,
            draw_gt=False,
            draw_bbox=False,
            draw_heatmap=args.draw_heatmap,
            show_kpt_idx=args.show_kpt_idx,
            show=args.show,
            wait_time=show_interval,
            kpt_thr=args.kpt_thr)

    return result.pred_instances
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.