repo stringlengths 7 55 | path stringlengths 4 223 | func_name stringlengths 1 134 | original_string stringlengths 75 104k | language stringclasses 1 value | code stringlengths 75 104k | code_tokens listlengths 19 28.4k | docstring stringlengths 1 46.9k | docstring_tokens listlengths 1 1.97k | sha stringlengths 40 40 | url stringlengths 87 315 | partition stringclasses 1 value |
|---|---|---|---|---|---|---|---|---|---|---|---|
mdredze/carmen-python | carmen/resolver.py | get_resolver | def get_resolver(order=None, options=None, modules=None):
"""Return a location resolver. The *order* argument, if given,
should be a list of resolver names; results from resolvers named
earlier in the list are preferred over later ones. For a list of
built-in resolver names, see :doc:`/resolvers`. The *options*
argument can be used to pass configuration options to individual
resolvers, in the form of a dictionary mapping resolver names to
keyword arguments::
{'geocode': {'max_distance': 50}}
The *modules* argument can be used to specify a list of additional
modules to look for resolvers in. See :doc:`/develop` for details.
"""
if not known_resolvers:
from . import resolvers as carmen_resolvers
modules = [carmen_resolvers] + (modules or [])
for module in modules:
for loader, name, _ in pkgutil.iter_modules(module.__path__):
full_name = module.__name__ + '.' + name
loader.find_module(full_name).load_module(full_name)
if order is None:
order = ('place', 'geocode', 'profile')
else:
order = tuple(order)
if options is None:
options = {}
resolvers = []
for resolver_name in order:
if resolver_name not in known_resolvers:
raise ValueError('unknown resolver name "%s"' % resolver_name)
resolvers.append((
resolver_name,
known_resolvers[resolver_name](**options.get(resolver_name, {}))))
return ResolverCollection(resolvers) | python | def get_resolver(order=None, options=None, modules=None):
"""Return a location resolver. The *order* argument, if given,
should be a list of resolver names; results from resolvers named
earlier in the list are preferred over later ones. For a list of
built-in resolver names, see :doc:`/resolvers`. The *options*
argument can be used to pass configuration options to individual
resolvers, in the form of a dictionary mapping resolver names to
keyword arguments::
{'geocode': {'max_distance': 50}}
The *modules* argument can be used to specify a list of additional
modules to look for resolvers in. See :doc:`/develop` for details.
"""
if not known_resolvers:
from . import resolvers as carmen_resolvers
modules = [carmen_resolvers] + (modules or [])
for module in modules:
for loader, name, _ in pkgutil.iter_modules(module.__path__):
full_name = module.__name__ + '.' + name
loader.find_module(full_name).load_module(full_name)
if order is None:
order = ('place', 'geocode', 'profile')
else:
order = tuple(order)
if options is None:
options = {}
resolvers = []
for resolver_name in order:
if resolver_name not in known_resolvers:
raise ValueError('unknown resolver name "%s"' % resolver_name)
resolvers.append((
resolver_name,
known_resolvers[resolver_name](**options.get(resolver_name, {}))))
return ResolverCollection(resolvers) | [
"def",
"get_resolver",
"(",
"order",
"=",
"None",
",",
"options",
"=",
"None",
",",
"modules",
"=",
"None",
")",
":",
"if",
"not",
"known_resolvers",
":",
"from",
".",
"import",
"resolvers",
"as",
"carmen_resolvers",
"modules",
"=",
"[",
"carmen_resolvers",
... | Return a location resolver. The *order* argument, if given,
should be a list of resolver names; results from resolvers named
earlier in the list are preferred over later ones. For a list of
built-in resolver names, see :doc:`/resolvers`. The *options*
argument can be used to pass configuration options to individual
resolvers, in the form of a dictionary mapping resolver names to
keyword arguments::
{'geocode': {'max_distance': 50}}
The *modules* argument can be used to specify a list of additional
modules to look for resolvers in. See :doc:`/develop` for details. | [
"Return",
"a",
"location",
"resolver",
".",
"The",
"*",
"order",
"*",
"argument",
"if",
"given",
"should",
"be",
"a",
"list",
"of",
"resolver",
"names",
";",
"results",
"from",
"resolvers",
"named",
"earlier",
"in",
"the",
"list",
"are",
"preferred",
"over... | 070b974222b5407f7aae2518ffbdf9df198b8e96 | https://github.com/mdredze/carmen-python/blob/070b974222b5407f7aae2518ffbdf9df198b8e96/carmen/resolver.py#L115-L149 | train |
mdredze/carmen-python | carmen/resolver.py | AbstractResolver.load_locations | def load_locations(self, location_file=None):
"""Load locations into this resolver from the given
*location_file*, which should contain one JSON object per line
representing a location. If *location_file* is not specified,
an internal location database is used."""
if location_file is None:
contents = pkgutil.get_data(__package__, 'data/locations.json')
contents_string = contents.decode("ascii")
locations = contents_string.split('\n')
else:
from .cli import open_file
with open_file(location_file, 'rb') as input:
locations = input.readlines()
for location_string in locations:
if location_string.strip():
location = Location(known=True, **json.loads(location_string))
self.location_id_to_location[location.id] = location
self.add_location(location) | python | def load_locations(self, location_file=None):
"""Load locations into this resolver from the given
*location_file*, which should contain one JSON object per line
representing a location. If *location_file* is not specified,
an internal location database is used."""
if location_file is None:
contents = pkgutil.get_data(__package__, 'data/locations.json')
contents_string = contents.decode("ascii")
locations = contents_string.split('\n')
else:
from .cli import open_file
with open_file(location_file, 'rb') as input:
locations = input.readlines()
for location_string in locations:
if location_string.strip():
location = Location(known=True, **json.loads(location_string))
self.location_id_to_location[location.id] = location
self.add_location(location) | [
"def",
"load_locations",
"(",
"self",
",",
"location_file",
"=",
"None",
")",
":",
"if",
"location_file",
"is",
"None",
":",
"contents",
"=",
"pkgutil",
".",
"get_data",
"(",
"__package__",
",",
"'data/locations.json'",
")",
"contents_string",
"=",
"contents",
... | Load locations into this resolver from the given
*location_file*, which should contain one JSON object per line
representing a location. If *location_file* is not specified,
an internal location database is used. | [
"Load",
"locations",
"into",
"this",
"resolver",
"from",
"the",
"given",
"*",
"location_file",
"*",
"which",
"should",
"contain",
"one",
"JSON",
"object",
"per",
"line",
"representing",
"a",
"location",
".",
"If",
"*",
"location_file",
"*",
"is",
"not",
"spe... | 070b974222b5407f7aae2518ffbdf9df198b8e96 | https://github.com/mdredze/carmen-python/blob/070b974222b5407f7aae2518ffbdf9df198b8e96/carmen/resolver.py#L22-L40 | train |
mdredze/carmen-python | carmen/location.py | Location.canonical | def canonical(self):
"""Return a tuple containing a canonicalized version of this
location's country, state, county, and city names."""
try:
return tuple(map(lambda x: x.lower(), self.name()))
except:
return tuple([x.lower() for x in self.name()]) | python | def canonical(self):
"""Return a tuple containing a canonicalized version of this
location's country, state, county, and city names."""
try:
return tuple(map(lambda x: x.lower(), self.name()))
except:
return tuple([x.lower() for x in self.name()]) | [
"def",
"canonical",
"(",
"self",
")",
":",
"try",
":",
"return",
"tuple",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"lower",
"(",
")",
",",
"self",
".",
"name",
"(",
")",
")",
")",
"except",
":",
"return",
"tuple",
"(",
"[",
"x",
".",
"l... | Return a tuple containing a canonicalized version of this
location's country, state, county, and city names. | [
"Return",
"a",
"tuple",
"containing",
"a",
"canonicalized",
"version",
"of",
"this",
"location",
"s",
"country",
"state",
"county",
"and",
"city",
"names",
"."
] | 070b974222b5407f7aae2518ffbdf9df198b8e96 | https://github.com/mdredze/carmen-python/blob/070b974222b5407f7aae2518ffbdf9df198b8e96/carmen/location.py#L79-L85 | train |
mdredze/carmen-python | carmen/location.py | Location.name | def name(self):
"""Return a tuple containing this location's country, state,
county, and city names."""
try:
return tuple(
getattr(self, x) if getattr(self, x) else u''
for x in ('country', 'state', 'county', 'city'))
except:
return tuple(
getattr(self, x) if getattr(self, x) else ''
for x in ('country', 'state', 'county', 'city')) | python | def name(self):
"""Return a tuple containing this location's country, state,
county, and city names."""
try:
return tuple(
getattr(self, x) if getattr(self, x) else u''
for x in ('country', 'state', 'county', 'city'))
except:
return tuple(
getattr(self, x) if getattr(self, x) else ''
for x in ('country', 'state', 'county', 'city')) | [
"def",
"name",
"(",
"self",
")",
":",
"try",
":",
"return",
"tuple",
"(",
"getattr",
"(",
"self",
",",
"x",
")",
"if",
"getattr",
"(",
"self",
",",
"x",
")",
"else",
"u''",
"for",
"x",
"in",
"(",
"'country'",
",",
"'state'",
",",
"'county'",
",",... | Return a tuple containing this location's country, state,
county, and city names. | [
"Return",
"a",
"tuple",
"containing",
"this",
"location",
"s",
"country",
"state",
"county",
"and",
"city",
"names",
"."
] | 070b974222b5407f7aae2518ffbdf9df198b8e96 | https://github.com/mdredze/carmen-python/blob/070b974222b5407f7aae2518ffbdf9df198b8e96/carmen/location.py#L87-L97 | train |
mdredze/carmen-python | carmen/location.py | Location.parent | def parent(self):
"""Return a location representing the administrative unit above
the one represented by this location."""
if self.city:
return Location(
country=self.country, state=self.state, county=self.county)
if self.county:
return Location(country=self.country, state=self.state)
if self.state:
return Location(country=self.country)
return Location() | python | def parent(self):
"""Return a location representing the administrative unit above
the one represented by this location."""
if self.city:
return Location(
country=self.country, state=self.state, county=self.county)
if self.county:
return Location(country=self.country, state=self.state)
if self.state:
return Location(country=self.country)
return Location() | [
"def",
"parent",
"(",
"self",
")",
":",
"if",
"self",
".",
"city",
":",
"return",
"Location",
"(",
"country",
"=",
"self",
".",
"country",
",",
"state",
"=",
"self",
".",
"state",
",",
"county",
"=",
"self",
".",
"county",
")",
"if",
"self",
".",
... | Return a location representing the administrative unit above
the one represented by this location. | [
"Return",
"a",
"location",
"representing",
"the",
"administrative",
"unit",
"above",
"the",
"one",
"represented",
"by",
"this",
"location",
"."
] | 070b974222b5407f7aae2518ffbdf9df198b8e96 | https://github.com/mdredze/carmen-python/blob/070b974222b5407f7aae2518ffbdf9df198b8e96/carmen/location.py#L99-L109 | train |
kristianfoerster/melodist | melodist/humidity.py | disaggregate_humidity | def disaggregate_humidity(data_daily, method='equal', temp=None,
a0=None, a1=None, kr=None,
month_hour_precip_mean=None, preserve_daily_mean=False):
"""general function for humidity disaggregation
Args:
daily_data: daily values
method: keyword specifying the disaggregation method to be used
temp: hourly temperature time series (necessary for some methods)
kr: parameter for linear_dewpoint_variation method (6 or 12)
month_hour_precip_mean: [month, hour, precip(y/n)] categorical mean values
preserve_daily_mean: if True, correct the daily mean values of the disaggregated
data with the observed daily means.
Returns:
Disaggregated hourly values of relative humidity.
"""
assert method in ('equal',
'minimal',
'dewpoint_regression',
'min_max',
'linear_dewpoint_variation',
'month_hour_precip_mean'), 'Invalid option'
if method == 'equal':
hum_disagg = melodist.distribute_equally(data_daily.hum)
elif method in ('minimal', 'dewpoint_regression', 'linear_dewpoint_variation'):
if method == 'minimal':
a0 = 0
a1 = 1
assert a0 is not None and a1 is not None, 'a0 and a1 must be specified'
tdew_daily = a0 + a1 * data_daily.tmin
tdew = melodist.distribute_equally(tdew_daily)
if method == 'linear_dewpoint_variation':
assert kr is not None, 'kr must be specified'
assert kr in (6, 12), 'kr must be 6 or 12'
tdew_delta = 0.5 * np.sin((temp.index.hour + 1) * np.pi / kr - 3. * np.pi / 4.) # eq. (21) from Debele et al. (2007)
tdew_nextday = tdew.shift(-24)
tdew_nextday.iloc[-24:] = tdew.iloc[-24:] # copy the last day
# eq. (20) from Debele et al. (2007):
# (corrected - the equation is wrong both in Debele et al. (2007) and Bregaglio et al. (2010) - it should
# be (T_dp,day)_(d+1) - (T_dp,day)_d instead of the other way around)
tdew += temp.index.hour / 24. * (tdew_nextday - tdew) + tdew_delta
sat_vap_press_tdew = util.vapor_pressure(tdew, 100)
sat_vap_press_t = util.vapor_pressure(temp, 100)
hum_disagg = pd.Series(index=temp.index, data=100 * sat_vap_press_tdew / sat_vap_press_t)
elif method == 'min_max':
assert 'hum_min' in data_daily.columns and 'hum_max' in data_daily.columns, \
'Minimum and maximum humidity must be present in data frame'
hmin = melodist.distribute_equally(data_daily.hum_min)
hmax = melodist.distribute_equally(data_daily.hum_max)
tmin = melodist.distribute_equally(data_daily.tmin)
tmax = melodist.distribute_equally(data_daily.tmax)
hum_disagg = hmax + (temp - tmin) / (tmax - tmin) * (hmin - hmax)
elif method == 'month_hour_precip_mean':
assert month_hour_precip_mean is not None
precip_equal = melodist.distribute_equally(data_daily.precip) # daily precipitation equally distributed to hourly values
hum_disagg = pd.Series(index=precip_equal.index)
locs = list(zip(hum_disagg.index.month, hum_disagg.index.hour, precip_equal > 0))
hum_disagg[:] = month_hour_precip_mean.loc[locs].values
if preserve_daily_mean:
daily_mean_df = pd.DataFrame(data=dict(obs=data_daily.hum, disagg=hum_disagg.resample('D').mean()))
bias = melodist.util.distribute_equally(daily_mean_df.disagg - daily_mean_df.obs)
bias = bias.fillna(0)
hum_disagg -= bias
return hum_disagg.clip(0, 100) | python | def disaggregate_humidity(data_daily, method='equal', temp=None,
a0=None, a1=None, kr=None,
month_hour_precip_mean=None, preserve_daily_mean=False):
"""general function for humidity disaggregation
Args:
daily_data: daily values
method: keyword specifying the disaggregation method to be used
temp: hourly temperature time series (necessary for some methods)
kr: parameter for linear_dewpoint_variation method (6 or 12)
month_hour_precip_mean: [month, hour, precip(y/n)] categorical mean values
preserve_daily_mean: if True, correct the daily mean values of the disaggregated
data with the observed daily means.
Returns:
Disaggregated hourly values of relative humidity.
"""
assert method in ('equal',
'minimal',
'dewpoint_regression',
'min_max',
'linear_dewpoint_variation',
'month_hour_precip_mean'), 'Invalid option'
if method == 'equal':
hum_disagg = melodist.distribute_equally(data_daily.hum)
elif method in ('minimal', 'dewpoint_regression', 'linear_dewpoint_variation'):
if method == 'minimal':
a0 = 0
a1 = 1
assert a0 is not None and a1 is not None, 'a0 and a1 must be specified'
tdew_daily = a0 + a1 * data_daily.tmin
tdew = melodist.distribute_equally(tdew_daily)
if method == 'linear_dewpoint_variation':
assert kr is not None, 'kr must be specified'
assert kr in (6, 12), 'kr must be 6 or 12'
tdew_delta = 0.5 * np.sin((temp.index.hour + 1) * np.pi / kr - 3. * np.pi / 4.) # eq. (21) from Debele et al. (2007)
tdew_nextday = tdew.shift(-24)
tdew_nextday.iloc[-24:] = tdew.iloc[-24:] # copy the last day
# eq. (20) from Debele et al. (2007):
# (corrected - the equation is wrong both in Debele et al. (2007) and Bregaglio et al. (2010) - it should
# be (T_dp,day)_(d+1) - (T_dp,day)_d instead of the other way around)
tdew += temp.index.hour / 24. * (tdew_nextday - tdew) + tdew_delta
sat_vap_press_tdew = util.vapor_pressure(tdew, 100)
sat_vap_press_t = util.vapor_pressure(temp, 100)
hum_disagg = pd.Series(index=temp.index, data=100 * sat_vap_press_tdew / sat_vap_press_t)
elif method == 'min_max':
assert 'hum_min' in data_daily.columns and 'hum_max' in data_daily.columns, \
'Minimum and maximum humidity must be present in data frame'
hmin = melodist.distribute_equally(data_daily.hum_min)
hmax = melodist.distribute_equally(data_daily.hum_max)
tmin = melodist.distribute_equally(data_daily.tmin)
tmax = melodist.distribute_equally(data_daily.tmax)
hum_disagg = hmax + (temp - tmin) / (tmax - tmin) * (hmin - hmax)
elif method == 'month_hour_precip_mean':
assert month_hour_precip_mean is not None
precip_equal = melodist.distribute_equally(data_daily.precip) # daily precipitation equally distributed to hourly values
hum_disagg = pd.Series(index=precip_equal.index)
locs = list(zip(hum_disagg.index.month, hum_disagg.index.hour, precip_equal > 0))
hum_disagg[:] = month_hour_precip_mean.loc[locs].values
if preserve_daily_mean:
daily_mean_df = pd.DataFrame(data=dict(obs=data_daily.hum, disagg=hum_disagg.resample('D').mean()))
bias = melodist.util.distribute_equally(daily_mean_df.disagg - daily_mean_df.obs)
bias = bias.fillna(0)
hum_disagg -= bias
return hum_disagg.clip(0, 100) | [
"def",
"disaggregate_humidity",
"(",
"data_daily",
",",
"method",
"=",
"'equal'",
",",
"temp",
"=",
"None",
",",
"a0",
"=",
"None",
",",
"a1",
"=",
"None",
",",
"kr",
"=",
"None",
",",
"month_hour_precip_mean",
"=",
"None",
",",
"preserve_daily_mean",
"=",... | general function for humidity disaggregation
Args:
daily_data: daily values
method: keyword specifying the disaggregation method to be used
temp: hourly temperature time series (necessary for some methods)
kr: parameter for linear_dewpoint_variation method (6 or 12)
month_hour_precip_mean: [month, hour, precip(y/n)] categorical mean values
preserve_daily_mean: if True, correct the daily mean values of the disaggregated
data with the observed daily means.
Returns:
Disaggregated hourly values of relative humidity. | [
"general",
"function",
"for",
"humidity",
"disaggregation"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/humidity.py#L33-L109 | train |
kristianfoerster/melodist | melodist/wind.py | _cosine_function | def _cosine_function(x, a, b, t_shift):
"""genrates a diurnal course of windspeed accroding to the cosine function
Args:
x: series of euqally distributed windspeed values
a: parameter a for the cosine function
b: parameter b for the cosine function
t_shift: parameter t_shift for the cosine function
Returns:
series including diurnal course of windspeed.
"""
mean_wind, t = x
return a * mean_wind * np.cos(np.pi * (t - t_shift) / 12) + b * mean_wind | python | def _cosine_function(x, a, b, t_shift):
"""genrates a diurnal course of windspeed accroding to the cosine function
Args:
x: series of euqally distributed windspeed values
a: parameter a for the cosine function
b: parameter b for the cosine function
t_shift: parameter t_shift for the cosine function
Returns:
series including diurnal course of windspeed.
"""
mean_wind, t = x
return a * mean_wind * np.cos(np.pi * (t - t_shift) / 12) + b * mean_wind | [
"def",
"_cosine_function",
"(",
"x",
",",
"a",
",",
"b",
",",
"t_shift",
")",
":",
"mean_wind",
",",
"t",
"=",
"x",
"return",
"a",
"*",
"mean_wind",
"*",
"np",
".",
"cos",
"(",
"np",
".",
"pi",
"*",
"(",
"t",
"-",
"t_shift",
")",
"/",
"12",
"... | genrates a diurnal course of windspeed accroding to the cosine function
Args:
x: series of euqally distributed windspeed values
a: parameter a for the cosine function
b: parameter b for the cosine function
t_shift: parameter t_shift for the cosine function
Returns:
series including diurnal course of windspeed. | [
"genrates",
"a",
"diurnal",
"course",
"of",
"windspeed",
"accroding",
"to",
"the",
"cosine",
"function"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/wind.py#L33-L47 | train |
kristianfoerster/melodist | melodist/wind.py | disaggregate_wind | def disaggregate_wind(wind_daily, method='equal', a=None, b=None, t_shift=None):
"""general function for windspeed disaggregation
Args:
wind_daily: daily values
method: keyword specifying the disaggregation method to be used
a: parameter a for the cosine function
b: parameter b for the cosine function
t_shift: parameter t_shift for the cosine function
Returns:
Disaggregated hourly values of windspeed.
"""
assert method in ('equal', 'cosine', 'random'), 'Invalid method'
wind_eq = melodist.distribute_equally(wind_daily)
if method == 'equal':
wind_disagg = wind_eq
elif method == 'cosine':
assert None not in (a, b, t_shift)
wind_disagg = _cosine_function(np.array([wind_eq.values, wind_eq.index.hour]), a, b, t_shift)
elif method == 'random':
wind_disagg = wind_eq * (-np.log(np.random.rand(len(wind_eq))))**0.3
return wind_disagg | python | def disaggregate_wind(wind_daily, method='equal', a=None, b=None, t_shift=None):
"""general function for windspeed disaggregation
Args:
wind_daily: daily values
method: keyword specifying the disaggregation method to be used
a: parameter a for the cosine function
b: parameter b for the cosine function
t_shift: parameter t_shift for the cosine function
Returns:
Disaggregated hourly values of windspeed.
"""
assert method in ('equal', 'cosine', 'random'), 'Invalid method'
wind_eq = melodist.distribute_equally(wind_daily)
if method == 'equal':
wind_disagg = wind_eq
elif method == 'cosine':
assert None not in (a, b, t_shift)
wind_disagg = _cosine_function(np.array([wind_eq.values, wind_eq.index.hour]), a, b, t_shift)
elif method == 'random':
wind_disagg = wind_eq * (-np.log(np.random.rand(len(wind_eq))))**0.3
return wind_disagg | [
"def",
"disaggregate_wind",
"(",
"wind_daily",
",",
"method",
"=",
"'equal'",
",",
"a",
"=",
"None",
",",
"b",
"=",
"None",
",",
"t_shift",
"=",
"None",
")",
":",
"assert",
"method",
"in",
"(",
"'equal'",
",",
"'cosine'",
",",
"'random'",
")",
",",
"... | general function for windspeed disaggregation
Args:
wind_daily: daily values
method: keyword specifying the disaggregation method to be used
a: parameter a for the cosine function
b: parameter b for the cosine function
t_shift: parameter t_shift for the cosine function
Returns:
Disaggregated hourly values of windspeed. | [
"general",
"function",
"for",
"windspeed",
"disaggregation"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/wind.py#L50-L75 | train |
kristianfoerster/melodist | melodist/wind.py | fit_cosine_function | def fit_cosine_function(wind):
"""fits a cosine function to observed hourly windspeed data
Args:
wind: observed hourly windspeed data
Returns:
parameters needed to generate diurnal features of windspeed using a cosine function
"""
wind_daily = wind.groupby(wind.index.date).mean()
wind_daily_hourly = pd.Series(index=wind.index, data=wind_daily.loc[wind.index.date].values) # daily values evenly distributed over the hours
df = pd.DataFrame(data=dict(daily=wind_daily_hourly, hourly=wind)).dropna(how='any')
x = np.array([df.daily, df.index.hour])
popt, pcov = scipy.optimize.curve_fit(_cosine_function, x, df.hourly)
return popt | python | def fit_cosine_function(wind):
"""fits a cosine function to observed hourly windspeed data
Args:
wind: observed hourly windspeed data
Returns:
parameters needed to generate diurnal features of windspeed using a cosine function
"""
wind_daily = wind.groupby(wind.index.date).mean()
wind_daily_hourly = pd.Series(index=wind.index, data=wind_daily.loc[wind.index.date].values) # daily values evenly distributed over the hours
df = pd.DataFrame(data=dict(daily=wind_daily_hourly, hourly=wind)).dropna(how='any')
x = np.array([df.daily, df.index.hour])
popt, pcov = scipy.optimize.curve_fit(_cosine_function, x, df.hourly)
return popt | [
"def",
"fit_cosine_function",
"(",
"wind",
")",
":",
"wind_daily",
"=",
"wind",
".",
"groupby",
"(",
"wind",
".",
"index",
".",
"date",
")",
".",
"mean",
"(",
")",
"wind_daily_hourly",
"=",
"pd",
".",
"Series",
"(",
"index",
"=",
"wind",
".",
"index",
... | fits a cosine function to observed hourly windspeed data
Args:
wind: observed hourly windspeed data
Returns:
parameters needed to generate diurnal features of windspeed using a cosine function | [
"fits",
"a",
"cosine",
"function",
"to",
"observed",
"hourly",
"windspeed",
"data"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/wind.py#L78-L94 | train |
kristianfoerster/melodist | melodist/data_io.py | read_smet | def read_smet(filename, mode):
"""Reads smet data and returns the data in required dataformat (pd df)
See https://models.slf.ch/docserver/meteoio/SMET_specifications.pdf
for further details on the specifications of this file format.
Parameters
----
filename : SMET file to read
mode : "d" for daily and "h" for hourly input
Returns
----
[header, data]
header: header as dict
data : data as pd df
"""
# dictionary
# based on smet spec V.1.1 and self defined
# daily data
dict_d = {'TA': 'tmean',
'TMAX': 'tmax', # no spec
'TMIN': 'tmin', # no spec
'PSUM': 'precip',
'ISWR': 'glob', # no spec
'RH': 'hum',
'VW': 'wind'}
# hourly data
dict_h = {'TA': 'temp',
'PSUM': 'precip',
'ISWR': 'glob', # no spec
'RH': 'hum',
'VW': 'wind'}
with open(filename) as f:
in_header = False
data_start = None
header = collections.OrderedDict()
for line_num, line in enumerate(f):
if line.strip() == '[HEADER]':
in_header = True
continue
elif line.strip() == '[DATA]':
data_start = line_num + 1
break
if in_header:
line_split = line.split('=')
k = line_split[0].strip()
v = line_split[1].strip()
header[k] = v
# get column names
columns = header['fields'].split()
multiplier = [float(x) for x in header['units_multiplier'].split()][1:]
data = pd.read_table(
filename,
sep=r'\s+',
na_values=[-999],
skiprows=data_start,
names=columns,
index_col='timestamp',
parse_dates=True,
)
data = data*multiplier
del data.index.name
# rename columns
if mode == "d":
data = data.rename(columns=dict_d)
if mode == "h":
data = data.rename(columns=dict_h)
return header, data | python | def read_smet(filename, mode):
"""Reads smet data and returns the data in required dataformat (pd df)
See https://models.slf.ch/docserver/meteoio/SMET_specifications.pdf
for further details on the specifications of this file format.
Parameters
----
filename : SMET file to read
mode : "d" for daily and "h" for hourly input
Returns
----
[header, data]
header: header as dict
data : data as pd df
"""
# dictionary
# based on smet spec V.1.1 and self defined
# daily data
dict_d = {'TA': 'tmean',
'TMAX': 'tmax', # no spec
'TMIN': 'tmin', # no spec
'PSUM': 'precip',
'ISWR': 'glob', # no spec
'RH': 'hum',
'VW': 'wind'}
# hourly data
dict_h = {'TA': 'temp',
'PSUM': 'precip',
'ISWR': 'glob', # no spec
'RH': 'hum',
'VW': 'wind'}
with open(filename) as f:
in_header = False
data_start = None
header = collections.OrderedDict()
for line_num, line in enumerate(f):
if line.strip() == '[HEADER]':
in_header = True
continue
elif line.strip() == '[DATA]':
data_start = line_num + 1
break
if in_header:
line_split = line.split('=')
k = line_split[0].strip()
v = line_split[1].strip()
header[k] = v
# get column names
columns = header['fields'].split()
multiplier = [float(x) for x in header['units_multiplier'].split()][1:]
data = pd.read_table(
filename,
sep=r'\s+',
na_values=[-999],
skiprows=data_start,
names=columns,
index_col='timestamp',
parse_dates=True,
)
data = data*multiplier
del data.index.name
# rename columns
if mode == "d":
data = data.rename(columns=dict_d)
if mode == "h":
data = data.rename(columns=dict_h)
return header, data | [
"def",
"read_smet",
"(",
"filename",
",",
"mode",
")",
":",
"# dictionary",
"# based on smet spec V.1.1 and self defined",
"# daily data",
"dict_d",
"=",
"{",
"'TA'",
":",
"'tmean'",
",",
"'TMAX'",
":",
"'tmax'",
",",
"# no spec",
"'TMIN'",
":",
"'tmin'",
",",
"... | Reads smet data and returns the data in required dataformat (pd df)
See https://models.slf.ch/docserver/meteoio/SMET_specifications.pdf
for further details on the specifications of this file format.
Parameters
----
filename : SMET file to read
mode : "d" for daily and "h" for hourly input
Returns
----
[header, data]
header: header as dict
data : data as pd df | [
"Reads",
"smet",
"data",
"and",
"returns",
"the",
"data",
"in",
"required",
"dataformat",
"(",
"pd",
"df",
")"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/data_io.py#L33-L113 | train |
kristianfoerster/melodist | melodist/data_io.py | read_dwd | def read_dwd(filename, metadata, mode="d", skip_last=True):
"""Reads dwd (German Weather Service) data and returns the data in required
dataformat (pd df)
Parameters
----
filename : DWD file to read (full path) / list of hourly files (RR+TU+FF)
metadata : corresponding DWD metadata file to read
mode : "d" for daily and "h" for hourly input
skip_last : boolen, skips last line due to file format
Returns
----
[header, data]
header: header as dict
data : data as pd df
"""
def read_single_dwd(filename, metadata, mode, skip_last):
# Param names {'DWD':'dissag_def'}
dict_d = {'LUFTTEMPERATUR': 'tmean',
'LUFTTEMPERATUR_MINIMUM': 'tmin', # no spec
'LUFTTEMPERATUR_MAXIMUM': 'tmax', # no spec
'NIEDERSCHLAGSHOEHE': 'precip',
'GLOBAL_KW_J': 'glob', # no spec
'REL_FEUCHTE': 'hum',
'WINDGESCHWINDIGKEIT': 'wind',
'SONNENSCHEINDAUER': 'sun_h'}
# ---read meta------------------
meta = pd.read_csv(
metadata,
sep=';'
)
# remove whitespace from header columns
meta.rename(columns=lambda x: x.strip(), inplace=True)
header = {"Stations_id": meta.Stations_id[meta.last_valid_index()],
"Stationsname": meta.Stationsname[meta.last_valid_index()],
# workaround for colnames with . (Geogr.Breite)
"Breite": meta.iloc[meta.last_valid_index(), 2], # DezDeg
"Laenge": meta.iloc[meta.last_valid_index(), 3] # DezDeg
}
# ---read data------------------
if skip_last is not None:
num_lines = sum(1 for line in open(filename))
skip_last = [num_lines-1]
# hourly data must be parsed by custom definition
if mode == "d":
data = pd.read_csv(
filename,
sep=';',
na_values='-999',
index_col=' MESS_DATUM',
parse_dates=True,
skiprows=skip_last
)
# hourly data must be parsed by custom definition
if mode == "h":
def date_parser(date_time):
hour = date_time[8:10]
day = date_time[6:8]
month = date_time[4:6]
year = date_time[0:4]
minute = '00'
sec = '00'
return pd.Timestamp('%s-%s-%s %s:%s:%s' % (year, month, day, hour, minute, sec))
data = pd.read_csv(
filename,
sep=';',
na_values='-999',
index_col=' MESS_DATUM',
date_parser=date_parser,
skiprows=skip_last
)
# remove whitespace from header columns
data.rename(columns=lambda x: x.strip(), inplace=True)
# rename to dissag definition
data = data.rename(columns=dict_d)
# get colums which are not defined
drop = [col for col in data.columns if col not in dict_d.values()]
# delete columns
data = data.drop(drop, axis=1)
# convert temperatures to Kelvin (+273.15)
if 'tmin' in data.columns:
data["tmin"] = data["tmin"] + 273.15
if 'tmax' in data.columns:
data["tmax"] = data["tmax"] + 273.15
if 'tmean' in data.columns:
data["tmean"] = data["tmean"] + 273.15
if 'temp' in data.columns:
data["temp"] = data["temp"] + 273.15
return header, data
if type(filename) == list:
i = 1
for file in filename:
header, data_h = read_single_dwd(file, metadata, mode, skip_last)
if i == 1:
data = data_h
else:
data = data.join(data_h, how='outer')
i += 1
else:
header, data = read_single_dwd(filename, metadata, mode, skip_last)
return header, data | python | def read_dwd(filename, metadata, mode="d", skip_last=True):
"""Reads dwd (German Weather Service) data and returns the data in required
dataformat (pd df)
Parameters
----
filename : DWD file to read (full path) / list of hourly files (RR+TU+FF)
metadata : corresponding DWD metadata file to read
mode : "d" for daily and "h" for hourly input
skip_last : boolen, skips last line due to file format
Returns
----
[header, data]
header: header as dict
data : data as pd df
"""
def read_single_dwd(filename, metadata, mode, skip_last):
# Param names {'DWD':'dissag_def'}
dict_d = {'LUFTTEMPERATUR': 'tmean',
'LUFTTEMPERATUR_MINIMUM': 'tmin', # no spec
'LUFTTEMPERATUR_MAXIMUM': 'tmax', # no spec
'NIEDERSCHLAGSHOEHE': 'precip',
'GLOBAL_KW_J': 'glob', # no spec
'REL_FEUCHTE': 'hum',
'WINDGESCHWINDIGKEIT': 'wind',
'SONNENSCHEINDAUER': 'sun_h'}
# ---read meta------------------
meta = pd.read_csv(
metadata,
sep=';'
)
# remove whitespace from header columns
meta.rename(columns=lambda x: x.strip(), inplace=True)
header = {"Stations_id": meta.Stations_id[meta.last_valid_index()],
"Stationsname": meta.Stationsname[meta.last_valid_index()],
# workaround for colnames with . (Geogr.Breite)
"Breite": meta.iloc[meta.last_valid_index(), 2], # DezDeg
"Laenge": meta.iloc[meta.last_valid_index(), 3] # DezDeg
}
# ---read data------------------
if skip_last is not None:
num_lines = sum(1 for line in open(filename))
skip_last = [num_lines-1]
# hourly data must be parsed by custom definition
if mode == "d":
data = pd.read_csv(
filename,
sep=';',
na_values='-999',
index_col=' MESS_DATUM',
parse_dates=True,
skiprows=skip_last
)
# hourly data must be parsed by custom definition
if mode == "h":
def date_parser(date_time):
hour = date_time[8:10]
day = date_time[6:8]
month = date_time[4:6]
year = date_time[0:4]
minute = '00'
sec = '00'
return pd.Timestamp('%s-%s-%s %s:%s:%s' % (year, month, day, hour, minute, sec))
data = pd.read_csv(
filename,
sep=';',
na_values='-999',
index_col=' MESS_DATUM',
date_parser=date_parser,
skiprows=skip_last
)
# remove whitespace from header columns
data.rename(columns=lambda x: x.strip(), inplace=True)
# rename to dissag definition
data = data.rename(columns=dict_d)
# get colums which are not defined
drop = [col for col in data.columns if col not in dict_d.values()]
# delete columns
data = data.drop(drop, axis=1)
# convert temperatures to Kelvin (+273.15)
if 'tmin' in data.columns:
data["tmin"] = data["tmin"] + 273.15
if 'tmax' in data.columns:
data["tmax"] = data["tmax"] + 273.15
if 'tmean' in data.columns:
data["tmean"] = data["tmean"] + 273.15
if 'temp' in data.columns:
data["temp"] = data["temp"] + 273.15
return header, data
if type(filename) == list:
i = 1
for file in filename:
header, data_h = read_single_dwd(file, metadata, mode, skip_last)
if i == 1:
data = data_h
else:
data = data.join(data_h, how='outer')
i += 1
else:
header, data = read_single_dwd(filename, metadata, mode, skip_last)
return header, data | [
"def",
"read_dwd",
"(",
"filename",
",",
"metadata",
",",
"mode",
"=",
"\"d\"",
",",
"skip_last",
"=",
"True",
")",
":",
"def",
"read_single_dwd",
"(",
"filename",
",",
"metadata",
",",
"mode",
",",
"skip_last",
")",
":",
"# Param names {'DWD':'dissag_def'}",
... | Reads dwd (German Weather Service) data and returns the data in required
dataformat (pd df)
Parameters
----
filename : DWD file to read (full path) / list of hourly files (RR+TU+FF)
metadata : corresponding DWD metadata file to read
mode : "d" for daily and "h" for hourly input
skip_last : boolen, skips last line due to file format
Returns
----
[header, data]
header: header as dict
data : data as pd df | [
"Reads",
"dwd",
"(",
"German",
"Weather",
"Service",
")",
"data",
"and",
"returns",
"the",
"data",
"in",
"required",
"dataformat",
"(",
"pd",
"df",
")"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/data_io.py#L116-L233 | train |
kristianfoerster/melodist | melodist/data_io.py | write_smet | def write_smet(filename, data, metadata, nodata_value=-999, mode='h', check_nan=True):
"""writes smet files
Parameters
----
filename : filename/loction of output
data : data to write as pandas df
metadata: header to write input as dict
nodata_value: Nodata Value to write/use
mode: defines if to write daily ("d") or continuos data (default 'h')
check_nan: will check if only nans in data and if true will not write this colums (default True)
"""
# dictionary
# based on smet spec V.1.1 and selfdefined
# daily data
dict_d= {'tmean':'TA',
'tmin':'TMAX', #no spec
'tmax':'TMIN', #no spec
'precip':'PSUM',
'glob':'ISWR', #no spec
'hum':'RH',
'wind':'VW'
}
#hourly data
dict_h= {'temp':'TA',
'precip':'PSUM',
'glob':'ISWR', #no spec
'hum':'RH',
'wind':'VW'
}
#rename columns
if mode == "d":
data = data.rename(columns=dict_d)
if mode == "h":
data = data.rename(columns=dict_h)
if check_nan:
#get all colums with data
datas_in = data.sum().dropna().to_frame().T
#get colums with no datas
drop = [data_nan for data_nan in data.columns if data_nan not in datas_in]
#delete columns
data = data.drop(drop, axis=1)
with open(filename, 'w') as f:
#preparing data
#converte date_times to SMET timestamps
if mode == "d":
t = '%Y-%m-%dT00:00'
if mode == "h":
t = '%Y-%m-%dT%H:%M'
data['timestamp'] = [d.strftime(t) for d in data.index]
cols = data.columns.tolist()
cols = cols[-1:] + cols[:-1]
data = data[cols]
#metadatas update
metadata['fields'] = ' '.join(data.columns)
metadata["units_multiplier"] = len(metadata['fields'].split())*"1 "
#writing data
#metadata
f.write('SMET 1.1 ASCII\n')
f.write('[HEADER]\n')
for k, v in metadata.items():
f.write('{} = {}\n'.format(k, v))
#data
f.write('[DATA]\n')
data_str = data.fillna(nodata_value).to_string(
header=False,
index=False,
float_format=lambda x: '{:.2f}'.format(x),
)
f.write(data_str) | python | def write_smet(filename, data, metadata, nodata_value=-999, mode='h', check_nan=True):
"""writes smet files
Parameters
----
filename : filename/loction of output
data : data to write as pandas df
metadata: header to write input as dict
nodata_value: Nodata Value to write/use
mode: defines if to write daily ("d") or continuos data (default 'h')
check_nan: will check if only nans in data and if true will not write this colums (default True)
"""
# dictionary
# based on smet spec V.1.1 and selfdefined
# daily data
dict_d= {'tmean':'TA',
'tmin':'TMAX', #no spec
'tmax':'TMIN', #no spec
'precip':'PSUM',
'glob':'ISWR', #no spec
'hum':'RH',
'wind':'VW'
}
#hourly data
dict_h= {'temp':'TA',
'precip':'PSUM',
'glob':'ISWR', #no spec
'hum':'RH',
'wind':'VW'
}
#rename columns
if mode == "d":
data = data.rename(columns=dict_d)
if mode == "h":
data = data.rename(columns=dict_h)
if check_nan:
#get all colums with data
datas_in = data.sum().dropna().to_frame().T
#get colums with no datas
drop = [data_nan for data_nan in data.columns if data_nan not in datas_in]
#delete columns
data = data.drop(drop, axis=1)
with open(filename, 'w') as f:
#preparing data
#converte date_times to SMET timestamps
if mode == "d":
t = '%Y-%m-%dT00:00'
if mode == "h":
t = '%Y-%m-%dT%H:%M'
data['timestamp'] = [d.strftime(t) for d in data.index]
cols = data.columns.tolist()
cols = cols[-1:] + cols[:-1]
data = data[cols]
#metadatas update
metadata['fields'] = ' '.join(data.columns)
metadata["units_multiplier"] = len(metadata['fields'].split())*"1 "
#writing data
#metadata
f.write('SMET 1.1 ASCII\n')
f.write('[HEADER]\n')
for k, v in metadata.items():
f.write('{} = {}\n'.format(k, v))
#data
f.write('[DATA]\n')
data_str = data.fillna(nodata_value).to_string(
header=False,
index=False,
float_format=lambda x: '{:.2f}'.format(x),
)
f.write(data_str) | [
"def",
"write_smet",
"(",
"filename",
",",
"data",
",",
"metadata",
",",
"nodata_value",
"=",
"-",
"999",
",",
"mode",
"=",
"'h'",
",",
"check_nan",
"=",
"True",
")",
":",
"# dictionary",
"# based on smet spec V.1.1 and selfdefined",
"# daily data",
"dict_d",
"=... | writes smet files
Parameters
----
filename : filename/loction of output
data : data to write as pandas df
metadata: header to write input as dict
nodata_value: Nodata Value to write/use
mode: defines if to write daily ("d") or continuos data (default 'h')
check_nan: will check if only nans in data and if true will not write this colums (default True) | [
"writes",
"smet",
"files"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/data_io.py#L236-L320 | train |
kristianfoerster/melodist | melodist/data_io.py | read_single_knmi_file | def read_single_knmi_file(filename):
"""reads a single file of KNMI's meteorological time series
data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens
Args:
filename: the file to be opened
Returns:
pandas data frame including time series
"""
hourly_data_obs_raw = pd.read_csv(
filename,
parse_dates=[['YYYYMMDD', 'HH']],
date_parser=lambda yyyymmdd, hh: pd.datetime(int(str(yyyymmdd)[0:4]),
int(str(yyyymmdd)[4:6]),
int(str(yyyymmdd)[6:8]),
int(hh) - 1),
skiprows=31,
skipinitialspace=True,
na_values='',
keep_date_col=True,
)
hourly_data_obs_raw.index = hourly_data_obs_raw['YYYYMMDD_HH']
hourly_data_obs_raw.index = hourly_data_obs_raw.index + pd.Timedelta(hours=1)
columns_hourly = ['temp', 'precip', 'glob', 'hum', 'wind', 'ssd']
hourly_data_obs = pd.DataFrame(
index=hourly_data_obs_raw.index,
columns=columns_hourly,
data=dict(
temp=hourly_data_obs_raw['T'] / 10 + 273.15,
precip=hourly_data_obs_raw['RH'] / 10,
glob=hourly_data_obs_raw['Q'] * 10000 / 3600.,
hum=hourly_data_obs_raw['U'],
wind=hourly_data_obs_raw['FH'] / 10,
ssd=hourly_data_obs_raw['SQ'] * 6,
),
)
# remove negative values
negative_values = hourly_data_obs['precip'] < 0.0
hourly_data_obs.loc[negative_values, 'precip'] = 0.0
return hourly_data_obs | python | def read_single_knmi_file(filename):
"""reads a single file of KNMI's meteorological time series
data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens
Args:
filename: the file to be opened
Returns:
pandas data frame including time series
"""
hourly_data_obs_raw = pd.read_csv(
filename,
parse_dates=[['YYYYMMDD', 'HH']],
date_parser=lambda yyyymmdd, hh: pd.datetime(int(str(yyyymmdd)[0:4]),
int(str(yyyymmdd)[4:6]),
int(str(yyyymmdd)[6:8]),
int(hh) - 1),
skiprows=31,
skipinitialspace=True,
na_values='',
keep_date_col=True,
)
hourly_data_obs_raw.index = hourly_data_obs_raw['YYYYMMDD_HH']
hourly_data_obs_raw.index = hourly_data_obs_raw.index + pd.Timedelta(hours=1)
columns_hourly = ['temp', 'precip', 'glob', 'hum', 'wind', 'ssd']
hourly_data_obs = pd.DataFrame(
index=hourly_data_obs_raw.index,
columns=columns_hourly,
data=dict(
temp=hourly_data_obs_raw['T'] / 10 + 273.15,
precip=hourly_data_obs_raw['RH'] / 10,
glob=hourly_data_obs_raw['Q'] * 10000 / 3600.,
hum=hourly_data_obs_raw['U'],
wind=hourly_data_obs_raw['FH'] / 10,
ssd=hourly_data_obs_raw['SQ'] * 6,
),
)
# remove negative values
negative_values = hourly_data_obs['precip'] < 0.0
hourly_data_obs.loc[negative_values, 'precip'] = 0.0
return hourly_data_obs | [
"def",
"read_single_knmi_file",
"(",
"filename",
")",
":",
"hourly_data_obs_raw",
"=",
"pd",
".",
"read_csv",
"(",
"filename",
",",
"parse_dates",
"=",
"[",
"[",
"'YYYYMMDD'",
",",
"'HH'",
"]",
"]",
",",
"date_parser",
"=",
"lambda",
"yyyymmdd",
",",
"hh",
... | reads a single file of KNMI's meteorological time series
data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens
Args:
filename: the file to be opened
Returns:
pandas data frame including time series | [
"reads",
"a",
"single",
"file",
"of",
"KNMI",
"s",
"meteorological",
"time",
"series"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/data_io.py#L323-L367 | train |
kristianfoerster/melodist | melodist/data_io.py | read_knmi_dataset | def read_knmi_dataset(directory):
"""Reads files from a directory and merges the time series
Please note: For each station, a separate directory must be provided!
data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens
Args:
directory: directory including the files
Returns:
pandas data frame including time series
"""
filemask = '%s*.txt' % directory
filelist = glob.glob(filemask)
columns_hourly = ['temp', 'precip', 'glob', 'hum', 'wind', 'ssd']
ts = pd.DataFrame(columns=columns_hourly)
first_call = True
for file_i in filelist:
print(file_i)
current = read_single_knmi_file(file_i)
if(first_call):
ts = current
first_call = False
else:
ts = pd.concat([ts, current])
return ts | python | def read_knmi_dataset(directory):
"""Reads files from a directory and merges the time series
Please note: For each station, a separate directory must be provided!
data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens
Args:
directory: directory including the files
Returns:
pandas data frame including time series
"""
filemask = '%s*.txt' % directory
filelist = glob.glob(filemask)
columns_hourly = ['temp', 'precip', 'glob', 'hum', 'wind', 'ssd']
ts = pd.DataFrame(columns=columns_hourly)
first_call = True
for file_i in filelist:
print(file_i)
current = read_single_knmi_file(file_i)
if(first_call):
ts = current
first_call = False
else:
ts = pd.concat([ts, current])
return ts | [
"def",
"read_knmi_dataset",
"(",
"directory",
")",
":",
"filemask",
"=",
"'%s*.txt'",
"%",
"directory",
"filelist",
"=",
"glob",
".",
"glob",
"(",
"filemask",
")",
"columns_hourly",
"=",
"[",
"'temp'",
",",
"'precip'",
",",
"'glob'",
",",
"'hum'",
",",
"'w... | Reads files from a directory and merges the time series
Please note: For each station, a separate directory must be provided!
data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens
Args:
directory: directory including the files
Returns:
pandas data frame including time series | [
"Reads",
"files",
"from",
"a",
"directory",
"and",
"merges",
"the",
"time",
"series"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/data_io.py#L370-L397 | train |
kristianfoerster/melodist | melodist/station.py | Station.calc_sun_times | def calc_sun_times(self):
"""
Computes the times of sunrise, solar noon, and sunset for each day.
"""
self.sun_times = melodist.util.get_sun_times(self.data_daily.index, self.lon, self.lat, self.timezone) | python | def calc_sun_times(self):
"""
Computes the times of sunrise, solar noon, and sunset for each day.
"""
self.sun_times = melodist.util.get_sun_times(self.data_daily.index, self.lon, self.lat, self.timezone) | [
"def",
"calc_sun_times",
"(",
"self",
")",
":",
"self",
".",
"sun_times",
"=",
"melodist",
".",
"util",
".",
"get_sun_times",
"(",
"self",
".",
"data_daily",
".",
"index",
",",
"self",
".",
"lon",
",",
"self",
".",
"lat",
",",
"self",
".",
"timezone",
... | Computes the times of sunrise, solar noon, and sunset for each day. | [
"Computes",
"the",
"times",
"of",
"sunrise",
"solar",
"noon",
"and",
"sunset",
"for",
"each",
"day",
"."
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L174-L179 | train |
kristianfoerster/melodist | melodist/station.py | Station.disaggregate_wind | def disaggregate_wind(self, method='equal'):
"""
Disaggregate wind speed.
Parameters
----------
method : str, optional
Disaggregation method.
``equal``
Mean daily wind speed is duplicated for the 24 hours of the day. (Default)
``cosine``
Distributes daily mean wind speed using a cosine function derived from hourly
observations.
``random``
Draws random numbers to distribute wind speed (usually not conserving the
daily average).
"""
self.data_disagg.wind = melodist.disaggregate_wind(self.data_daily.wind, method=method, **self.statistics.wind) | python | def disaggregate_wind(self, method='equal'):
"""
Disaggregate wind speed.
Parameters
----------
method : str, optional
Disaggregation method.
``equal``
Mean daily wind speed is duplicated for the 24 hours of the day. (Default)
``cosine``
Distributes daily mean wind speed using a cosine function derived from hourly
observations.
``random``
Draws random numbers to distribute wind speed (usually not conserving the
daily average).
"""
self.data_disagg.wind = melodist.disaggregate_wind(self.data_daily.wind, method=method, **self.statistics.wind) | [
"def",
"disaggregate_wind",
"(",
"self",
",",
"method",
"=",
"'equal'",
")",
":",
"self",
".",
"data_disagg",
".",
"wind",
"=",
"melodist",
".",
"disaggregate_wind",
"(",
"self",
".",
"data_daily",
".",
"wind",
",",
"method",
"=",
"method",
",",
"*",
"*"... | Disaggregate wind speed.
Parameters
----------
method : str, optional
Disaggregation method.
``equal``
Mean daily wind speed is duplicated for the 24 hours of the day. (Default)
``cosine``
Distributes daily mean wind speed using a cosine function derived from hourly
observations.
``random``
Draws random numbers to distribute wind speed (usually not conserving the
daily average). | [
"Disaggregate",
"wind",
"speed",
"."
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L181-L201 | train |
kristianfoerster/melodist | melodist/station.py | Station.disaggregate_humidity | def disaggregate_humidity(self, method='equal', preserve_daily_mean=False):
"""
Disaggregate relative humidity.
Parameters
----------
method : str, optional
Disaggregation method.
``equal``
Mean daily humidity is duplicated for the 24 hours of the day. (Default)
``minimal``:
Calculates humidity from daily dew point temperature by setting the dew point temperature
equal to the daily minimum temperature.
``dewpoint_regression``:
Calculates humidity from daily dew point temperature by calculating dew point temperature
using ``Tdew = a * Tmin + b``, where ``a`` and ``b`` are determined by calibration.
``linear_dewpoint_variation``:
Calculates humidity from hourly dew point temperature by assuming a linear dew point
temperature variation between consecutive days.
``min_max``:
Calculates hourly humidity from observations of daily minimum and maximum humidity.
``month_hour_precip_mean``:
Calculates hourly humidity from categorical [month, hour, precip(y/n)] mean values
derived from observations.
preserve_daily_mean : bool, optional
If True, correct the daily mean values of the disaggregated data with the observed daily means.
"""
self.data_disagg.hum = melodist.disaggregate_humidity(
self.data_daily,
temp=self.data_disagg.temp,
method=method,
preserve_daily_mean=preserve_daily_mean,
**self.statistics.hum
) | python | def disaggregate_humidity(self, method='equal', preserve_daily_mean=False):
"""
Disaggregate relative humidity.
Parameters
----------
method : str, optional
Disaggregation method.
``equal``
Mean daily humidity is duplicated for the 24 hours of the day. (Default)
``minimal``:
Calculates humidity from daily dew point temperature by setting the dew point temperature
equal to the daily minimum temperature.
``dewpoint_regression``:
Calculates humidity from daily dew point temperature by calculating dew point temperature
using ``Tdew = a * Tmin + b``, where ``a`` and ``b`` are determined by calibration.
``linear_dewpoint_variation``:
Calculates humidity from hourly dew point temperature by assuming a linear dew point
temperature variation between consecutive days.
``min_max``:
Calculates hourly humidity from observations of daily minimum and maximum humidity.
``month_hour_precip_mean``:
Calculates hourly humidity from categorical [month, hour, precip(y/n)] mean values
derived from observations.
preserve_daily_mean : bool, optional
If True, correct the daily mean values of the disaggregated data with the observed daily means.
"""
self.data_disagg.hum = melodist.disaggregate_humidity(
self.data_daily,
temp=self.data_disagg.temp,
method=method,
preserve_daily_mean=preserve_daily_mean,
**self.statistics.hum
) | [
"def",
"disaggregate_humidity",
"(",
"self",
",",
"method",
"=",
"'equal'",
",",
"preserve_daily_mean",
"=",
"False",
")",
":",
"self",
".",
"data_disagg",
".",
"hum",
"=",
"melodist",
".",
"disaggregate_humidity",
"(",
"self",
".",
"data_daily",
",",
"temp",
... | Disaggregate relative humidity.
Parameters
----------
method : str, optional
Disaggregation method.
``equal``
Mean daily humidity is duplicated for the 24 hours of the day. (Default)
``minimal``:
Calculates humidity from daily dew point temperature by setting the dew point temperature
equal to the daily minimum temperature.
``dewpoint_regression``:
Calculates humidity from daily dew point temperature by calculating dew point temperature
using ``Tdew = a * Tmin + b``, where ``a`` and ``b`` are determined by calibration.
``linear_dewpoint_variation``:
Calculates humidity from hourly dew point temperature by assuming a linear dew point
temperature variation between consecutive days.
``min_max``:
Calculates hourly humidity from observations of daily minimum and maximum humidity.
``month_hour_precip_mean``:
Calculates hourly humidity from categorical [month, hour, precip(y/n)] mean values
derived from observations.
preserve_daily_mean : bool, optional
If True, correct the daily mean values of the disaggregated data with the observed daily means. | [
"Disaggregate",
"relative",
"humidity",
"."
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L203-L243 | train |
kristianfoerster/melodist | melodist/station.py | Station.disaggregate_temperature | def disaggregate_temperature(self, method='sine_min_max', min_max_time='fix', mod_nighttime=False):
"""
Disaggregate air temperature.
Parameters
----------
method : str, optional
Disaggregation method.
``sine_min_max``
Hourly temperatures follow a sine function preserving daily minimum
and maximum values. (Default)
``sine_mean``
Hourly temperatures follow a sine function preserving the daily mean
value and the diurnal temperature range.
``sine``
Same as ``sine_min_max``.
``mean_course_min_max``
Hourly temperatures follow an observed average course (calculated for each month),
preserving daily minimum and maximum values.
``mean_course_mean``
Hourly temperatures follow an observed average course (calculated for each month),
preserving the daily mean value and the diurnal temperature range.
min_max_time : str, optional
Method to determine the time of minimum and maximum temperature.
``fix``:
Minimum/maximum temperature are assumed to occur at 07:00/14:00 local time.
``sun_loc``:
Minimum/maximum temperature are assumed to occur at sunrise / solar noon + 2 h.
``sun_loc_shift``:
Minimum/maximum temperature are assumed to occur at sunrise / solar noon + monthly mean shift.
mod_nighttime : bool, optional
Use linear interpolation between minimum and maximum temperature.
"""
self.data_disagg.temp = melodist.disaggregate_temperature(
self.data_daily,
method=method,
min_max_time=min_max_time,
max_delta=self.statistics.temp.max_delta,
mean_course=self.statistics.temp.mean_course,
sun_times=self.sun_times,
mod_nighttime=mod_nighttime
) | python | def disaggregate_temperature(self, method='sine_min_max', min_max_time='fix', mod_nighttime=False):
"""
Disaggregate air temperature.
Parameters
----------
method : str, optional
Disaggregation method.
``sine_min_max``
Hourly temperatures follow a sine function preserving daily minimum
and maximum values. (Default)
``sine_mean``
Hourly temperatures follow a sine function preserving the daily mean
value and the diurnal temperature range.
``sine``
Same as ``sine_min_max``.
``mean_course_min_max``
Hourly temperatures follow an observed average course (calculated for each month),
preserving daily minimum and maximum values.
``mean_course_mean``
Hourly temperatures follow an observed average course (calculated for each month),
preserving the daily mean value and the diurnal temperature range.
min_max_time : str, optional
Method to determine the time of minimum and maximum temperature.
``fix``:
Minimum/maximum temperature are assumed to occur at 07:00/14:00 local time.
``sun_loc``:
Minimum/maximum temperature are assumed to occur at sunrise / solar noon + 2 h.
``sun_loc_shift``:
Minimum/maximum temperature are assumed to occur at sunrise / solar noon + monthly mean shift.
mod_nighttime : bool, optional
Use linear interpolation between minimum and maximum temperature.
"""
self.data_disagg.temp = melodist.disaggregate_temperature(
self.data_daily,
method=method,
min_max_time=min_max_time,
max_delta=self.statistics.temp.max_delta,
mean_course=self.statistics.temp.mean_course,
sun_times=self.sun_times,
mod_nighttime=mod_nighttime
) | [
"def",
"disaggregate_temperature",
"(",
"self",
",",
"method",
"=",
"'sine_min_max'",
",",
"min_max_time",
"=",
"'fix'",
",",
"mod_nighttime",
"=",
"False",
")",
":",
"self",
".",
"data_disagg",
".",
"temp",
"=",
"melodist",
".",
"disaggregate_temperature",
"(",... | Disaggregate air temperature.
Parameters
----------
method : str, optional
Disaggregation method.
``sine_min_max``
Hourly temperatures follow a sine function preserving daily minimum
and maximum values. (Default)
``sine_mean``
Hourly temperatures follow a sine function preserving the daily mean
value and the diurnal temperature range.
``sine``
Same as ``sine_min_max``.
``mean_course_min_max``
Hourly temperatures follow an observed average course (calculated for each month),
preserving daily minimum and maximum values.
``mean_course_mean``
Hourly temperatures follow an observed average course (calculated for each month),
preserving the daily mean value and the diurnal temperature range.
min_max_time : str, optional
Method to determine the time of minimum and maximum temperature.
``fix``:
Minimum/maximum temperature are assumed to occur at 07:00/14:00 local time.
``sun_loc``:
Minimum/maximum temperature are assumed to occur at sunrise / solar noon + 2 h.
``sun_loc_shift``:
Minimum/maximum temperature are assumed to occur at sunrise / solar noon + monthly mean shift.
mod_nighttime : bool, optional
Use linear interpolation between minimum and maximum temperature. | [
"Disaggregate",
"air",
"temperature",
"."
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L245-L296 | train |
kristianfoerster/melodist | melodist/station.py | Station.disaggregate_precipitation | def disaggregate_precipitation(self, method='equal', zerodiv='uniform', shift=0, master_precip=None):
"""
Disaggregate precipitation.
Parameters
----------
method : str, optional
Disaggregation method.
``equal``
Daily precipitation is distributed equally over the 24 hours of the day. (Default)
``cascade``
Hourly precipitation values are obtained using a cascade model set up using
hourly observations.
zerodiv : str, optional
Method to deal with zero division, relevant for ``method='masterstation'``.
``uniform``
Use uniform distribution. (Default)
master_precip : Series, optional
Hourly precipitation records from a representative station
(required for ``method='masterstation'``).
"""
if method == 'equal':
precip_disagg = melodist.disagg_prec(self.data_daily, method=method, shift=shift)
elif method == 'cascade':
precip_disagg = pd.Series(index=self.data_disagg.index)
for months, stats in zip(self.statistics.precip.months, self.statistics.precip.stats):
precip_daily = melodist.seasonal_subset(self.data_daily.precip, months=months)
if len(precip_daily) > 1:
data = melodist.disagg_prec(precip_daily, method=method, cascade_options=stats,
shift=shift, zerodiv=zerodiv)
precip_disagg.loc[data.index] = data
elif method == 'masterstation':
precip_disagg = melodist.precip_master_station(self.data_daily.precip, master_precip, zerodiv)
self.data_disagg.precip = precip_disagg | python | def disaggregate_precipitation(self, method='equal', zerodiv='uniform', shift=0, master_precip=None):
"""
Disaggregate precipitation.
Parameters
----------
method : str, optional
Disaggregation method.
``equal``
Daily precipitation is distributed equally over the 24 hours of the day. (Default)
``cascade``
Hourly precipitation values are obtained using a cascade model set up using
hourly observations.
zerodiv : str, optional
Method to deal with zero division, relevant for ``method='masterstation'``.
``uniform``
Use uniform distribution. (Default)
master_precip : Series, optional
Hourly precipitation records from a representative station
(required for ``method='masterstation'``).
"""
if method == 'equal':
precip_disagg = melodist.disagg_prec(self.data_daily, method=method, shift=shift)
elif method == 'cascade':
precip_disagg = pd.Series(index=self.data_disagg.index)
for months, stats in zip(self.statistics.precip.months, self.statistics.precip.stats):
precip_daily = melodist.seasonal_subset(self.data_daily.precip, months=months)
if len(precip_daily) > 1:
data = melodist.disagg_prec(precip_daily, method=method, cascade_options=stats,
shift=shift, zerodiv=zerodiv)
precip_disagg.loc[data.index] = data
elif method == 'masterstation':
precip_disagg = melodist.precip_master_station(self.data_daily.precip, master_precip, zerodiv)
self.data_disagg.precip = precip_disagg | [
"def",
"disaggregate_precipitation",
"(",
"self",
",",
"method",
"=",
"'equal'",
",",
"zerodiv",
"=",
"'uniform'",
",",
"shift",
"=",
"0",
",",
"master_precip",
"=",
"None",
")",
":",
"if",
"method",
"==",
"'equal'",
":",
"precip_disagg",
"=",
"melodist",
... | Disaggregate precipitation.
Parameters
----------
method : str, optional
Disaggregation method.
``equal``
Daily precipitation is distributed equally over the 24 hours of the day. (Default)
``cascade``
Hourly precipitation values are obtained using a cascade model set up using
hourly observations.
zerodiv : str, optional
Method to deal with zero division, relevant for ``method='masterstation'``.
``uniform``
Use uniform distribution. (Default)
master_precip : Series, optional
Hourly precipitation records from a representative station
(required for ``method='masterstation'``). | [
"Disaggregate",
"precipitation",
"."
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L298-L338 | train |
kristianfoerster/melodist | melodist/station.py | Station.disaggregate_radiation | def disaggregate_radiation(self, method='pot_rad', pot_rad=None):
"""
Disaggregate solar radiation.
Parameters
----------
method : str, optional
Disaggregation method.
``pot_rad``
Calculates potential clear-sky hourly radiation and scales it according to the
mean daily radiation. (Default)
``pot_rad_via_ssd``
Calculates potential clear-sky hourly radiation and scales it according to the
observed daily sunshine duration.
``pot_rad_via_bc``
Calculates potential clear-sky hourly radiation and scales it according to daily
minimum and maximum temperature.
``mean_course``
Hourly radiation follows an observed average course (calculated for each month).
pot_rad : Series, optional
Hourly values of potential solar radiation. If ``None``, calculated internally.
"""
if self.sun_times is None:
self.calc_sun_times()
if pot_rad is None and method != 'mean_course':
pot_rad = melodist.potential_radiation(self.data_disagg.index, self.lon, self.lat, self.timezone)
self.data_disagg.glob = melodist.disaggregate_radiation(
self.data_daily,
sun_times=self.sun_times,
pot_rad=pot_rad,
method=method,
angstr_a=self.statistics.glob.angstroem.a,
angstr_b=self.statistics.glob.angstroem.b,
bristcamp_a=self.statistics.glob.bristcamp.a,
bristcamp_c=self.statistics.glob.bristcamp.c,
mean_course=self.statistics.glob.mean_course
) | python | def disaggregate_radiation(self, method='pot_rad', pot_rad=None):
"""
Disaggregate solar radiation.
Parameters
----------
method : str, optional
Disaggregation method.
``pot_rad``
Calculates potential clear-sky hourly radiation and scales it according to the
mean daily radiation. (Default)
``pot_rad_via_ssd``
Calculates potential clear-sky hourly radiation and scales it according to the
observed daily sunshine duration.
``pot_rad_via_bc``
Calculates potential clear-sky hourly radiation and scales it according to daily
minimum and maximum temperature.
``mean_course``
Hourly radiation follows an observed average course (calculated for each month).
pot_rad : Series, optional
Hourly values of potential solar radiation. If ``None``, calculated internally.
"""
if self.sun_times is None:
self.calc_sun_times()
if pot_rad is None and method != 'mean_course':
pot_rad = melodist.potential_radiation(self.data_disagg.index, self.lon, self.lat, self.timezone)
self.data_disagg.glob = melodist.disaggregate_radiation(
self.data_daily,
sun_times=self.sun_times,
pot_rad=pot_rad,
method=method,
angstr_a=self.statistics.glob.angstroem.a,
angstr_b=self.statistics.glob.angstroem.b,
bristcamp_a=self.statistics.glob.bristcamp.a,
bristcamp_c=self.statistics.glob.bristcamp.c,
mean_course=self.statistics.glob.mean_course
) | [
"def",
"disaggregate_radiation",
"(",
"self",
",",
"method",
"=",
"'pot_rad'",
",",
"pot_rad",
"=",
"None",
")",
":",
"if",
"self",
".",
"sun_times",
"is",
"None",
":",
"self",
".",
"calc_sun_times",
"(",
")",
"if",
"pot_rad",
"is",
"None",
"and",
"metho... | Disaggregate solar radiation.
Parameters
----------
method : str, optional
Disaggregation method.
``pot_rad``
Calculates potential clear-sky hourly radiation and scales it according to the
mean daily radiation. (Default)
``pot_rad_via_ssd``
Calculates potential clear-sky hourly radiation and scales it according to the
observed daily sunshine duration.
``pot_rad_via_bc``
Calculates potential clear-sky hourly radiation and scales it according to daily
minimum and maximum temperature.
``mean_course``
Hourly radiation follows an observed average course (calculated for each month).
pot_rad : Series, optional
Hourly values of potential solar radiation. If ``None``, calculated internally. | [
"Disaggregate",
"solar",
"radiation",
"."
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L340-L383 | train |
kristianfoerster/melodist | melodist/station.py | Station.interpolate | def interpolate(self, column_hours, method='linear', limit=24, limit_direction='both', **kwargs):
"""
Wrapper function for ``pandas.Series.interpolate`` that can be used to
"disaggregate" values using various interpolation methods.
Parameters
----------
column_hours : dict
Dictionary containing column names in ``data_daily`` and the hour
values they should be associated to.
method, limit, limit_direction, **kwargs
These parameters are passed on to ``pandas.Series.interpolate``.
Examples
--------
Assume that ``mystation.data_daily.T7``, ``mystation.data_daily.T14``,
and ``mystation.data_daily.T19`` contain air temperature measurements
taken at 07:00, 14:00, and 19:00.
We can use the interpolation functions provided by pandas/scipy to derive
hourly values:
>>> mystation.data_hourly.temp = mystation.interpolate({'T7': 7, 'T14': 14, 'T19': 19}) # linear interpolation (default)
>>> mystation.data_hourly.temp = mystation.interpolate({'T7': 7, 'T14': 14, 'T19': 19}, method='cubic') # cubic spline
"""
kwargs = dict(kwargs, method=method, limit=limit, limit_direction=limit_direction)
data = melodist.util.prepare_interpolation_data(self.data_daily, column_hours)
return data.interpolate(**kwargs) | python | def interpolate(self, column_hours, method='linear', limit=24, limit_direction='both', **kwargs):
"""
Wrapper function for ``pandas.Series.interpolate`` that can be used to
"disaggregate" values using various interpolation methods.
Parameters
----------
column_hours : dict
Dictionary containing column names in ``data_daily`` and the hour
values they should be associated to.
method, limit, limit_direction, **kwargs
These parameters are passed on to ``pandas.Series.interpolate``.
Examples
--------
Assume that ``mystation.data_daily.T7``, ``mystation.data_daily.T14``,
and ``mystation.data_daily.T19`` contain air temperature measurements
taken at 07:00, 14:00, and 19:00.
We can use the interpolation functions provided by pandas/scipy to derive
hourly values:
>>> mystation.data_hourly.temp = mystation.interpolate({'T7': 7, 'T14': 14, 'T19': 19}) # linear interpolation (default)
>>> mystation.data_hourly.temp = mystation.interpolate({'T7': 7, 'T14': 14, 'T19': 19}, method='cubic') # cubic spline
"""
kwargs = dict(kwargs, method=method, limit=limit, limit_direction=limit_direction)
data = melodist.util.prepare_interpolation_data(self.data_daily, column_hours)
return data.interpolate(**kwargs) | [
"def",
"interpolate",
"(",
"self",
",",
"column_hours",
",",
"method",
"=",
"'linear'",
",",
"limit",
"=",
"24",
",",
"limit_direction",
"=",
"'both'",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"dict",
"(",
"kwargs",
",",
"method",
"=",
"method... | Wrapper function for ``pandas.Series.interpolate`` that can be used to
"disaggregate" values using various interpolation methods.
Parameters
----------
column_hours : dict
Dictionary containing column names in ``data_daily`` and the hour
values they should be associated to.
method, limit, limit_direction, **kwargs
These parameters are passed on to ``pandas.Series.interpolate``.
Examples
--------
Assume that ``mystation.data_daily.T7``, ``mystation.data_daily.T14``,
and ``mystation.data_daily.T19`` contain air temperature measurements
taken at 07:00, 14:00, and 19:00.
We can use the interpolation functions provided by pandas/scipy to derive
hourly values:
>>> mystation.data_hourly.temp = mystation.interpolate({'T7': 7, 'T14': 14, 'T19': 19}) # linear interpolation (default)
>>> mystation.data_hourly.temp = mystation.interpolate({'T7': 7, 'T14': 14, 'T19': 19}, method='cubic') # cubic spline | [
"Wrapper",
"function",
"for",
"pandas",
".",
"Series",
".",
"interpolate",
"that",
"can",
"be",
"used",
"to",
"disaggregate",
"values",
"using",
"various",
"interpolation",
"methods",
"."
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L385-L412 | train |
boydgreenfield/query | query/core.py | QueryDbOrm._query_helper | def _query_helper(self, by=None):
"""
Internal helper for preparing queries.
"""
if by is None:
primary_keys = self.table.primary_key.columns.keys()
if len(primary_keys) > 1:
warnings.warn("WARNING: MORE THAN 1 PRIMARY KEY FOR TABLE %s. "
"USING THE FIRST KEY %s." %
(self.table.name, primary_keys[0]))
if not primary_keys:
raise NoPrimaryKeyException("Table %s needs a primary key for"
"the .last() method to work properly. "
"Alternatively, specify an ORDER BY "
"column with the by= argument. " %
self.table.name)
id_col = primary_keys[0]
else:
id_col = by
if self.column is None:
col = "*"
else:
col = self.column.name
return col, id_col | python | def _query_helper(self, by=None):
"""
Internal helper for preparing queries.
"""
if by is None:
primary_keys = self.table.primary_key.columns.keys()
if len(primary_keys) > 1:
warnings.warn("WARNING: MORE THAN 1 PRIMARY KEY FOR TABLE %s. "
"USING THE FIRST KEY %s." %
(self.table.name, primary_keys[0]))
if not primary_keys:
raise NoPrimaryKeyException("Table %s needs a primary key for"
"the .last() method to work properly. "
"Alternatively, specify an ORDER BY "
"column with the by= argument. " %
self.table.name)
id_col = primary_keys[0]
else:
id_col = by
if self.column is None:
col = "*"
else:
col = self.column.name
return col, id_col | [
"def",
"_query_helper",
"(",
"self",
",",
"by",
"=",
"None",
")",
":",
"if",
"by",
"is",
"None",
":",
"primary_keys",
"=",
"self",
".",
"table",
".",
"primary_key",
".",
"columns",
".",
"keys",
"(",
")",
"if",
"len",
"(",
"primary_keys",
")",
">",
... | Internal helper for preparing queries. | [
"Internal",
"helper",
"for",
"preparing",
"queries",
"."
] | 03aa43b746b43832af3f0403265e648a5617b62b | https://github.com/boydgreenfield/query/blob/03aa43b746b43832af3f0403265e648a5617b62b/query/core.py#L72-L99 | train |
boydgreenfield/query | query/core.py | QueryDbOrm.head | def head(self, n=10, by=None, **kwargs):
"""
Get the first n entries for a given Table/Column. Additional keywords
passed to QueryDb.query().
Requires that the given table has a primary key specified.
"""
col, id_col = self._query_helper(by=by)
select = ("SELECT %s FROM %s ORDER BY %s ASC LIMIT %d" %
(col, self.table.name, id_col, n))
return self._db.query(select, **kwargs) | python | def head(self, n=10, by=None, **kwargs):
"""
Get the first n entries for a given Table/Column. Additional keywords
passed to QueryDb.query().
Requires that the given table has a primary key specified.
"""
col, id_col = self._query_helper(by=by)
select = ("SELECT %s FROM %s ORDER BY %s ASC LIMIT %d" %
(col, self.table.name, id_col, n))
return self._db.query(select, **kwargs) | [
"def",
"head",
"(",
"self",
",",
"n",
"=",
"10",
",",
"by",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"col",
",",
"id_col",
"=",
"self",
".",
"_query_helper",
"(",
"by",
"=",
"by",
")",
"select",
"=",
"(",
"\"SELECT %s FROM %s ORDER BY %s ASC L... | Get the first n entries for a given Table/Column. Additional keywords
passed to QueryDb.query().
Requires that the given table has a primary key specified. | [
"Get",
"the",
"first",
"n",
"entries",
"for",
"a",
"given",
"Table",
"/",
"Column",
".",
"Additional",
"keywords",
"passed",
"to",
"QueryDb",
".",
"query",
"()",
"."
] | 03aa43b746b43832af3f0403265e648a5617b62b | https://github.com/boydgreenfield/query/blob/03aa43b746b43832af3f0403265e648a5617b62b/query/core.py#L101-L113 | train |
boydgreenfield/query | query/core.py | QueryDbOrm.last | def last(self, n=10, by=None, **kwargs):
"""
Alias for .tail().
"""
return self.tail(n=n, by=by, **kwargs) | python | def last(self, n=10, by=None, **kwargs):
"""
Alias for .tail().
"""
return self.tail(n=n, by=by, **kwargs) | [
"def",
"last",
"(",
"self",
",",
"n",
"=",
"10",
",",
"by",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"tail",
"(",
"n",
"=",
"n",
",",
"by",
"=",
"by",
",",
"*",
"*",
"kwargs",
")"
] | Alias for .tail(). | [
"Alias",
"for",
".",
"tail",
"()",
"."
] | 03aa43b746b43832af3f0403265e648a5617b62b | https://github.com/boydgreenfield/query/blob/03aa43b746b43832af3f0403265e648a5617b62b/query/core.py#L134-L138 | train |
boydgreenfield/query | query/core.py | QueryDbOrm.where | def where(self, where_string, **kwargs):
"""
Select from a given Table or Column with the specified WHERE clause
string. Additional keywords are passed to ExploreSqlDB.query(). For
convenience, if there is no '=', '>', '<', 'like', or 'LIKE' clause
in the WHERE statement .where() tries to match the input string
against the primary key column of the Table.
Args:
where_string (str): Where clause for the query against the Table
or Column
Kwars:
**kwargs: Optional **kwargs passed to the QueryDb.query() call
Returns:
result (pandas.DataFrame or sqlalchemy ResultProxy): Query result
as a DataFrame (default) or sqlalchemy result.
"""
col, id_col = self._query_helper(by=None)
where_string = str(where_string) # Coerce here, for .__contains___
where_operators = ["=", ">", "<", "LIKE", "like"]
if np.any([where_string.__contains__(w) for w in where_operators]):
select = ("SELECT %s FROM %s WHERE %s" %
(col, self.table.name, where_string))
else:
select = ("SELECT %s FROM %s WHERE %s = %s" %
(col, self.table.name, id_col, where_string))
return self._db.query(select, **kwargs) | python | def where(self, where_string, **kwargs):
"""
Select from a given Table or Column with the specified WHERE clause
string. Additional keywords are passed to ExploreSqlDB.query(). For
convenience, if there is no '=', '>', '<', 'like', or 'LIKE' clause
in the WHERE statement .where() tries to match the input string
against the primary key column of the Table.
Args:
where_string (str): Where clause for the query against the Table
or Column
Kwars:
**kwargs: Optional **kwargs passed to the QueryDb.query() call
Returns:
result (pandas.DataFrame or sqlalchemy ResultProxy): Query result
as a DataFrame (default) or sqlalchemy result.
"""
col, id_col = self._query_helper(by=None)
where_string = str(where_string) # Coerce here, for .__contains___
where_operators = ["=", ">", "<", "LIKE", "like"]
if np.any([where_string.__contains__(w) for w in where_operators]):
select = ("SELECT %s FROM %s WHERE %s" %
(col, self.table.name, where_string))
else:
select = ("SELECT %s FROM %s WHERE %s = %s" %
(col, self.table.name, id_col, where_string))
return self._db.query(select, **kwargs) | [
"def",
"where",
"(",
"self",
",",
"where_string",
",",
"*",
"*",
"kwargs",
")",
":",
"col",
",",
"id_col",
"=",
"self",
".",
"_query_helper",
"(",
"by",
"=",
"None",
")",
"where_string",
"=",
"str",
"(",
"where_string",
")",
"# Coerce here, for .__contains... | Select from a given Table or Column with the specified WHERE clause
string. Additional keywords are passed to ExploreSqlDB.query(). For
convenience, if there is no '=', '>', '<', 'like', or 'LIKE' clause
in the WHERE statement .where() tries to match the input string
against the primary key column of the Table.
Args:
where_string (str): Where clause for the query against the Table
or Column
Kwars:
**kwargs: Optional **kwargs passed to the QueryDb.query() call
Returns:
result (pandas.DataFrame or sqlalchemy ResultProxy): Query result
as a DataFrame (default) or sqlalchemy result. | [
"Select",
"from",
"a",
"given",
"Table",
"or",
"Column",
"with",
"the",
"specified",
"WHERE",
"clause",
"string",
".",
"Additional",
"keywords",
"are",
"passed",
"to",
"ExploreSqlDB",
".",
"query",
"()",
".",
"For",
"convenience",
"if",
"there",
"is",
"no",
... | 03aa43b746b43832af3f0403265e648a5617b62b | https://github.com/boydgreenfield/query/blob/03aa43b746b43832af3f0403265e648a5617b62b/query/core.py#L140-L170 | train |
boydgreenfield/query | query/core.py | QueryDb.query | def query(self, sql_query, return_as="dataframe"):
"""
Execute a raw SQL query against the the SQL DB.
Args:
sql_query (str): A raw SQL query to execute.
Kwargs:
return_as (str): Specify what type of object should be
returned. The following are acceptable types:
- "dataframe": pandas.DataFrame or None if no matching query
- "result": sqlalchemy.engine.result.ResultProxy
Returns:
result (pandas.DataFrame or sqlalchemy ResultProxy): Query result
as a DataFrame (default) or sqlalchemy result (specified with
return_as="result")
Raises:
QueryDbError
"""
if isinstance(sql_query, str):
pass
elif isinstance(sql_query, unicode):
sql_query = str(sql_query)
else:
raise QueryDbError("query() requires a str or unicode input.")
query = sqlalchemy.sql.text(sql_query)
if return_as.upper() in ["DF", "DATAFRAME"]:
return self._to_df(query, self._engine)
elif return_as.upper() in ["RESULT", "RESULTPROXY"]:
with self._engine.connect() as conn:
result = conn.execute(query)
return result
else:
raise QueryDbError("Other return types not implemented.") | python | def query(self, sql_query, return_as="dataframe"):
"""
Execute a raw SQL query against the the SQL DB.
Args:
sql_query (str): A raw SQL query to execute.
Kwargs:
return_as (str): Specify what type of object should be
returned. The following are acceptable types:
- "dataframe": pandas.DataFrame or None if no matching query
- "result": sqlalchemy.engine.result.ResultProxy
Returns:
result (pandas.DataFrame or sqlalchemy ResultProxy): Query result
as a DataFrame (default) or sqlalchemy result (specified with
return_as="result")
Raises:
QueryDbError
"""
if isinstance(sql_query, str):
pass
elif isinstance(sql_query, unicode):
sql_query = str(sql_query)
else:
raise QueryDbError("query() requires a str or unicode input.")
query = sqlalchemy.sql.text(sql_query)
if return_as.upper() in ["DF", "DATAFRAME"]:
return self._to_df(query, self._engine)
elif return_as.upper() in ["RESULT", "RESULTPROXY"]:
with self._engine.connect() as conn:
result = conn.execute(query)
return result
else:
raise QueryDbError("Other return types not implemented.") | [
"def",
"query",
"(",
"self",
",",
"sql_query",
",",
"return_as",
"=",
"\"dataframe\"",
")",
":",
"if",
"isinstance",
"(",
"sql_query",
",",
"str",
")",
":",
"pass",
"elif",
"isinstance",
"(",
"sql_query",
",",
"unicode",
")",
":",
"sql_query",
"=",
"str"... | Execute a raw SQL query against the the SQL DB.
Args:
sql_query (str): A raw SQL query to execute.
Kwargs:
return_as (str): Specify what type of object should be
returned. The following are acceptable types:
- "dataframe": pandas.DataFrame or None if no matching query
- "result": sqlalchemy.engine.result.ResultProxy
Returns:
result (pandas.DataFrame or sqlalchemy ResultProxy): Query result
as a DataFrame (default) or sqlalchemy result (specified with
return_as="result")
Raises:
QueryDbError | [
"Execute",
"a",
"raw",
"SQL",
"query",
"against",
"the",
"the",
"SQL",
"DB",
"."
] | 03aa43b746b43832af3f0403265e648a5617b62b | https://github.com/boydgreenfield/query/blob/03aa43b746b43832af3f0403265e648a5617b62b/query/core.py#L298-L335 | train |
boydgreenfield/query | query/core.py | QueryDb._set_metadata | def _set_metadata(self):
"""
Internal helper to set metadata attributes.
"""
meta = QueryDbMeta()
with self._engine.connect() as conn:
meta.bind = conn
meta.reflect()
self._meta = meta
# Set an inspect attribute, whose subattributes
# return individual tables / columns. Tables and columns
# are special classes with .last() and other convenience methods
self.inspect = QueryDbAttributes()
for table in self._meta.tables:
setattr(self.inspect, table,
QueryDbOrm(self._meta.tables[table], self))
table_attr = getattr(self.inspect, table)
table_cols = table_attr.table.columns
for col in table_cols.keys():
setattr(table_attr, col,
QueryDbOrm(table_cols[col], self))
# Finally add some summary info:
# Table name
# Primary Key item or list
# N of Cols
# Distinct Col Values (class so NVARCHAR(20) and NVARCHAR(30) are not different)
primary_keys = table_attr.table.primary_key.columns.keys()
self._summary_info.append((
table,
primary_keys[0] if len(primary_keys) == 1 else primary_keys,
len(table_cols),
len(set([x.type.__class__ for x in table_cols.values()])),
)) | python | def _set_metadata(self):
"""
Internal helper to set metadata attributes.
"""
meta = QueryDbMeta()
with self._engine.connect() as conn:
meta.bind = conn
meta.reflect()
self._meta = meta
# Set an inspect attribute, whose subattributes
# return individual tables / columns. Tables and columns
# are special classes with .last() and other convenience methods
self.inspect = QueryDbAttributes()
for table in self._meta.tables:
setattr(self.inspect, table,
QueryDbOrm(self._meta.tables[table], self))
table_attr = getattr(self.inspect, table)
table_cols = table_attr.table.columns
for col in table_cols.keys():
setattr(table_attr, col,
QueryDbOrm(table_cols[col], self))
# Finally add some summary info:
# Table name
# Primary Key item or list
# N of Cols
# Distinct Col Values (class so NVARCHAR(20) and NVARCHAR(30) are not different)
primary_keys = table_attr.table.primary_key.columns.keys()
self._summary_info.append((
table,
primary_keys[0] if len(primary_keys) == 1 else primary_keys,
len(table_cols),
len(set([x.type.__class__ for x in table_cols.values()])),
)) | [
"def",
"_set_metadata",
"(",
"self",
")",
":",
"meta",
"=",
"QueryDbMeta",
"(",
")",
"with",
"self",
".",
"_engine",
".",
"connect",
"(",
")",
"as",
"conn",
":",
"meta",
".",
"bind",
"=",
"conn",
"meta",
".",
"reflect",
"(",
")",
"self",
".",
"_met... | Internal helper to set metadata attributes. | [
"Internal",
"helper",
"to",
"set",
"metadata",
"attributes",
"."
] | 03aa43b746b43832af3f0403265e648a5617b62b | https://github.com/boydgreenfield/query/blob/03aa43b746b43832af3f0403265e648a5617b62b/query/core.py#L337-L373 | train |
boydgreenfield/query | query/core.py | QueryDb._to_df | def _to_df(self, query, conn, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None):
"""
Internal convert-to-DataFrame convenience wrapper.
"""
return pd.io.sql.read_sql(str(query), conn, index_col=index_col,
coerce_float=coerce_float, params=params,
parse_dates=parse_dates, columns=columns) | python | def _to_df(self, query, conn, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None):
"""
Internal convert-to-DataFrame convenience wrapper.
"""
return pd.io.sql.read_sql(str(query), conn, index_col=index_col,
coerce_float=coerce_float, params=params,
parse_dates=parse_dates, columns=columns) | [
"def",
"_to_df",
"(",
"self",
",",
"query",
",",
"conn",
",",
"index_col",
"=",
"None",
",",
"coerce_float",
"=",
"True",
",",
"params",
"=",
"None",
",",
"parse_dates",
"=",
"None",
",",
"columns",
"=",
"None",
")",
":",
"return",
"pd",
".",
"io",
... | Internal convert-to-DataFrame convenience wrapper. | [
"Internal",
"convert",
"-",
"to",
"-",
"DataFrame",
"convenience",
"wrapper",
"."
] | 03aa43b746b43832af3f0403265e648a5617b62b | https://github.com/boydgreenfield/query/blob/03aa43b746b43832af3f0403265e648a5617b62b/query/core.py#L375-L382 | train |
kristianfoerster/melodist | melodist/temperature.py | disaggregate_temperature | def disaggregate_temperature(data_daily,
method='sine_min_max',
min_max_time='fix',
mod_nighttime=False,
max_delta=None,
mean_course=None,
sun_times=None):
"""The disaggregation function for temperature
Parameters
----
data_daily : daily data
method : method to disaggregate
min_max_time: "fix" - min/max temperature at fixed times 7h/14h,
"sun_loc" - min/max calculated by sunrise/sunnoon + 2h,
"sun_loc_shift" - min/max calculated by sunrise/sunnoon + monthly mean shift,
max_delta: maximum monthly temperature shift as returned by get_shift_by_data()
sun_times: times of sunrise/noon as returned by get_sun_times()
"""
if method not in (
'sine_min_max',
'sine_mean',
'sine',
'mean_course_min_max',
'mean_course_mean',
):
raise ValueError('Invalid option')
temp_disagg = pd.Series(index=melodist.util.hourly_index(data_daily.index))
if method in ('sine_min_max', 'sine_mean', 'sine'):
# for this option assume time of minimum and maximum and fit cosine function through minimum and maximum temperatures
hours_per_day = 24
default_shift_hours = 2
daylength_thres = 3
# min / max hour during polar night assumption
min_loc_polar = 6
max_loc_polar = 18
locdf = pd.DataFrame(
index=data_daily.index,
columns=[
'min_loc',
'max_loc',
'min_val_before',
'min_val_cur',
'min_val_next',
'max_val_before',
'max_val_cur',
'max_val_next',
'mean_val_cur',
]
)
if min_max_time == 'fix':
# take fixed location for minimum and maximum
locdf.min_loc = 7
locdf.max_loc = 14
elif min_max_time == 'sun_loc':
# take location for minimum and maximum by sunrise / sunnoon + 2h
locdf.min_loc = sun_times.sunrise.round() # sun rise round to full hour
locdf.max_loc = sun_times.sunnoon.round() + default_shift_hours # sun noon round to full hour + fix 2h
elif min_max_time == 'sun_loc_shift':
# take location for minimum and maximum by sunrise / sunnoon + monthly delta
locdf.min_loc = sun_times.sunrise.round() # sun rise round to full hour
locdf.max_loc = (sun_times.sunnoon + max_delta[locdf.index.month].values).round() # sun noon + shift derived from observed hourly data, round to full hour
pos = locdf.min_loc > locdf.max_loc
locdf.loc[pos, 'max_loc'] = sun_times.sunnoon[pos].round() + default_shift_hours # standard shift in this case
locdf.min_loc = locdf.min_loc.astype(int)
locdf.max_loc = locdf.max_loc.astype(int)
locdf.min_val_cur = data_daily.tmin
locdf.max_val_cur = data_daily.tmax
locdf.mean_val_cur = data_daily.temp
locdf.min_val_next = data_daily.tmin.shift(-1, 'D')
locdf.max_val_next = data_daily.tmax.shift(-1, 'D')
locdf.loc[locdf.index[-1], 'min_val_next'] = locdf.min_val_cur.iloc[-1]
locdf.loc[locdf.index[-1], 'max_val_next'] = locdf.max_val_cur.iloc[-1]
locdf.min_val_before = data_daily.tmin.shift(1, 'D')
locdf.max_val_before = data_daily.tmax.shift(1, 'D')
locdf.loc[locdf.index[0], 'min_val_before'] = locdf.min_val_cur.iloc[0]
locdf.loc[locdf.index[0], 'max_val_before'] = locdf.max_val_cur.iloc[0]
locdf_day = locdf
locdf = locdf.reindex(temp_disagg.index, method='ffill')
# whenever we are before the maximum for the current day, use minimum value of current day for cosine function fitting
# once we have passed the maximum value use the minimum for next day to ensure smooth transitions
min_val = locdf.min_val_next.copy()
min_val[min_val.index.hour < locdf.max_loc] = locdf.min_val_cur
# whenever we are before the minimum for the current day, use maximum value of day before for cosine function fitting
# once we have passed the minimum value use the maximum for the current day to ensure smooth transitions
max_val = locdf.max_val_cur.copy()
max_val[max_val.index.hour < locdf.min_loc] = locdf.max_val_before
temp_disagg = pd.Series(index=min_val.index)
if method in ('sine_min_max', 'sine'):
delta_val = max_val - min_val
v_trans = min_val + delta_val / 2.
if mod_nighttime:
before_min = locdf.index.hour <= locdf.min_loc
between_min_max = (locdf.index.hour > locdf.min_loc) & (locdf.index.hour < locdf.max_loc)
after_max = locdf.index.hour >= locdf.max_loc
temp_disagg[before_min] = v_trans + delta_val / 2. * np.cos(np.pi / (hours_per_day - (locdf.max_loc - locdf.min_loc)) * (hours_per_day - locdf.max_loc + locdf.index.hour))
temp_disagg[between_min_max] = v_trans + delta_val / 2. * np.cos(1.25 * np.pi + 0.75 * np.pi / (locdf.max_loc - locdf.min_loc) * (locdf.index.hour - locdf.min_loc))
temp_disagg[after_max] = v_trans + delta_val / 2. * np.cos(np.pi / (hours_per_day - (locdf.max_loc - locdf.min_loc)) * (locdf.index.hour - locdf.max_loc))
else:
temp_disagg[:] = v_trans + (delta_val / 2.) * np.cos(2 * np.pi / hours_per_day * (locdf.index.hour - locdf.max_loc))
elif method == 'sine_mean':
dtr = locdf.max_val_cur - locdf.min_val_cur
temp_disagg[:] = locdf.mean_val_cur + dtr / 2. * np.cos(2 * np.pi / hours_per_day * (locdf.index.hour - locdf.max_loc))
polars = sun_times.daylength < daylength_thres
if polars.sum() > 0:
# during polar night, no diurnal variation of temperature is applied
# instead the daily average calculated using tmin and tmax is applied
polars_index_hourly = melodist.util.hourly_index(polars[polars].index)
temp_disagg.loc[polars_index_hourly] = np.nan
avg_before = (locdf_day.min_val_before + locdf_day.max_val_before) / 2.
avg_cur = (locdf_day.min_val_cur + locdf_day.max_val_cur) / 2.
getting_warmers = polars & (avg_before <= avg_cur)
getting_colders = polars & ~(avg_before <= avg_cur)
getting_warmers_min_loc = pd.DatetimeIndex([ts.replace(hour=min_loc_polar) for ts in getting_warmers[getting_warmers].index])
getting_warmers_max_loc = pd.DatetimeIndex([ts.replace(hour=max_loc_polar) for ts in getting_warmers[getting_warmers].index])
temp_disagg[getting_warmers_min_loc] = locdf_day.min_val_cur[getting_warmers].values
temp_disagg[getting_warmers_max_loc] = locdf_day.max_val_cur[getting_warmers].values
getting_colders_min_loc = pd.DatetimeIndex([ts.replace(hour=min_loc_polar) for ts in getting_colders[getting_colders].index])
getting_colders_max_loc = pd.DatetimeIndex([ts.replace(hour=max_loc_polar) for ts in getting_colders[getting_colders].index])
temp_disagg[getting_colders_min_loc] = locdf_day.max_val_cur[getting_colders].values
temp_disagg[getting_colders_max_loc] = locdf_day.min_val_cur[getting_colders].values
temp_polars = temp_disagg.loc[polars_index_hourly].copy()
transition_days = polars[polars.diff() == True].astype(int) # 0 where transition from polar to "normal" mode, 1 where transition from normal to polar
if len(transition_days) > 0:
polar_to_normal_days = transition_days.index[transition_days == 0]
normal_to_polar_days = transition_days.index[transition_days == 1] - pd.Timedelta(days=1)
add_days = polar_to_normal_days.union(normal_to_polar_days)
temp_polars = temp_polars.append(temp_disagg[melodist.util.hourly_index(add_days)]).sort_index()
for day in polar_to_normal_days:
min_loc = int(locdf.loc[day].min_loc)
temp_polars[day.replace(hour=0):day.replace(hour=min_loc) - pd.Timedelta(hours=1)] = np.nan
temp_polars[day.replace(hour=min_loc)] = locdf.min_val_cur[day]
for day in normal_to_polar_days:
max_loc = int(locdf.loc[day].max_loc)
temp_polars[day.replace(hour=max_loc) + pd.Timedelta(hours=1):day.replace(hour=23)] = np.nan
temp_interp = temp_polars.interpolate(method='linear', limit=23)
temp_disagg[temp_interp.index] = temp_interp
elif method == 'mean_course_min_max':
data_daily_as_hourly = data_daily.reindex(temp_disagg.index, method='ffill', limit=23)
df = pd.DataFrame(index=temp_disagg.index)
df['normval'] = mean_course.unstack().loc[list(zip(df.index.month, df.index.hour))].values
df['tmin'] = data_daily_as_hourly.tmin
df['tmax'] = data_daily_as_hourly.tmax
temp_disagg[:] = df.normval * (df.tmax - df.tmin) + df.tmin
elif method == 'mean_course_mean':
data_daily_as_hourly = data_daily.reindex(temp_disagg.index, method='ffill', limit=23)
dtr = data_daily_as_hourly.tmax - data_daily_as_hourly.tmin
mc = pd.Series(index=temp_disagg.index)
mean_course_zeromean = mean_course - mean_course.mean() # shift mean course so that the daily mean is 0
mc[:] = mean_course_zeromean.unstack().loc[list(zip(temp_disagg.index.month, temp_disagg.index.hour))].values
temp_disagg[:] = data_daily_as_hourly.temp + dtr * mc
return temp_disagg | python | def disaggregate_temperature(data_daily,
method='sine_min_max',
min_max_time='fix',
mod_nighttime=False,
max_delta=None,
mean_course=None,
sun_times=None):
"""The disaggregation function for temperature
Parameters
----
data_daily : daily data
method : method to disaggregate
min_max_time: "fix" - min/max temperature at fixed times 7h/14h,
"sun_loc" - min/max calculated by sunrise/sunnoon + 2h,
"sun_loc_shift" - min/max calculated by sunrise/sunnoon + monthly mean shift,
max_delta: maximum monthly temperature shift as returned by get_shift_by_data()
sun_times: times of sunrise/noon as returned by get_sun_times()
"""
if method not in (
'sine_min_max',
'sine_mean',
'sine',
'mean_course_min_max',
'mean_course_mean',
):
raise ValueError('Invalid option')
temp_disagg = pd.Series(index=melodist.util.hourly_index(data_daily.index))
if method in ('sine_min_max', 'sine_mean', 'sine'):
# for this option assume time of minimum and maximum and fit cosine function through minimum and maximum temperatures
hours_per_day = 24
default_shift_hours = 2
daylength_thres = 3
# min / max hour during polar night assumption
min_loc_polar = 6
max_loc_polar = 18
locdf = pd.DataFrame(
index=data_daily.index,
columns=[
'min_loc',
'max_loc',
'min_val_before',
'min_val_cur',
'min_val_next',
'max_val_before',
'max_val_cur',
'max_val_next',
'mean_val_cur',
]
)
if min_max_time == 'fix':
# take fixed location for minimum and maximum
locdf.min_loc = 7
locdf.max_loc = 14
elif min_max_time == 'sun_loc':
# take location for minimum and maximum by sunrise / sunnoon + 2h
locdf.min_loc = sun_times.sunrise.round() # sun rise round to full hour
locdf.max_loc = sun_times.sunnoon.round() + default_shift_hours # sun noon round to full hour + fix 2h
elif min_max_time == 'sun_loc_shift':
# take location for minimum and maximum by sunrise / sunnoon + monthly delta
locdf.min_loc = sun_times.sunrise.round() # sun rise round to full hour
locdf.max_loc = (sun_times.sunnoon + max_delta[locdf.index.month].values).round() # sun noon + shift derived from observed hourly data, round to full hour
pos = locdf.min_loc > locdf.max_loc
locdf.loc[pos, 'max_loc'] = sun_times.sunnoon[pos].round() + default_shift_hours # standard shift in this case
locdf.min_loc = locdf.min_loc.astype(int)
locdf.max_loc = locdf.max_loc.astype(int)
locdf.min_val_cur = data_daily.tmin
locdf.max_val_cur = data_daily.tmax
locdf.mean_val_cur = data_daily.temp
locdf.min_val_next = data_daily.tmin.shift(-1, 'D')
locdf.max_val_next = data_daily.tmax.shift(-1, 'D')
locdf.loc[locdf.index[-1], 'min_val_next'] = locdf.min_val_cur.iloc[-1]
locdf.loc[locdf.index[-1], 'max_val_next'] = locdf.max_val_cur.iloc[-1]
locdf.min_val_before = data_daily.tmin.shift(1, 'D')
locdf.max_val_before = data_daily.tmax.shift(1, 'D')
locdf.loc[locdf.index[0], 'min_val_before'] = locdf.min_val_cur.iloc[0]
locdf.loc[locdf.index[0], 'max_val_before'] = locdf.max_val_cur.iloc[0]
locdf_day = locdf
locdf = locdf.reindex(temp_disagg.index, method='ffill')
# whenever we are before the maximum for the current day, use minimum value of current day for cosine function fitting
# once we have passed the maximum value use the minimum for next day to ensure smooth transitions
min_val = locdf.min_val_next.copy()
min_val[min_val.index.hour < locdf.max_loc] = locdf.min_val_cur
# whenever we are before the minimum for the current day, use maximum value of day before for cosine function fitting
# once we have passed the minimum value use the maximum for the current day to ensure smooth transitions
max_val = locdf.max_val_cur.copy()
max_val[max_val.index.hour < locdf.min_loc] = locdf.max_val_before
temp_disagg = pd.Series(index=min_val.index)
if method in ('sine_min_max', 'sine'):
delta_val = max_val - min_val
v_trans = min_val + delta_val / 2.
if mod_nighttime:
before_min = locdf.index.hour <= locdf.min_loc
between_min_max = (locdf.index.hour > locdf.min_loc) & (locdf.index.hour < locdf.max_loc)
after_max = locdf.index.hour >= locdf.max_loc
temp_disagg[before_min] = v_trans + delta_val / 2. * np.cos(np.pi / (hours_per_day - (locdf.max_loc - locdf.min_loc)) * (hours_per_day - locdf.max_loc + locdf.index.hour))
temp_disagg[between_min_max] = v_trans + delta_val / 2. * np.cos(1.25 * np.pi + 0.75 * np.pi / (locdf.max_loc - locdf.min_loc) * (locdf.index.hour - locdf.min_loc))
temp_disagg[after_max] = v_trans + delta_val / 2. * np.cos(np.pi / (hours_per_day - (locdf.max_loc - locdf.min_loc)) * (locdf.index.hour - locdf.max_loc))
else:
temp_disagg[:] = v_trans + (delta_val / 2.) * np.cos(2 * np.pi / hours_per_day * (locdf.index.hour - locdf.max_loc))
elif method == 'sine_mean':
dtr = locdf.max_val_cur - locdf.min_val_cur
temp_disagg[:] = locdf.mean_val_cur + dtr / 2. * np.cos(2 * np.pi / hours_per_day * (locdf.index.hour - locdf.max_loc))
polars = sun_times.daylength < daylength_thres
if polars.sum() > 0:
# during polar night, no diurnal variation of temperature is applied
# instead the daily average calculated using tmin and tmax is applied
polars_index_hourly = melodist.util.hourly_index(polars[polars].index)
temp_disagg.loc[polars_index_hourly] = np.nan
avg_before = (locdf_day.min_val_before + locdf_day.max_val_before) / 2.
avg_cur = (locdf_day.min_val_cur + locdf_day.max_val_cur) / 2.
getting_warmers = polars & (avg_before <= avg_cur)
getting_colders = polars & ~(avg_before <= avg_cur)
getting_warmers_min_loc = pd.DatetimeIndex([ts.replace(hour=min_loc_polar) for ts in getting_warmers[getting_warmers].index])
getting_warmers_max_loc = pd.DatetimeIndex([ts.replace(hour=max_loc_polar) for ts in getting_warmers[getting_warmers].index])
temp_disagg[getting_warmers_min_loc] = locdf_day.min_val_cur[getting_warmers].values
temp_disagg[getting_warmers_max_loc] = locdf_day.max_val_cur[getting_warmers].values
getting_colders_min_loc = pd.DatetimeIndex([ts.replace(hour=min_loc_polar) for ts in getting_colders[getting_colders].index])
getting_colders_max_loc = pd.DatetimeIndex([ts.replace(hour=max_loc_polar) for ts in getting_colders[getting_colders].index])
temp_disagg[getting_colders_min_loc] = locdf_day.max_val_cur[getting_colders].values
temp_disagg[getting_colders_max_loc] = locdf_day.min_val_cur[getting_colders].values
temp_polars = temp_disagg.loc[polars_index_hourly].copy()
transition_days = polars[polars.diff() == True].astype(int) # 0 where transition from polar to "normal" mode, 1 where transition from normal to polar
if len(transition_days) > 0:
polar_to_normal_days = transition_days.index[transition_days == 0]
normal_to_polar_days = transition_days.index[transition_days == 1] - pd.Timedelta(days=1)
add_days = polar_to_normal_days.union(normal_to_polar_days)
temp_polars = temp_polars.append(temp_disagg[melodist.util.hourly_index(add_days)]).sort_index()
for day in polar_to_normal_days:
min_loc = int(locdf.loc[day].min_loc)
temp_polars[day.replace(hour=0):day.replace(hour=min_loc) - pd.Timedelta(hours=1)] = np.nan
temp_polars[day.replace(hour=min_loc)] = locdf.min_val_cur[day]
for day in normal_to_polar_days:
max_loc = int(locdf.loc[day].max_loc)
temp_polars[day.replace(hour=max_loc) + pd.Timedelta(hours=1):day.replace(hour=23)] = np.nan
temp_interp = temp_polars.interpolate(method='linear', limit=23)
temp_disagg[temp_interp.index] = temp_interp
elif method == 'mean_course_min_max':
data_daily_as_hourly = data_daily.reindex(temp_disagg.index, method='ffill', limit=23)
df = pd.DataFrame(index=temp_disagg.index)
df['normval'] = mean_course.unstack().loc[list(zip(df.index.month, df.index.hour))].values
df['tmin'] = data_daily_as_hourly.tmin
df['tmax'] = data_daily_as_hourly.tmax
temp_disagg[:] = df.normval * (df.tmax - df.tmin) + df.tmin
elif method == 'mean_course_mean':
data_daily_as_hourly = data_daily.reindex(temp_disagg.index, method='ffill', limit=23)
dtr = data_daily_as_hourly.tmax - data_daily_as_hourly.tmin
mc = pd.Series(index=temp_disagg.index)
mean_course_zeromean = mean_course - mean_course.mean() # shift mean course so that the daily mean is 0
mc[:] = mean_course_zeromean.unstack().loc[list(zip(temp_disagg.index.month, temp_disagg.index.hour))].values
temp_disagg[:] = data_daily_as_hourly.temp + dtr * mc
return temp_disagg | [
"def",
"disaggregate_temperature",
"(",
"data_daily",
",",
"method",
"=",
"'sine_min_max'",
",",
"min_max_time",
"=",
"'fix'",
",",
"mod_nighttime",
"=",
"False",
",",
"max_delta",
"=",
"None",
",",
"mean_course",
"=",
"None",
",",
"sun_times",
"=",
"None",
")... | The disaggregation function for temperature
Parameters
----
data_daily : daily data
method : method to disaggregate
min_max_time: "fix" - min/max temperature at fixed times 7h/14h,
"sun_loc" - min/max calculated by sunrise/sunnoon + 2h,
"sun_loc_shift" - min/max calculated by sunrise/sunnoon + monthly mean shift,
max_delta: maximum monthly temperature shift as returned by get_shift_by_data()
sun_times: times of sunrise/noon as returned by get_sun_times() | [
"The",
"disaggregation",
"function",
"for",
"temperature"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/temperature.py#L33-L212 | train |
kristianfoerster/melodist | melodist/temperature.py | get_shift_by_data | def get_shift_by_data(temp_hourly, lon, lat, time_zone):
'''function to get max temp shift (monthly) by hourly data
Parameters
----
hourly_data_obs : observed hourly data
lat : latitude in DezDeg
lon : longitude in DezDeg
time_zone: timezone
'''
daily_index = temp_hourly.resample('D').mean().index
sun_times = melodist.util.get_sun_times(daily_index, lon, lat, time_zone)
idxmax = temp_hourly.groupby(temp_hourly.index.date).idxmax()
idxmax.index = pd.to_datetime(idxmax.index)
max_temp_hour_obs = idxmax.dropna().apply(lambda d: d.hour)
max_temp_hour_pot = sun_times.sunnoon
max_delta = max_temp_hour_obs - max_temp_hour_pot
mean_monthly_delta = max_delta.groupby(max_delta.index.month).mean()
return mean_monthly_delta | python | def get_shift_by_data(temp_hourly, lon, lat, time_zone):
'''function to get max temp shift (monthly) by hourly data
Parameters
----
hourly_data_obs : observed hourly data
lat : latitude in DezDeg
lon : longitude in DezDeg
time_zone: timezone
'''
daily_index = temp_hourly.resample('D').mean().index
sun_times = melodist.util.get_sun_times(daily_index, lon, lat, time_zone)
idxmax = temp_hourly.groupby(temp_hourly.index.date).idxmax()
idxmax.index = pd.to_datetime(idxmax.index)
max_temp_hour_obs = idxmax.dropna().apply(lambda d: d.hour)
max_temp_hour_pot = sun_times.sunnoon
max_delta = max_temp_hour_obs - max_temp_hour_pot
mean_monthly_delta = max_delta.groupby(max_delta.index.month).mean()
return mean_monthly_delta | [
"def",
"get_shift_by_data",
"(",
"temp_hourly",
",",
"lon",
",",
"lat",
",",
"time_zone",
")",
":",
"daily_index",
"=",
"temp_hourly",
".",
"resample",
"(",
"'D'",
")",
".",
"mean",
"(",
")",
".",
"index",
"sun_times",
"=",
"melodist",
".",
"util",
".",
... | function to get max temp shift (monthly) by hourly data
Parameters
----
hourly_data_obs : observed hourly data
lat : latitude in DezDeg
lon : longitude in DezDeg
time_zone: timezone | [
"function",
"to",
"get",
"max",
"temp",
"shift",
"(",
"monthly",
")",
"by",
"hourly",
"data",
"Parameters",
"----",
"hourly_data_obs",
":",
"observed",
"hourly",
"data",
"lat",
":",
"latitude",
"in",
"DezDeg",
"lon",
":",
"longitude",
"in",
"DezDeg",
"time_z... | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/temperature.py#L215-L235 | train |
kristianfoerster/melodist | melodist/util/util.py | distribute_equally | def distribute_equally(daily_data, divide=False):
"""Obtains hourly values by equally distributing the daily values.
Args:
daily_data: daily values
divide: if True, divide resulting values by the number of hours in
order to preserve the daily sum (required e.g. for precipitation).
Returns:
Equally distributed hourly values.
"""
index = hourly_index(daily_data.index)
hourly_data = daily_data.reindex(index)
hourly_data = hourly_data.groupby(hourly_data.index.day).transform(
lambda x: x.fillna(method='ffill', limit=23))
if divide:
hourly_data /= 24
return hourly_data | python | def distribute_equally(daily_data, divide=False):
"""Obtains hourly values by equally distributing the daily values.
Args:
daily_data: daily values
divide: if True, divide resulting values by the number of hours in
order to preserve the daily sum (required e.g. for precipitation).
Returns:
Equally distributed hourly values.
"""
index = hourly_index(daily_data.index)
hourly_data = daily_data.reindex(index)
hourly_data = hourly_data.groupby(hourly_data.index.day).transform(
lambda x: x.fillna(method='ffill', limit=23))
if divide:
hourly_data /= 24
return hourly_data | [
"def",
"distribute_equally",
"(",
"daily_data",
",",
"divide",
"=",
"False",
")",
":",
"index",
"=",
"hourly_index",
"(",
"daily_data",
".",
"index",
")",
"hourly_data",
"=",
"daily_data",
".",
"reindex",
"(",
"index",
")",
"hourly_data",
"=",
"hourly_data",
... | Obtains hourly values by equally distributing the daily values.
Args:
daily_data: daily values
divide: if True, divide resulting values by the number of hours in
order to preserve the daily sum (required e.g. for precipitation).
Returns:
Equally distributed hourly values. | [
"Obtains",
"hourly",
"values",
"by",
"equally",
"distributing",
"the",
"daily",
"values",
"."
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L50-L70 | train |
kristianfoerster/melodist | melodist/util/util.py | vapor_pressure | def vapor_pressure(temp, hum):
"""
Calculates vapor pressure from temperature and humidity after Sonntag (1990).
Args:
temp: temperature values
hum: humidity value(s). Can be scalar (e.g. for calculating saturation vapor pressure).
Returns:
Vapor pressure in hPa.
"""
if np.isscalar(hum):
hum = np.zeros(temp.shape) + hum
assert(temp.shape == hum.shape)
positives = np.array(temp >= 273.15)
vap_press = np.zeros(temp.shape) * np.nan
vap_press[positives] = 6.112 * np.exp((17.62 * (temp[positives] - 273.15)) / (243.12 + (temp[positives] - 273.15))) * hum[positives] / 100.
vap_press[~positives] = 6.112 * np.exp((22.46 * (temp[~positives] - 273.15)) / (272.62 + (temp[~positives] - 273.15))) * hum[~positives] / 100.
return vap_press | python | def vapor_pressure(temp, hum):
"""
Calculates vapor pressure from temperature and humidity after Sonntag (1990).
Args:
temp: temperature values
hum: humidity value(s). Can be scalar (e.g. for calculating saturation vapor pressure).
Returns:
Vapor pressure in hPa.
"""
if np.isscalar(hum):
hum = np.zeros(temp.shape) + hum
assert(temp.shape == hum.shape)
positives = np.array(temp >= 273.15)
vap_press = np.zeros(temp.shape) * np.nan
vap_press[positives] = 6.112 * np.exp((17.62 * (temp[positives] - 273.15)) / (243.12 + (temp[positives] - 273.15))) * hum[positives] / 100.
vap_press[~positives] = 6.112 * np.exp((22.46 * (temp[~positives] - 273.15)) / (272.62 + (temp[~positives] - 273.15))) * hum[~positives] / 100.
return vap_press | [
"def",
"vapor_pressure",
"(",
"temp",
",",
"hum",
")",
":",
"if",
"np",
".",
"isscalar",
"(",
"hum",
")",
":",
"hum",
"=",
"np",
".",
"zeros",
"(",
"temp",
".",
"shape",
")",
"+",
"hum",
"assert",
"(",
"temp",
".",
"shape",
"==",
"hum",
".",
"s... | Calculates vapor pressure from temperature and humidity after Sonntag (1990).
Args:
temp: temperature values
hum: humidity value(s). Can be scalar (e.g. for calculating saturation vapor pressure).
Returns:
Vapor pressure in hPa. | [
"Calculates",
"vapor",
"pressure",
"from",
"temperature",
"and",
"humidity",
"after",
"Sonntag",
"(",
"1990",
")",
"."
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L73-L95 | train |
kristianfoerster/melodist | melodist/util/util.py | dewpoint_temperature | def dewpoint_temperature(temp, hum):
"""computes the dewpoint temperature
Parameters
----
temp : temperature [K]
hum : relative humidity
Returns
dewpoint temperature in K
"""
assert(temp.shape == hum.shape)
vap_press = vapor_pressure(temp, hum)
positives = np.array(temp >= 273.15)
dewpoint_temp = temp.copy() * np.nan
dewpoint_temp[positives] = 243.12 * np.log(vap_press[positives] / 6.112) / (17.62 - np.log(vap_press[positives] / 6.112))
dewpoint_temp[~positives] = 272.62 * np.log(vap_press[~positives] / 6.112) / (22.46 - np.log(vap_press[~positives] / 6.112))
return dewpoint_temp + 273.15 | python | def dewpoint_temperature(temp, hum):
"""computes the dewpoint temperature
Parameters
----
temp : temperature [K]
hum : relative humidity
Returns
dewpoint temperature in K
"""
assert(temp.shape == hum.shape)
vap_press = vapor_pressure(temp, hum)
positives = np.array(temp >= 273.15)
dewpoint_temp = temp.copy() * np.nan
dewpoint_temp[positives] = 243.12 * np.log(vap_press[positives] / 6.112) / (17.62 - np.log(vap_press[positives] / 6.112))
dewpoint_temp[~positives] = 272.62 * np.log(vap_press[~positives] / 6.112) / (22.46 - np.log(vap_press[~positives] / 6.112))
return dewpoint_temp + 273.15 | [
"def",
"dewpoint_temperature",
"(",
"temp",
",",
"hum",
")",
":",
"assert",
"(",
"temp",
".",
"shape",
"==",
"hum",
".",
"shape",
")",
"vap_press",
"=",
"vapor_pressure",
"(",
"temp",
",",
"hum",
")",
"positives",
"=",
"np",
".",
"array",
"(",
"temp",
... | computes the dewpoint temperature
Parameters
----
temp : temperature [K]
hum : relative humidity
Returns
dewpoint temperature in K | [
"computes",
"the",
"dewpoint",
"temperature"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L98-L119 | train |
kristianfoerster/melodist | melodist/util/util.py | linregress | def linregress(x, y, return_stats=False):
"""linear regression calculation
Parameters
----
x : independent variable (series)
y : dependent variable (series)
return_stats : returns statistical values as well if required (bool)
Returns
----
list of parameters (and statistics)
"""
a1, a0, r_value, p_value, stderr = scipy.stats.linregress(x, y)
retval = a1, a0
if return_stats:
retval += r_value, p_value, stderr
return retval | python | def linregress(x, y, return_stats=False):
"""linear regression calculation
Parameters
----
x : independent variable (series)
y : dependent variable (series)
return_stats : returns statistical values as well if required (bool)
Returns
----
list of parameters (and statistics)
"""
a1, a0, r_value, p_value, stderr = scipy.stats.linregress(x, y)
retval = a1, a0
if return_stats:
retval += r_value, p_value, stderr
return retval | [
"def",
"linregress",
"(",
"x",
",",
"y",
",",
"return_stats",
"=",
"False",
")",
":",
"a1",
",",
"a0",
",",
"r_value",
",",
"p_value",
",",
"stderr",
"=",
"scipy",
".",
"stats",
".",
"linregress",
"(",
"x",
",",
"y",
")",
"retval",
"=",
"a1",
","... | linear regression calculation
Parameters
----
x : independent variable (series)
y : dependent variable (series)
return_stats : returns statistical values as well if required (bool)
Returns
----
list of parameters (and statistics) | [
"linear",
"regression",
"calculation"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L122-L142 | train |
kristianfoerster/melodist | melodist/util/util.py | get_sun_times | def get_sun_times(dates, lon, lat, time_zone):
"""Computes the times of sunrise, solar noon, and sunset for each day.
Parameters
----
dates: datetime
lat : latitude in DecDeg
lon : longitude in DecDeg
time_zone : timezone
Returns
----
DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours
"""
df = pd.DataFrame(index=dates, columns=['sunrise', 'sunnoon', 'sunset', 'daylength'])
doy = np.array([(d - d.replace(day=1, month=1)).days + 1 for d in df.index]) # day of year
# Day angle and declination after Bourges (1985):
day_angle_b = np.deg2rad((360. / 365.25) * (doy - 79.346))
declination = np.deg2rad(
0.3723 + 23.2567 * np.sin(day_angle_b) - 0.7580 * np.cos(day_angle_b)
+ 0.1149 * np.sin(2*day_angle_b) + 0.3656 * np.cos(2*day_angle_b)
- 0.1712 * np.sin(3*day_angle_b) + 0.0201 * np.cos(3*day_angle_b)
)
# Equation of time with day angle after Spencer (1971):
day_angle_s = 2 * np.pi * (doy - 1) / 365.
eq_time = 12. / np.pi * (
0.000075 +
0.001868 * np.cos( day_angle_s) - 0.032077 * np.sin( day_angle_s) -
0.014615 * np.cos(2*day_angle_s) - 0.040849 * np.sin(2*day_angle_s)
)
#
standard_meridian = time_zone * 15.
delta_lat_time = (lon - standard_meridian) * 24. / 360.
omega_nul_arg = -np.tan(np.deg2rad(lat)) * np.tan(declination)
omega_nul = np.arccos(omega_nul_arg)
sunrise = 12. * (1. - (omega_nul) / np.pi) - delta_lat_time - eq_time
sunset = 12. * (1. + (omega_nul) / np.pi) - delta_lat_time - eq_time
# as an approximation, solar noon is independent of the below mentioned
# cases:
sunnoon = 12. * (1.) - delta_lat_time - eq_time
# $kf 2015-11-13: special case midnight sun and polar night
# CASE 1: MIDNIGHT SUN
# set sunrise and sunset to values that would yield the maximum day
# length even though this a crude assumption
pos = omega_nul_arg < -1
sunrise[pos] = sunnoon[pos] - 12
sunset[pos] = sunnoon[pos] + 12
# CASE 2: POLAR NIGHT
# set sunrise and sunset to values that would yield the minmum day
# length even though this a crude assumption
pos = omega_nul_arg > 1
sunrise[pos] = sunnoon[pos]
sunset[pos] = sunnoon[pos]
daylength = sunset - sunrise
# adjust if required
sunrise[sunrise < 0] += 24
sunset[sunset > 24] -= 24
df.sunrise = sunrise
df.sunnoon = sunnoon
df.sunset = sunset
df.daylength = daylength
return df | python | def get_sun_times(dates, lon, lat, time_zone):
"""Computes the times of sunrise, solar noon, and sunset for each day.
Parameters
----
dates: datetime
lat : latitude in DecDeg
lon : longitude in DecDeg
time_zone : timezone
Returns
----
DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours
"""
df = pd.DataFrame(index=dates, columns=['sunrise', 'sunnoon', 'sunset', 'daylength'])
doy = np.array([(d - d.replace(day=1, month=1)).days + 1 for d in df.index]) # day of year
# Day angle and declination after Bourges (1985):
day_angle_b = np.deg2rad((360. / 365.25) * (doy - 79.346))
declination = np.deg2rad(
0.3723 + 23.2567 * np.sin(day_angle_b) - 0.7580 * np.cos(day_angle_b)
+ 0.1149 * np.sin(2*day_angle_b) + 0.3656 * np.cos(2*day_angle_b)
- 0.1712 * np.sin(3*day_angle_b) + 0.0201 * np.cos(3*day_angle_b)
)
# Equation of time with day angle after Spencer (1971):
day_angle_s = 2 * np.pi * (doy - 1) / 365.
eq_time = 12. / np.pi * (
0.000075 +
0.001868 * np.cos( day_angle_s) - 0.032077 * np.sin( day_angle_s) -
0.014615 * np.cos(2*day_angle_s) - 0.040849 * np.sin(2*day_angle_s)
)
#
standard_meridian = time_zone * 15.
delta_lat_time = (lon - standard_meridian) * 24. / 360.
omega_nul_arg = -np.tan(np.deg2rad(lat)) * np.tan(declination)
omega_nul = np.arccos(omega_nul_arg)
sunrise = 12. * (1. - (omega_nul) / np.pi) - delta_lat_time - eq_time
sunset = 12. * (1. + (omega_nul) / np.pi) - delta_lat_time - eq_time
# as an approximation, solar noon is independent of the below mentioned
# cases:
sunnoon = 12. * (1.) - delta_lat_time - eq_time
# $kf 2015-11-13: special case midnight sun and polar night
# CASE 1: MIDNIGHT SUN
# set sunrise and sunset to values that would yield the maximum day
# length even though this a crude assumption
pos = omega_nul_arg < -1
sunrise[pos] = sunnoon[pos] - 12
sunset[pos] = sunnoon[pos] + 12
# CASE 2: POLAR NIGHT
# set sunrise and sunset to values that would yield the minmum day
# length even though this a crude assumption
pos = omega_nul_arg > 1
sunrise[pos] = sunnoon[pos]
sunset[pos] = sunnoon[pos]
daylength = sunset - sunrise
# adjust if required
sunrise[sunrise < 0] += 24
sunset[sunset > 24] -= 24
df.sunrise = sunrise
df.sunnoon = sunnoon
df.sunset = sunset
df.daylength = daylength
return df | [
"def",
"get_sun_times",
"(",
"dates",
",",
"lon",
",",
"lat",
",",
"time_zone",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"index",
"=",
"dates",
",",
"columns",
"=",
"[",
"'sunrise'",
",",
"'sunnoon'",
",",
"'sunset'",
",",
"'daylength'",
"]",
... | Computes the times of sunrise, solar noon, and sunset for each day.
Parameters
----
dates: datetime
lat : latitude in DecDeg
lon : longitude in DecDeg
time_zone : timezone
Returns
----
DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours | [
"Computes",
"the",
"times",
"of",
"sunrise",
"solar",
"noon",
"and",
"sunset",
"for",
"each",
"day",
"."
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L145-L221 | train |
kristianfoerster/melodist | melodist/util/util.py | detect_gaps | def detect_gaps(dataframe, timestep, print_all=False, print_max=5, verbose=True):
"""checks if a given dataframe contains gaps and returns the number of gaps
This funtion checks if a dataframe contains any gaps for a given temporal
resolution that needs to be specified in seconds. The number of gaps
detected in the dataframe is returned.
Args:
dataframe: A pandas dataframe object with index defined as datetime
timestep (int): The temporal resolution of the time series in seconds
(e.g., 86400 for daily values)
print_all (bool, opt): Lists every gap on the screen
print_mx (int, opt): The maximum number of gaps listed on the screen in
order to avoid a decrease in performance if numerous gaps occur
verbose (bool, opt): Enables/disables output to the screen
Returns:
The number of gaps as integer. Negative values indicate errors.
"""
gcount = 0
msg_counter = 0
warning_printed = False
try:
n = len(dataframe.index)
except:
print('Error: Invalid dataframe.')
return -1
for i in range(0, n):
if(i > 0):
time_diff = dataframe.index[i] - dataframe.index[i-1]
if(time_diff.delta/1E9 != timestep):
gcount += 1
if print_all or (msg_counter <= print_max - 1):
if verbose:
print('Warning: Gap in time series found between %s and %s' % (dataframe.index[i-1], dataframe.index[i]))
msg_counter += 1
if msg_counter == print_max and verbose and not warning_printed:
print('Waring: Only the first %i gaps have been listed. Try to increase print_max parameter to show more details.' % msg_counter)
warning_printed = True
if verbose:
print('%i gaps found in total.' % (gcount))
return gcount | python | def detect_gaps(dataframe, timestep, print_all=False, print_max=5, verbose=True):
"""checks if a given dataframe contains gaps and returns the number of gaps
This funtion checks if a dataframe contains any gaps for a given temporal
resolution that needs to be specified in seconds. The number of gaps
detected in the dataframe is returned.
Args:
dataframe: A pandas dataframe object with index defined as datetime
timestep (int): The temporal resolution of the time series in seconds
(e.g., 86400 for daily values)
print_all (bool, opt): Lists every gap on the screen
print_mx (int, opt): The maximum number of gaps listed on the screen in
order to avoid a decrease in performance if numerous gaps occur
verbose (bool, opt): Enables/disables output to the screen
Returns:
The number of gaps as integer. Negative values indicate errors.
"""
gcount = 0
msg_counter = 0
warning_printed = False
try:
n = len(dataframe.index)
except:
print('Error: Invalid dataframe.')
return -1
for i in range(0, n):
if(i > 0):
time_diff = dataframe.index[i] - dataframe.index[i-1]
if(time_diff.delta/1E9 != timestep):
gcount += 1
if print_all or (msg_counter <= print_max - 1):
if verbose:
print('Warning: Gap in time series found between %s and %s' % (dataframe.index[i-1], dataframe.index[i]))
msg_counter += 1
if msg_counter == print_max and verbose and not warning_printed:
print('Waring: Only the first %i gaps have been listed. Try to increase print_max parameter to show more details.' % msg_counter)
warning_printed = True
if verbose:
print('%i gaps found in total.' % (gcount))
return gcount | [
"def",
"detect_gaps",
"(",
"dataframe",
",",
"timestep",
",",
"print_all",
"=",
"False",
",",
"print_max",
"=",
"5",
",",
"verbose",
"=",
"True",
")",
":",
"gcount",
"=",
"0",
"msg_counter",
"=",
"0",
"warning_printed",
"=",
"False",
"try",
":",
"n",
"... | checks if a given dataframe contains gaps and returns the number of gaps
This funtion checks if a dataframe contains any gaps for a given temporal
resolution that needs to be specified in seconds. The number of gaps
detected in the dataframe is returned.
Args:
dataframe: A pandas dataframe object with index defined as datetime
timestep (int): The temporal resolution of the time series in seconds
(e.g., 86400 for daily values)
print_all (bool, opt): Lists every gap on the screen
print_mx (int, opt): The maximum number of gaps listed on the screen in
order to avoid a decrease in performance if numerous gaps occur
verbose (bool, opt): Enables/disables output to the screen
Returns:
The number of gaps as integer. Negative values indicate errors. | [
"checks",
"if",
"a",
"given",
"dataframe",
"contains",
"gaps",
"and",
"returns",
"the",
"number",
"of",
"gaps"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L224-L265 | train |
kristianfoerster/melodist | melodist/util/util.py | drop_incomplete_days | def drop_incomplete_days(dataframe, shift=0):
"""truncates a given dataframe to full days only
This funtion truncates a given pandas dataframe (time series) to full days
only, thus dropping leading and tailing hours of incomplete days. Please
note that this methodology only applies to hourly time series.
Args:
dataframe: A pandas dataframe object with index defined as datetime
shift (unsigned int, opt): First hour of daily recordings. For daily
recordings of precipitation gages, 8 would be the first hour of
the subsequent day of recordings since daily totals are
usually recorded at 7. Omit defining this parameter if you intend
to pertain recordings to 0-23h.
Returns:
A dataframe with full days only.
"""
dropped = 0
if shift > 23 or shift < 0:
print("Invalid shift parameter setting! Using defaults.")
shift = 0
first = shift
last = first - 1
if last < 0:
last += 24
try:
# todo: move this checks to a separate function
n = len(dataframe.index)
except:
print('Error: Invalid dataframe.')
return dataframe
delete = list()
# drop heading lines if required
for i in range(0, n):
if dataframe.index.hour[i] == first and dataframe.index.minute[i] == 0:
break
else:
delete.append(i)
dropped += 1
# drop tailing lines if required
for i in range(n-1, 0, -1):
if dataframe.index.hour[i] == last and dataframe.index.minute[i] == 0:
break
else:
delete.append(i)
dropped += 1
# print("The following rows have been dropped (%i in total):" % dropped)
# print(delete)
return dataframe.drop(dataframe.index[[delete]]) | python | def drop_incomplete_days(dataframe, shift=0):
"""truncates a given dataframe to full days only
This funtion truncates a given pandas dataframe (time series) to full days
only, thus dropping leading and tailing hours of incomplete days. Please
note that this methodology only applies to hourly time series.
Args:
dataframe: A pandas dataframe object with index defined as datetime
shift (unsigned int, opt): First hour of daily recordings. For daily
recordings of precipitation gages, 8 would be the first hour of
the subsequent day of recordings since daily totals are
usually recorded at 7. Omit defining this parameter if you intend
to pertain recordings to 0-23h.
Returns:
A dataframe with full days only.
"""
dropped = 0
if shift > 23 or shift < 0:
print("Invalid shift parameter setting! Using defaults.")
shift = 0
first = shift
last = first - 1
if last < 0:
last += 24
try:
# todo: move this checks to a separate function
n = len(dataframe.index)
except:
print('Error: Invalid dataframe.')
return dataframe
delete = list()
# drop heading lines if required
for i in range(0, n):
if dataframe.index.hour[i] == first and dataframe.index.minute[i] == 0:
break
else:
delete.append(i)
dropped += 1
# drop tailing lines if required
for i in range(n-1, 0, -1):
if dataframe.index.hour[i] == last and dataframe.index.minute[i] == 0:
break
else:
delete.append(i)
dropped += 1
# print("The following rows have been dropped (%i in total):" % dropped)
# print(delete)
return dataframe.drop(dataframe.index[[delete]]) | [
"def",
"drop_incomplete_days",
"(",
"dataframe",
",",
"shift",
"=",
"0",
")",
":",
"dropped",
"=",
"0",
"if",
"shift",
">",
"23",
"or",
"shift",
"<",
"0",
":",
"print",
"(",
"\"Invalid shift parameter setting! Using defaults.\"",
")",
"shift",
"=",
"0",
"fir... | truncates a given dataframe to full days only
This funtion truncates a given pandas dataframe (time series) to full days
only, thus dropping leading and tailing hours of incomplete days. Please
note that this methodology only applies to hourly time series.
Args:
dataframe: A pandas dataframe object with index defined as datetime
shift (unsigned int, opt): First hour of daily recordings. For daily
recordings of precipitation gages, 8 would be the first hour of
the subsequent day of recordings since daily totals are
usually recorded at 7. Omit defining this parameter if you intend
to pertain recordings to 0-23h.
Returns:
A dataframe with full days only. | [
"truncates",
"a",
"given",
"dataframe",
"to",
"full",
"days",
"only"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L268-L320 | train |
kristianfoerster/melodist | melodist/util/util.py | daily_from_hourly | def daily_from_hourly(df):
"""Aggregates data (hourly to daily values) according to the characteristics
of each variable (e.g., average for temperature, sum for precipitation)
Args:
df: dataframe including time series with one hour time steps
Returns:
dataframe (daily)
"""
df_daily = pd.DataFrame()
if 'temp' in df:
df_daily['temp'] = df.temp.resample('D').mean()
df_daily['tmin'] = df.temp.groupby(df.temp.index.date).min()
df_daily['tmax'] = df.temp.groupby(df.temp.index.date).max()
if 'precip' in df:
df_daily['precip'] = df.precip.resample('D').sum()
if 'glob' in df:
df_daily['glob'] = df.glob.resample('D').mean()
if 'hum' in df:
df_daily['hum'] = df.hum.resample('D').mean()
if 'hum' in df:
df_daily['hum_min'] = df.hum.groupby(df.hum.index.date).min()
if 'hum' in df:
df_daily['hum_max'] = df.hum.groupby(df.hum.index.date).max()
if 'wind' in df:
df_daily['wind'] = df.wind.resample('D').mean()
if 'ssd' in df:
df_daily['ssd'] = df.ssd.resample('D').sum() / 60 # minutes to hours
df_daily.index.name = None
return df_daily | python | def daily_from_hourly(df):
"""Aggregates data (hourly to daily values) according to the characteristics
of each variable (e.g., average for temperature, sum for precipitation)
Args:
df: dataframe including time series with one hour time steps
Returns:
dataframe (daily)
"""
df_daily = pd.DataFrame()
if 'temp' in df:
df_daily['temp'] = df.temp.resample('D').mean()
df_daily['tmin'] = df.temp.groupby(df.temp.index.date).min()
df_daily['tmax'] = df.temp.groupby(df.temp.index.date).max()
if 'precip' in df:
df_daily['precip'] = df.precip.resample('D').sum()
if 'glob' in df:
df_daily['glob'] = df.glob.resample('D').mean()
if 'hum' in df:
df_daily['hum'] = df.hum.resample('D').mean()
if 'hum' in df:
df_daily['hum_min'] = df.hum.groupby(df.hum.index.date).min()
if 'hum' in df:
df_daily['hum_max'] = df.hum.groupby(df.hum.index.date).max()
if 'wind' in df:
df_daily['wind'] = df.wind.resample('D').mean()
if 'ssd' in df:
df_daily['ssd'] = df.ssd.resample('D').sum() / 60 # minutes to hours
df_daily.index.name = None
return df_daily | [
"def",
"daily_from_hourly",
"(",
"df",
")",
":",
"df_daily",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"if",
"'temp'",
"in",
"df",
":",
"df_daily",
"[",
"'temp'",
"]",
"=",
"df",
".",
"temp",
".",
"resample",
"(",
"'D'",
")",
".",
"mean",
"(",
")",
... | Aggregates data (hourly to daily values) according to the characteristics
of each variable (e.g., average for temperature, sum for precipitation)
Args:
df: dataframe including time series with one hour time steps
Returns:
dataframe (daily) | [
"Aggregates",
"data",
"(",
"hourly",
"to",
"daily",
"values",
")",
"according",
"to",
"the",
"characteristics",
"of",
"each",
"variable",
"(",
"e",
".",
"g",
".",
"average",
"for",
"temperature",
"sum",
"for",
"precipitation",
")"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/util/util.py#L340-L380 | train |
kristianfoerster/melodist | melodist/precipitation.py | disagg_prec | def disagg_prec(dailyData,
method='equal',
cascade_options=None,
hourly_data_obs=None,
zerodiv="uniform",
shift=0):
"""The disaggregation function for precipitation.
Parameters
----------
dailyData : pd.Series
daily data
method : str
method to disaggregate
cascade_options : cascade object
including statistical parameters for the cascade model
hourly_data_obs : pd.Series
observed hourly data of master station
zerodiv : str
method to deal with zero division by key "uniform" --> uniform
distribution
shift : int
shifts the precipitation data by shift (int) steps (eg +7 for
7:00 to 6:00)
"""
if method not in ('equal', 'cascade', 'masterstation'):
raise ValueError('Invalid option')
if method == 'equal':
precip_disagg = melodist.distribute_equally(dailyData.precip,
divide=True)
elif method == 'masterstation':
precip_disagg = precip_master_station(dailyData,
hourly_data_obs,
zerodiv)
elif method == 'cascade':
assert cascade_options is not None
precip_disagg = disagg_prec_cascade(dailyData,
cascade_options,
shift=shift)
return precip_disagg | python | def disagg_prec(dailyData,
method='equal',
cascade_options=None,
hourly_data_obs=None,
zerodiv="uniform",
shift=0):
"""The disaggregation function for precipitation.
Parameters
----------
dailyData : pd.Series
daily data
method : str
method to disaggregate
cascade_options : cascade object
including statistical parameters for the cascade model
hourly_data_obs : pd.Series
observed hourly data of master station
zerodiv : str
method to deal with zero division by key "uniform" --> uniform
distribution
shift : int
shifts the precipitation data by shift (int) steps (eg +7 for
7:00 to 6:00)
"""
if method not in ('equal', 'cascade', 'masterstation'):
raise ValueError('Invalid option')
if method == 'equal':
precip_disagg = melodist.distribute_equally(dailyData.precip,
divide=True)
elif method == 'masterstation':
precip_disagg = precip_master_station(dailyData,
hourly_data_obs,
zerodiv)
elif method == 'cascade':
assert cascade_options is not None
precip_disagg = disagg_prec_cascade(dailyData,
cascade_options,
shift=shift)
return precip_disagg | [
"def",
"disagg_prec",
"(",
"dailyData",
",",
"method",
"=",
"'equal'",
",",
"cascade_options",
"=",
"None",
",",
"hourly_data_obs",
"=",
"None",
",",
"zerodiv",
"=",
"\"uniform\"",
",",
"shift",
"=",
"0",
")",
":",
"if",
"method",
"not",
"in",
"(",
"'equ... | The disaggregation function for precipitation.
Parameters
----------
dailyData : pd.Series
daily data
method : str
method to disaggregate
cascade_options : cascade object
including statistical parameters for the cascade model
hourly_data_obs : pd.Series
observed hourly data of master station
zerodiv : str
method to deal with zero division by key "uniform" --> uniform
distribution
shift : int
shifts the precipitation data by shift (int) steps (eg +7 for
7:00 to 6:00) | [
"The",
"disaggregation",
"function",
"for",
"precipitation",
"."
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/precipitation.py#L45-L87 | train |
kristianfoerster/melodist | melodist/precipitation.py | disagg_prec_cascade | def disagg_prec_cascade(precip_daily,
cascade_options, hourly=True,level=9,
shift=0,
test=False):
"""Precipitation disaggregation with cascade model (Olsson, 1998)
Parameters
----------
precip_daily : pd.Series
daily data
hourly: Boolean (for an hourly resolution disaggregation)
if False, then returns 5-min disaggregated precipitation
(disaggregation level depending on the "level" variable)
cascade_options : cascade object
including statistical parameters for the cascade model
shift : int
shifts the precipitation data by shift steps (eg +7 for 7:00 to
6:00)
test : bool
test mode, returns time series of each cascade level
"""
if len(precip_daily) < 2:
raise ValueError('Input data must have at least two elements.')
# set missing values to zero:
precip_daily = precip_daily.copy()
missing_days = precip_daily.index[precip_daily.isnull()]
precip_daily[missing_days] = 0
if hourly:
si = 5 # index of first level
else:
si = level
# statistics for branching into two bins
wxxcum = np.zeros((7, 2, 4))
if isinstance(cascade_options, melodist.cascade.CascadeStatistics):
# this is the standard case considering one data set for all levels
# get cumulative probabilities for branching
overwrite_stats = False
for k in range(0, 7):
wxxcum[k, :, :] = cascade_options.wxx[k, :, :]
if k > 0:
wxxcum[k, :, :] = wxxcum[k-1, :, :] + wxxcum[k, :, :]
elif isinstance(cascade_options, list):
if len(cascade_options) == si:#5
overwrite_stats = True
list_casc = cascade_options
else:
raise ValueError('Cascade statistics list must have %s elements!' % si)
else:
raise TypeError('cascade_options has invalid type')
# arrays for each level
n = len(precip_daily)
vdn1 = np.zeros(n*2)
vdn2 = np.zeros(n*4)
vdn3 = np.zeros(n*8)
vdn4 = np.zeros(n*16)
vdn5 = np.zeros(n*32)
if not hourly:
vdn6 = np.zeros(n*64)
vdn7 = np.zeros(n*128)
vdn8 = np.zeros(n*256)
vdn9 = np.zeros(n*512)
if level == 10 or level == 11:
vdn10 = np.zeros(n*1024)
if level == 11:
vdn11 = np.zeros(n*2048)
# class boundaries for histograms
wclassbounds = np.array([0.0,
0.1429,
0.2857,
0.4286,
0.5714,
0.7143,
0.8571,
1.0])
# disaggregation for each level
for l in range(1, si+1):
if l == 1:
vdn_in = precip_daily
vdn_out = vdn1
elif l == 2:
vdn_in = vdn_out
vdn_out = vdn2
elif l == 3:
vdn_in = vdn_out
vdn_out = vdn3
elif l == 4:
vdn_in = vdn_out
vdn_out = vdn4
elif l == 5:
vdn_in = vdn_out
vdn_out = vdn5
elif l == 6:
vdn_in = vdn_out
vdn_out = vdn6
elif l == 7:
vdn_in = vdn_out
vdn_out = vdn7
elif l == 8:
vdn_in = vdn_out
vdn_out = vdn8
elif l == 9:
vdn_in = vdn_out
vdn_out = vdn9
elif l == 10:
vdn_in = vdn_out
vdn_out = vdn10
elif l == 11:
vdn_in = vdn_out
vdn_out = vdn11
si -= 1
if overwrite_stats:
cascade_options = list_casc[si]
for k in range(0, 7):
wxxcum[k, :, :] = cascade_options.wxx[k, :, :]
if k > 0:
wxxcum[k, :, :] = wxxcum[k-1, :, :] + wxxcum[k, :, :]
meanvol = cascade_options.threshold[0]
else:
meanvol = cascade_options.threshold[si]
# evaluate mean rainfall intensity for wet boxes
# these values should be determined during the aggregation phase!!!!!
# mean volume threshold
# meanvol = np.mean(vdn_in[vdn_in>0.])
# use values derived parameter by parameter estimation instead
# see above
j = 0
for i in range(0, len(vdn_in)):
# it's raining now?
if vdn_in[i] > 0:
# determine type of box
if i == 0: # only starting or isolated
if vdn_in[i+1] > 0:
vbtype = cascade.BoxTypes.starting
else:
vbtype = cascade.BoxTypes.isolated
elif i == len(vdn_in)-1: # only ending or isolated
if vdn_in[i-1] > 0:
vbtype = cascade.BoxTypes.ending
else:
vbtype = cascade.BoxTypes.isolated
else: # neither at at the end nor at the beginning
if vdn_in[i-1] == 0 and vdn_in[i+1] == 0:
vbtype = cascade.BoxTypes.isolated
if vdn_in[i-1] == 0 and vdn_in[i+1] > 0:
vbtype = cascade.BoxTypes.starting
if vdn_in[i-1] > 0 and vdn_in[i+1] > 0:
vbtype = cascade.BoxTypes.enclosed
if vdn_in[i-1] > 0 and vdn_in[i+1] == 0:
vbtype = cascade.BoxTypes.ending
# above or below mean?
if vdn_in[i] > meanvol:
belowabove = 1 # above mean
else:
belowabove = 0 # below mean
#
p = np.zeros((3, 1))
p[0] = cascade_options.p01[belowabove, vbtype-1] # index changed!
p[1] = cascade_options.p10[belowabove, vbtype-1]
p[2] = cascade_options.pxx[belowabove, vbtype-1]
# draw a random number to determine the braching type
rndp = np.random.random()
if rndp <= p[0]:
# first box 0, second box: 1 P(0/1)
vdn_out[j] = 0.0
j = j + 1
vdn_out[j] = vdn_in[i]
j = j + 1
elif rndp > p[0] and rndp <= p[0] + p[1]:
# first box 1, second box: 0 P(1/0)
vdn_out[j] = vdn_in[i]
j = j + 1
vdn_out[j] = 0.0
j = j + 1
else:
# both boxes wet
# we need a new random number
rndw = np.random.random()
# guess w1:
for k in range(0, 7):
if rndw <= wxxcum[k, belowabove, vbtype-1]:
w1 = wclassbounds[k+1] - 1./14. # class center
break
vdn_out[j] = w1 * vdn_in[i]
j = j + 1
vdn_out[j] = (1. - w1) * vdn_in[i]
j = j + 1
# check results (in the previous version this error has never been observed)
if w1 < 0 or w1 > 1:
print('error')
return
else:
# add two dry boxes
vdn_out[j] = 0.0
j = j + 1
vdn_out[j] = 0.0
j = j + 1
if hourly:
# uniformly disaggregate 0.75 h values to 0.25 h values
vdn_025 = np.zeros(len(vdn_out)*3)
j = 0
for i in range(0, len(vdn_out)):
for m in range(0, 3):
vdn_025[j+m] = vdn_out[i] / 3.
j = j + 3
# aggregate to hourly time steps
vdn_025cs = np.cumsum(vdn_025)
vdn = np.zeros(int(len(vdn_025)/4))
for i in range(0, len(vdn)+1):
# for first hour take 4th item
if i == 0:
vdn[i] = vdn_025cs[3]
elif i == 1:
pass
else:
# >1 (starting with 2-1 = 1 item)
vdn[i-1] = vdn_025cs[(i*4)-1] - vdn_025cs[(i*4)-5]
disagg_precip = pd.Series(index=melodist.util.hourly_index(precip_daily.index), data=vdn)
else:
precip_sn = pd.Series(index= sub_level_index(precip_daily.index, level=level, fill_gaps=False), data=vdn_out)
disagg_precip = precip_sn.resample('5min').sum()
# set missing days to nan again:
for date in missing_days:
disagg_precip[ disagg_precip.index.date == date.date()] = np.nan
# shifts the data by shift steps (fills with nan/cuts edge data )
if shift != 0:
disagg_precip = disagg_precip.shift(shift) #? freq='1U')
# return time series
if test:
if hourly:
return vdn1, vdn2, vdn3, vdn4, vdn5, vdn_025, disagg_precip
else:
if level == 9:
return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, precip_sn, disagg_precip
elif level == 10:
return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, vdn10, precip_sn, disagg_precip
else:
return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, vdn10, vdn11, precip_sn, disagg_precip
else:
return disagg_precip | python | def disagg_prec_cascade(precip_daily,
cascade_options, hourly=True,level=9,
shift=0,
test=False):
"""Precipitation disaggregation with cascade model (Olsson, 1998)
Parameters
----------
precip_daily : pd.Series
daily data
hourly: Boolean (for an hourly resolution disaggregation)
if False, then returns 5-min disaggregated precipitation
(disaggregation level depending on the "level" variable)
cascade_options : cascade object
including statistical parameters for the cascade model
shift : int
shifts the precipitation data by shift steps (eg +7 for 7:00 to
6:00)
test : bool
test mode, returns time series of each cascade level
"""
if len(precip_daily) < 2:
raise ValueError('Input data must have at least two elements.')
# set missing values to zero:
precip_daily = precip_daily.copy()
missing_days = precip_daily.index[precip_daily.isnull()]
precip_daily[missing_days] = 0
if hourly:
si = 5 # index of first level
else:
si = level
# statistics for branching into two bins
wxxcum = np.zeros((7, 2, 4))
if isinstance(cascade_options, melodist.cascade.CascadeStatistics):
# this is the standard case considering one data set for all levels
# get cumulative probabilities for branching
overwrite_stats = False
for k in range(0, 7):
wxxcum[k, :, :] = cascade_options.wxx[k, :, :]
if k > 0:
wxxcum[k, :, :] = wxxcum[k-1, :, :] + wxxcum[k, :, :]
elif isinstance(cascade_options, list):
if len(cascade_options) == si:#5
overwrite_stats = True
list_casc = cascade_options
else:
raise ValueError('Cascade statistics list must have %s elements!' % si)
else:
raise TypeError('cascade_options has invalid type')
# arrays for each level
n = len(precip_daily)
vdn1 = np.zeros(n*2)
vdn2 = np.zeros(n*4)
vdn3 = np.zeros(n*8)
vdn4 = np.zeros(n*16)
vdn5 = np.zeros(n*32)
if not hourly:
vdn6 = np.zeros(n*64)
vdn7 = np.zeros(n*128)
vdn8 = np.zeros(n*256)
vdn9 = np.zeros(n*512)
if level == 10 or level == 11:
vdn10 = np.zeros(n*1024)
if level == 11:
vdn11 = np.zeros(n*2048)
# class boundaries for histograms
wclassbounds = np.array([0.0,
0.1429,
0.2857,
0.4286,
0.5714,
0.7143,
0.8571,
1.0])
# disaggregation for each level
for l in range(1, si+1):
if l == 1:
vdn_in = precip_daily
vdn_out = vdn1
elif l == 2:
vdn_in = vdn_out
vdn_out = vdn2
elif l == 3:
vdn_in = vdn_out
vdn_out = vdn3
elif l == 4:
vdn_in = vdn_out
vdn_out = vdn4
elif l == 5:
vdn_in = vdn_out
vdn_out = vdn5
elif l == 6:
vdn_in = vdn_out
vdn_out = vdn6
elif l == 7:
vdn_in = vdn_out
vdn_out = vdn7
elif l == 8:
vdn_in = vdn_out
vdn_out = vdn8
elif l == 9:
vdn_in = vdn_out
vdn_out = vdn9
elif l == 10:
vdn_in = vdn_out
vdn_out = vdn10
elif l == 11:
vdn_in = vdn_out
vdn_out = vdn11
si -= 1
if overwrite_stats:
cascade_options = list_casc[si]
for k in range(0, 7):
wxxcum[k, :, :] = cascade_options.wxx[k, :, :]
if k > 0:
wxxcum[k, :, :] = wxxcum[k-1, :, :] + wxxcum[k, :, :]
meanvol = cascade_options.threshold[0]
else:
meanvol = cascade_options.threshold[si]
# evaluate mean rainfall intensity for wet boxes
# these values should be determined during the aggregation phase!!!!!
# mean volume threshold
# meanvol = np.mean(vdn_in[vdn_in>0.])
# use values derived parameter by parameter estimation instead
# see above
j = 0
for i in range(0, len(vdn_in)):
# it's raining now?
if vdn_in[i] > 0:
# determine type of box
if i == 0: # only starting or isolated
if vdn_in[i+1] > 0:
vbtype = cascade.BoxTypes.starting
else:
vbtype = cascade.BoxTypes.isolated
elif i == len(vdn_in)-1: # only ending or isolated
if vdn_in[i-1] > 0:
vbtype = cascade.BoxTypes.ending
else:
vbtype = cascade.BoxTypes.isolated
else: # neither at at the end nor at the beginning
if vdn_in[i-1] == 0 and vdn_in[i+1] == 0:
vbtype = cascade.BoxTypes.isolated
if vdn_in[i-1] == 0 and vdn_in[i+1] > 0:
vbtype = cascade.BoxTypes.starting
if vdn_in[i-1] > 0 and vdn_in[i+1] > 0:
vbtype = cascade.BoxTypes.enclosed
if vdn_in[i-1] > 0 and vdn_in[i+1] == 0:
vbtype = cascade.BoxTypes.ending
# above or below mean?
if vdn_in[i] > meanvol:
belowabove = 1 # above mean
else:
belowabove = 0 # below mean
#
p = np.zeros((3, 1))
p[0] = cascade_options.p01[belowabove, vbtype-1] # index changed!
p[1] = cascade_options.p10[belowabove, vbtype-1]
p[2] = cascade_options.pxx[belowabove, vbtype-1]
# draw a random number to determine the braching type
rndp = np.random.random()
if rndp <= p[0]:
# first box 0, second box: 1 P(0/1)
vdn_out[j] = 0.0
j = j + 1
vdn_out[j] = vdn_in[i]
j = j + 1
elif rndp > p[0] and rndp <= p[0] + p[1]:
# first box 1, second box: 0 P(1/0)
vdn_out[j] = vdn_in[i]
j = j + 1
vdn_out[j] = 0.0
j = j + 1
else:
# both boxes wet
# we need a new random number
rndw = np.random.random()
# guess w1:
for k in range(0, 7):
if rndw <= wxxcum[k, belowabove, vbtype-1]:
w1 = wclassbounds[k+1] - 1./14. # class center
break
vdn_out[j] = w1 * vdn_in[i]
j = j + 1
vdn_out[j] = (1. - w1) * vdn_in[i]
j = j + 1
# check results (in the previous version this error has never been observed)
if w1 < 0 or w1 > 1:
print('error')
return
else:
# add two dry boxes
vdn_out[j] = 0.0
j = j + 1
vdn_out[j] = 0.0
j = j + 1
if hourly:
# uniformly disaggregate 0.75 h values to 0.25 h values
vdn_025 = np.zeros(len(vdn_out)*3)
j = 0
for i in range(0, len(vdn_out)):
for m in range(0, 3):
vdn_025[j+m] = vdn_out[i] / 3.
j = j + 3
# aggregate to hourly time steps
vdn_025cs = np.cumsum(vdn_025)
vdn = np.zeros(int(len(vdn_025)/4))
for i in range(0, len(vdn)+1):
# for first hour take 4th item
if i == 0:
vdn[i] = vdn_025cs[3]
elif i == 1:
pass
else:
# >1 (starting with 2-1 = 1 item)
vdn[i-1] = vdn_025cs[(i*4)-1] - vdn_025cs[(i*4)-5]
disagg_precip = pd.Series(index=melodist.util.hourly_index(precip_daily.index), data=vdn)
else:
precip_sn = pd.Series(index= sub_level_index(precip_daily.index, level=level, fill_gaps=False), data=vdn_out)
disagg_precip = precip_sn.resample('5min').sum()
# set missing days to nan again:
for date in missing_days:
disagg_precip[ disagg_precip.index.date == date.date()] = np.nan
# shifts the data by shift steps (fills with nan/cuts edge data )
if shift != 0:
disagg_precip = disagg_precip.shift(shift) #? freq='1U')
# return time series
if test:
if hourly:
return vdn1, vdn2, vdn3, vdn4, vdn5, vdn_025, disagg_precip
else:
if level == 9:
return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, precip_sn, disagg_precip
elif level == 10:
return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, vdn10, precip_sn, disagg_precip
else:
return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, vdn10, vdn11, precip_sn, disagg_precip
else:
return disagg_precip | [
"def",
"disagg_prec_cascade",
"(",
"precip_daily",
",",
"cascade_options",
",",
"hourly",
"=",
"True",
",",
"level",
"=",
"9",
",",
"shift",
"=",
"0",
",",
"test",
"=",
"False",
")",
":",
"if",
"len",
"(",
"precip_daily",
")",
"<",
"2",
":",
"raise",
... | Precipitation disaggregation with cascade model (Olsson, 1998)
Parameters
----------
precip_daily : pd.Series
daily data
hourly: Boolean (for an hourly resolution disaggregation)
if False, then returns 5-min disaggregated precipitation
(disaggregation level depending on the "level" variable)
cascade_options : cascade object
including statistical parameters for the cascade model
shift : int
shifts the precipitation data by shift steps (eg +7 for 7:00 to
6:00)
test : bool
test mode, returns time series of each cascade level | [
"Precipitation",
"disaggregation",
"with",
"cascade",
"model",
"(",
"Olsson",
"1998",
")"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/precipitation.py#L90-L357 | train |
kristianfoerster/melodist | melodist/precipitation.py | precip_master_station | def precip_master_station(precip_daily,
master_precip_hourly,
zerodiv):
"""Disaggregate precipitation based on the patterns of a master station
Parameters
-----------
precip_daily : pd.Series
daily data
master_precip_hourly : pd.Series
observed hourly data of the master station
zerodiv : str
method to deal with zero division by key "uniform" --> uniform
distribution
"""
precip_hourly = pd.Series(index=melodist.util.hourly_index(precip_daily.index))
# set some parameters for cosine function
for index_d, precip in precip_daily.iteritems():
# get hourly data of the day
index = index_d.date().isoformat()
precip_h = master_precip_hourly[index]
# calc rel values and multiply by daily sums
# check for zero division
if precip_h.sum() != 0 and precip_h.sum() != np.isnan(precip_h.sum()):
precip_h_rel = (precip_h / precip_h.sum()) * precip
else:
# uniform option will preserve daily data by uniform distr
if zerodiv == 'uniform':
precip_h_rel = (1/24) * precip
else:
precip_h_rel = 0
# write the disaggregated day to data
precip_hourly[index] = precip_h_rel
return precip_hourly | python | def precip_master_station(precip_daily,
master_precip_hourly,
zerodiv):
"""Disaggregate precipitation based on the patterns of a master station
Parameters
-----------
precip_daily : pd.Series
daily data
master_precip_hourly : pd.Series
observed hourly data of the master station
zerodiv : str
method to deal with zero division by key "uniform" --> uniform
distribution
"""
precip_hourly = pd.Series(index=melodist.util.hourly_index(precip_daily.index))
# set some parameters for cosine function
for index_d, precip in precip_daily.iteritems():
# get hourly data of the day
index = index_d.date().isoformat()
precip_h = master_precip_hourly[index]
# calc rel values and multiply by daily sums
# check for zero division
if precip_h.sum() != 0 and precip_h.sum() != np.isnan(precip_h.sum()):
precip_h_rel = (precip_h / precip_h.sum()) * precip
else:
# uniform option will preserve daily data by uniform distr
if zerodiv == 'uniform':
precip_h_rel = (1/24) * precip
else:
precip_h_rel = 0
# write the disaggregated day to data
precip_hourly[index] = precip_h_rel
return precip_hourly | [
"def",
"precip_master_station",
"(",
"precip_daily",
",",
"master_precip_hourly",
",",
"zerodiv",
")",
":",
"precip_hourly",
"=",
"pd",
".",
"Series",
"(",
"index",
"=",
"melodist",
".",
"util",
".",
"hourly_index",
"(",
"precip_daily",
".",
"index",
")",
")",... | Disaggregate precipitation based on the patterns of a master station
Parameters
-----------
precip_daily : pd.Series
daily data
master_precip_hourly : pd.Series
observed hourly data of the master station
zerodiv : str
method to deal with zero division by key "uniform" --> uniform
distribution | [
"Disaggregate",
"precipitation",
"based",
"on",
"the",
"patterns",
"of",
"a",
"master",
"station"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/precipitation.py#L359-L400 | train |
kristianfoerster/melodist | melodist/precipitation.py | aggregate_precipitation | def aggregate_precipitation(vec_data,hourly=True, percentile=50):
"""Aggregates highly resolved precipitation data and creates statistics
Parameters
----------
vec_data : pd.Series
hourly (hourly=True) OR 5-min values
Returns
-------
output : cascade object
representing statistics of the cascade model
"""
cascade_opt = cascade.CascadeStatistics()
cascade_opt.percentile = percentile
# length of input time series
n_in = len(vec_data)
n_out = np.floor(n_in/2)
# alternative:
# 1st step: new time series
vec_time = vec_data.index
vdn0 = []
vtn0 = []
j = 0
for i in range(0, n_in):
if np.mod(i, 2) != 0:
vdn0.append(vec_data.precip.values[i-1] + vec_data.precip.values[i])
vtn0.append(vec_time[i])
j = j+1
vdn = pd.DataFrame(index=vtn0, data={'precip': vdn0})
# length of new time series
n_out = len(vdn)
# series of box types:
vbtype = np.zeros((n_out, ), dtype=np.int)
# fields for empirical probabilities
# counts
nb = np.zeros((2, 4))
nbxx = np.zeros((2, 4))
# class boundaries for histograms
# wclassbounds = np.linspace(0, 1, num=8)
wlower = np.array([0,
0.1429,
0.2857,
0.4286,
0.5714,
0.7143,
0.8571]) # wclassbounds[0:7]
wupper = np.array([0.1429,
0.2857,
0.4286,
0.5714,
0.7143,
0.8571,
1.0]) # wclassbounds[1:8]
# evaluate mean rainfall intensity for wet boxes
# these values should be determined during the aggregation phase!!!!!
# mean volume threshold
meanvol = np.percentile(vdn.precip[vdn.precip > 0.],
cascade_opt.percentile) # np.mean(vdn.precip[vdn.precip>0.])
cascade_opt.threshold = np.array([meanvol])
# 2nd step: classify boxes at the upper level
for i in range(0, n_out):
if vdn.precip.values[i] > 0.: # rain?
if i == 0: # only starting or isolated
if vdn.precip.values[i+1] > 0.:
vbtype[i] = cascade.BoxTypes.starting
else:
vbtype[i] = cascade.BoxTypes.isolated
elif i == n_out-1: # only ending or isolated
if vdn.precip.values[i-1] > 0.:
vbtype[i] = cascade.BoxTypes.ending
else:
vbtype[i] = cascade.BoxTypes.isolated
else: # neither at at the end nor at the beginning
if vdn.precip.values[i-1] == 0. and vdn.precip.values[i+1] == 0.:
vbtype[i] = cascade.BoxTypes.isolated
if vdn.precip.values[i-1] == 0. and vdn.precip.values[i+1] > 0.:
vbtype[i] = cascade.BoxTypes.starting
if vdn.precip.values[i-1] > 0. and vdn.precip.values[i+1] > 0.:
vbtype[i] = cascade.BoxTypes.enclosed
if vdn.precip.values[i-1] > 0. and vdn.precip.values[i+1] == 0.:
vbtype[i] = cascade.BoxTypes.ending
else:
vbtype[i] = cascade.BoxTypes.dry # no rain
# 3rd step: examine branching
j = 0
for i in range(0, n_in):
if np.mod(i, 2) != 0:
if vdn.precip.values[j] > 0:
if vdn.precip.values[j] > meanvol:
belowabove = 1 # above mean
else:
belowabove = 0 # below mean
nb[belowabove, vbtype[j]-1] += 1
if vec_data.precip.values[i-1] > 0 and vec_data.precip.values[i] == 0:
# P(1/0)
cascade_opt.p10[belowabove, vbtype[j]-1] += 1
if vec_data.precip.values[i-1] == 0 and vec_data.precip.values[i] > 0:
# P(0/1)
cascade_opt.p01[belowabove, vbtype[j]-1] += 1
if vec_data.precip.values[i-1] > 0 and vec_data.precip.values[i] > 0:
# P(x/x)
cascade_opt.pxx[belowabove, vbtype[j]-1] += 1
nbxx[belowabove, vbtype[j]-1] += 1
# weights
r1 = vec_data.precip.values[i-1]
r2 = vec_data.precip.values[i]
wxxval = r1 / (r1 + r2)
# Test
if abs(r1+r2-vdn.precip.values[j]) > 1.E-3:
print('i=' + str(i) + ', j=' + str(j) +
', r1=' + str(r1) + ", r2=" + str(r2) +
", Summe=" + str(vdn.precip.values[j]))
print(vec_data.index[i])
print(vdn.index[j])
print('error')
return cascade_opt, vdn
for k in range(0, 7):
if wxxval > wlower[k] and wxxval <= wupper[k]:
cascade_opt.wxx[k, belowabove, vbtype[j]-1] += 1
break
j = j + 1
# 4th step: transform counts to percentages
cascade_opt.p01 = cascade_opt.p01 / nb
cascade_opt.p10 = cascade_opt.p10 / nb
cascade_opt.pxx = cascade_opt.pxx / nb
with np.errstate(divide='ignore', invalid='ignore'): # do not issue warnings here when dividing by zero, this is handled below
for k in range(0, 7):
cascade_opt.wxx[k, :, :] = cascade_opt.wxx[k, :, :] / nbxx[:, :]
# In some cases, the time series are too short for deriving statistics.
if (np.isnan(cascade_opt.p01).any() or
np.isnan(cascade_opt.p10).any() or
np.isnan(cascade_opt.pxx).any()):
print("ERROR (branching probabilities):")
print("Invalid statistics. Default values will be returned. "
"Try to use longer time series or apply statistics "
"derived for another station.")
cascade_opt.fill_with_sample_data()
# For some box types, the corresponding probabilities might yield nan.
# If this happens, nan values will be replaced by 1/7 in order to provide
# valid values for disaggregation.
if np.isnan(cascade_opt.wxx).any():
print("Warning (weighting probabilities):")
print("The derived cascade statistics are not valid as some "
"probabilities are undefined! ", end="")
print("Try to use longer time series that might be more "
"appropriate for deriving statistics. ", end="")
print("As a workaround, default values according to equally "
"distributed probabilities ", end="")
print("will be applied...", end="")
cascade_opt.wxx[np.isnan(cascade_opt.wxx)] = 1.0 / 7.0
wxx = np.zeros((2, 4))
for k in range(0, 7):
wxx[:, :] += cascade_opt.wxx[k, :, :]
if wxx.any() > 1.001 or wxx.any() < 0.999:
print("failed! Using default values!")
cascade_opt.fill_with_sample_data()
else:
print("OK!")
return cascade_opt, vdn | python | def aggregate_precipitation(vec_data,hourly=True, percentile=50):
"""Aggregates highly resolved precipitation data and creates statistics
Parameters
----------
vec_data : pd.Series
hourly (hourly=True) OR 5-min values
Returns
-------
output : cascade object
representing statistics of the cascade model
"""
cascade_opt = cascade.CascadeStatistics()
cascade_opt.percentile = percentile
# length of input time series
n_in = len(vec_data)
n_out = np.floor(n_in/2)
# alternative:
# 1st step: new time series
vec_time = vec_data.index
vdn0 = []
vtn0 = []
j = 0
for i in range(0, n_in):
if np.mod(i, 2) != 0:
vdn0.append(vec_data.precip.values[i-1] + vec_data.precip.values[i])
vtn0.append(vec_time[i])
j = j+1
vdn = pd.DataFrame(index=vtn0, data={'precip': vdn0})
# length of new time series
n_out = len(vdn)
# series of box types:
vbtype = np.zeros((n_out, ), dtype=np.int)
# fields for empirical probabilities
# counts
nb = np.zeros((2, 4))
nbxx = np.zeros((2, 4))
# class boundaries for histograms
# wclassbounds = np.linspace(0, 1, num=8)
wlower = np.array([0,
0.1429,
0.2857,
0.4286,
0.5714,
0.7143,
0.8571]) # wclassbounds[0:7]
wupper = np.array([0.1429,
0.2857,
0.4286,
0.5714,
0.7143,
0.8571,
1.0]) # wclassbounds[1:8]
# evaluate mean rainfall intensity for wet boxes
# these values should be determined during the aggregation phase!!!!!
# mean volume threshold
meanvol = np.percentile(vdn.precip[vdn.precip > 0.],
cascade_opt.percentile) # np.mean(vdn.precip[vdn.precip>0.])
cascade_opt.threshold = np.array([meanvol])
# 2nd step: classify boxes at the upper level
for i in range(0, n_out):
if vdn.precip.values[i] > 0.: # rain?
if i == 0: # only starting or isolated
if vdn.precip.values[i+1] > 0.:
vbtype[i] = cascade.BoxTypes.starting
else:
vbtype[i] = cascade.BoxTypes.isolated
elif i == n_out-1: # only ending or isolated
if vdn.precip.values[i-1] > 0.:
vbtype[i] = cascade.BoxTypes.ending
else:
vbtype[i] = cascade.BoxTypes.isolated
else: # neither at at the end nor at the beginning
if vdn.precip.values[i-1] == 0. and vdn.precip.values[i+1] == 0.:
vbtype[i] = cascade.BoxTypes.isolated
if vdn.precip.values[i-1] == 0. and vdn.precip.values[i+1] > 0.:
vbtype[i] = cascade.BoxTypes.starting
if vdn.precip.values[i-1] > 0. and vdn.precip.values[i+1] > 0.:
vbtype[i] = cascade.BoxTypes.enclosed
if vdn.precip.values[i-1] > 0. and vdn.precip.values[i+1] == 0.:
vbtype[i] = cascade.BoxTypes.ending
else:
vbtype[i] = cascade.BoxTypes.dry # no rain
# 3rd step: examine branching
j = 0
for i in range(0, n_in):
if np.mod(i, 2) != 0:
if vdn.precip.values[j] > 0:
if vdn.precip.values[j] > meanvol:
belowabove = 1 # above mean
else:
belowabove = 0 # below mean
nb[belowabove, vbtype[j]-1] += 1
if vec_data.precip.values[i-1] > 0 and vec_data.precip.values[i] == 0:
# P(1/0)
cascade_opt.p10[belowabove, vbtype[j]-1] += 1
if vec_data.precip.values[i-1] == 0 and vec_data.precip.values[i] > 0:
# P(0/1)
cascade_opt.p01[belowabove, vbtype[j]-1] += 1
if vec_data.precip.values[i-1] > 0 and vec_data.precip.values[i] > 0:
# P(x/x)
cascade_opt.pxx[belowabove, vbtype[j]-1] += 1
nbxx[belowabove, vbtype[j]-1] += 1
# weights
r1 = vec_data.precip.values[i-1]
r2 = vec_data.precip.values[i]
wxxval = r1 / (r1 + r2)
# Test
if abs(r1+r2-vdn.precip.values[j]) > 1.E-3:
print('i=' + str(i) + ', j=' + str(j) +
', r1=' + str(r1) + ", r2=" + str(r2) +
", Summe=" + str(vdn.precip.values[j]))
print(vec_data.index[i])
print(vdn.index[j])
print('error')
return cascade_opt, vdn
for k in range(0, 7):
if wxxval > wlower[k] and wxxval <= wupper[k]:
cascade_opt.wxx[k, belowabove, vbtype[j]-1] += 1
break
j = j + 1
# 4th step: transform counts to percentages
cascade_opt.p01 = cascade_opt.p01 / nb
cascade_opt.p10 = cascade_opt.p10 / nb
cascade_opt.pxx = cascade_opt.pxx / nb
with np.errstate(divide='ignore', invalid='ignore'): # do not issue warnings here when dividing by zero, this is handled below
for k in range(0, 7):
cascade_opt.wxx[k, :, :] = cascade_opt.wxx[k, :, :] / nbxx[:, :]
# In some cases, the time series are too short for deriving statistics.
if (np.isnan(cascade_opt.p01).any() or
np.isnan(cascade_opt.p10).any() or
np.isnan(cascade_opt.pxx).any()):
print("ERROR (branching probabilities):")
print("Invalid statistics. Default values will be returned. "
"Try to use longer time series or apply statistics "
"derived for another station.")
cascade_opt.fill_with_sample_data()
# For some box types, the corresponding probabilities might yield nan.
# If this happens, nan values will be replaced by 1/7 in order to provide
# valid values for disaggregation.
if np.isnan(cascade_opt.wxx).any():
print("Warning (weighting probabilities):")
print("The derived cascade statistics are not valid as some "
"probabilities are undefined! ", end="")
print("Try to use longer time series that might be more "
"appropriate for deriving statistics. ", end="")
print("As a workaround, default values according to equally "
"distributed probabilities ", end="")
print("will be applied...", end="")
cascade_opt.wxx[np.isnan(cascade_opt.wxx)] = 1.0 / 7.0
wxx = np.zeros((2, 4))
for k in range(0, 7):
wxx[:, :] += cascade_opt.wxx[k, :, :]
if wxx.any() > 1.001 or wxx.any() < 0.999:
print("failed! Using default values!")
cascade_opt.fill_with_sample_data()
else:
print("OK!")
return cascade_opt, vdn | [
"def",
"aggregate_precipitation",
"(",
"vec_data",
",",
"hourly",
"=",
"True",
",",
"percentile",
"=",
"50",
")",
":",
"cascade_opt",
"=",
"cascade",
".",
"CascadeStatistics",
"(",
")",
"cascade_opt",
".",
"percentile",
"=",
"percentile",
"# length of input time s... | Aggregates highly resolved precipitation data and creates statistics
Parameters
----------
vec_data : pd.Series
hourly (hourly=True) OR 5-min values
Returns
-------
output : cascade object
representing statistics of the cascade model | [
"Aggregates",
"highly",
"resolved",
"precipitation",
"data",
"and",
"creates",
"statistics"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/precipitation.py#L403-L586 | train |
kristianfoerster/melodist | melodist/precipitation.py | seasonal_subset | def seasonal_subset(dataframe,
months='all'):
'''Get the seasonal data.
Parameters
----------
dataframe : pd.DataFrame
months: int, str
Months to use for statistics, or 'all' for 1-12 (default='all')
'''
if isinstance(months, str) and months == 'all':
months = np.arange(12) + 1
for month_num, month in enumerate(months):
df_cur = dataframe[dataframe.index.month == month]
if month_num == 0:
df = df_cur
else:
df = df.append(df_cur)
return df.sort_index() | python | def seasonal_subset(dataframe,
months='all'):
'''Get the seasonal data.
Parameters
----------
dataframe : pd.DataFrame
months: int, str
Months to use for statistics, or 'all' for 1-12 (default='all')
'''
if isinstance(months, str) and months == 'all':
months = np.arange(12) + 1
for month_num, month in enumerate(months):
df_cur = dataframe[dataframe.index.month == month]
if month_num == 0:
df = df_cur
else:
df = df.append(df_cur)
return df.sort_index() | [
"def",
"seasonal_subset",
"(",
"dataframe",
",",
"months",
"=",
"'all'",
")",
":",
"if",
"isinstance",
"(",
"months",
",",
"str",
")",
"and",
"months",
"==",
"'all'",
":",
"months",
"=",
"np",
".",
"arange",
"(",
"12",
")",
"+",
"1",
"for",
"month_nu... | Get the seasonal data.
Parameters
----------
dataframe : pd.DataFrame
months: int, str
Months to use for statistics, or 'all' for 1-12 (default='all') | [
"Get",
"the",
"seasonal",
"data",
"."
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/precipitation.py#L589-L611 | train |
kristianfoerster/melodist | melodist/precipitation.py | build_casc | def build_casc(ObsData, hourly=True,level=9,
months=None,
avg_stats=True,
percentile=50):
'''Builds the cascade statistics of observed data for disaggregation
Parameters
-----------
ObsData : pd.Series
hourly=True -> hourly obs data
else -> 5min data (disaggregation level=9 (default), 10, 11)
months : numpy array of ints
Months for each seasons to be used for statistics (array of
numpy array, default=1-12, e.g., [np.arange(12) + 1])
avg_stats : bool
average statistics for all levels True/False (default=True)
percentile : int, float
percentile for splitting the dataset in small and high
intensities (default=50)
Returns
-------
list_seasonal_casc :
list holding the results
'''
list_seasonal_casc = list()
if months is None:
months = [np.arange(12) + 1]
# Parameter estimation for each season
for cur_months in months:
vdn = seasonal_subset(ObsData, cur_months)
if len(ObsData.precip[np.isnan(ObsData.precip)]) > 0:
ObsData.precip[np.isnan(ObsData.precip)] = 0
casc_opt = melodist.cascade.CascadeStatistics()
casc_opt.percentile = percentile
list_casc_opt = list()
count = 0
if hourly:
aggre_level = 5
else:
aggre_level = level
thresholds = np.zeros(aggre_level) #np.array([0., 0., 0., 0., 0.])
for i in range(0, aggre_level):
# aggregate the data
casc_opt_i, vdn = aggregate_precipitation(vdn, hourly, \
percentile=percentile)
thresholds[i] = casc_opt_i.threshold
copy_of_casc_opt_i = copy.copy(casc_opt_i)
list_casc_opt.append(copy_of_casc_opt_i)
n_vdn = len(vdn)
casc_opt_i * n_vdn # level related weighting
casc_opt + casc_opt_i # add to total statistics
count = count + n_vdn
casc_opt * (1. / count) # transfer weighted matrices to probabilities
casc_opt.threshold = thresholds
# statistics object
if avg_stats:
# in this case, the average statistics will be applied for all levels likewise
stat_obj = casc_opt
else:
# for longer time series, separate statistics might be more appropriate
# level dependent statistics will be assumed
stat_obj = list_casc_opt
list_seasonal_casc.append(stat_obj)
return list_seasonal_casc | python | def build_casc(ObsData, hourly=True,level=9,
months=None,
avg_stats=True,
percentile=50):
'''Builds the cascade statistics of observed data for disaggregation
Parameters
-----------
ObsData : pd.Series
hourly=True -> hourly obs data
else -> 5min data (disaggregation level=9 (default), 10, 11)
months : numpy array of ints
Months for each seasons to be used for statistics (array of
numpy array, default=1-12, e.g., [np.arange(12) + 1])
avg_stats : bool
average statistics for all levels True/False (default=True)
percentile : int, float
percentile for splitting the dataset in small and high
intensities (default=50)
Returns
-------
list_seasonal_casc :
list holding the results
'''
list_seasonal_casc = list()
if months is None:
months = [np.arange(12) + 1]
# Parameter estimation for each season
for cur_months in months:
vdn = seasonal_subset(ObsData, cur_months)
if len(ObsData.precip[np.isnan(ObsData.precip)]) > 0:
ObsData.precip[np.isnan(ObsData.precip)] = 0
casc_opt = melodist.cascade.CascadeStatistics()
casc_opt.percentile = percentile
list_casc_opt = list()
count = 0
if hourly:
aggre_level = 5
else:
aggre_level = level
thresholds = np.zeros(aggre_level) #np.array([0., 0., 0., 0., 0.])
for i in range(0, aggre_level):
# aggregate the data
casc_opt_i, vdn = aggregate_precipitation(vdn, hourly, \
percentile=percentile)
thresholds[i] = casc_opt_i.threshold
copy_of_casc_opt_i = copy.copy(casc_opt_i)
list_casc_opt.append(copy_of_casc_opt_i)
n_vdn = len(vdn)
casc_opt_i * n_vdn # level related weighting
casc_opt + casc_opt_i # add to total statistics
count = count + n_vdn
casc_opt * (1. / count) # transfer weighted matrices to probabilities
casc_opt.threshold = thresholds
# statistics object
if avg_stats:
# in this case, the average statistics will be applied for all levels likewise
stat_obj = casc_opt
else:
# for longer time series, separate statistics might be more appropriate
# level dependent statistics will be assumed
stat_obj = list_casc_opt
list_seasonal_casc.append(stat_obj)
return list_seasonal_casc | [
"def",
"build_casc",
"(",
"ObsData",
",",
"hourly",
"=",
"True",
",",
"level",
"=",
"9",
",",
"months",
"=",
"None",
",",
"avg_stats",
"=",
"True",
",",
"percentile",
"=",
"50",
")",
":",
"list_seasonal_casc",
"=",
"list",
"(",
")",
"if",
"months",
"... | Builds the cascade statistics of observed data for disaggregation
Parameters
-----------
ObsData : pd.Series
hourly=True -> hourly obs data
else -> 5min data (disaggregation level=9 (default), 10, 11)
months : numpy array of ints
Months for each seasons to be used for statistics (array of
numpy array, default=1-12, e.g., [np.arange(12) + 1])
avg_stats : bool
average statistics for all levels True/False (default=True)
percentile : int, float
percentile for splitting the dataset in small and high
intensities (default=50)
Returns
-------
list_seasonal_casc :
list holding the results | [
"Builds",
"the",
"cascade",
"statistics",
"of",
"observed",
"data",
"for",
"disaggregation"
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/precipitation.py#L614-L690 | train |
kristianfoerster/melodist | melodist/cascade.py | CascadeStatistics.fill_with_sample_data | def fill_with_sample_data(self):
"""This function fills the corresponding object with sample data."""
# replace these sample data with another dataset later
# this function is deprecated as soon as a common file format for this
# type of data will be available
self.p01 = np.array([[0.576724636119866, 0.238722774405744, 0.166532122130638, 0.393474644666218],
[0.303345245644811, 0.0490956843857575, 0.0392403031072856, 0.228441890034704]])
self.p10 = np.array([[0.158217002255554, 0.256581140990052, 0.557852226779526, 0.422638238585814],
[0.0439831163244427, 0.0474928027621488, 0.303675296728195, 0.217512052135178]])
self.pxx = np.array([[0.265058361624580, 0.504696084604205, 0.275615651089836, 0.183887116747968],
[0.652671638030746, 0.903411512852094, 0.657084400164519, 0.554046057830118]])
self.wxx = np.array([[[0.188389148850583, 0.0806836453984190, 0.0698113025807722, 0.0621499191745602],
[0.240993281622128, 0.0831019646519721, 0.0415130545715575, 0.155284541403192]],
[[0.190128959522795, 0.129220679033862, 0.0932213021787505, 0.193080698516532],
[0.196379692358065, 0.108549414860949, 0.0592714297292217, 0.0421945385836429]],
[[0.163043672107111, 0.152063537378127, 0.102823783410167, 0.0906028835221283],
[0.186579466868095, 0.189705690316132, 0.0990207345993082, 0.107831389238912]],
[[0.197765724699431, 0.220046257566978, 0.177876233348082, 0.261288786454262],
[0.123823472714948, 0.220514673922285, 0.102486496386323, 0.101975538893918]],
[[0.114435243444815, 0.170857634762767, 0.177327072603662, 0.135362730582518],
[0.0939211776723413,0.174291820501902, 0.125275822078525, 0.150842841725936]],
[[0.0988683809545079, 0.152323481100248, 0.185606883566286, 0.167242856061538],
[0.0760275616817939, 0.127275603247149, 0.202466168603738, 0.186580243138018]],
[[0.0473688704207573, 0.0948047647595988, 0.193333422312280, 0.0902721256884624],
[0.0822753470826286, 0.0965608324996108, 0.369966294031327, 0.255290907016382]]]) | python | def fill_with_sample_data(self):
"""This function fills the corresponding object with sample data."""
# replace these sample data with another dataset later
# this function is deprecated as soon as a common file format for this
# type of data will be available
self.p01 = np.array([[0.576724636119866, 0.238722774405744, 0.166532122130638, 0.393474644666218],
[0.303345245644811, 0.0490956843857575, 0.0392403031072856, 0.228441890034704]])
self.p10 = np.array([[0.158217002255554, 0.256581140990052, 0.557852226779526, 0.422638238585814],
[0.0439831163244427, 0.0474928027621488, 0.303675296728195, 0.217512052135178]])
self.pxx = np.array([[0.265058361624580, 0.504696084604205, 0.275615651089836, 0.183887116747968],
[0.652671638030746, 0.903411512852094, 0.657084400164519, 0.554046057830118]])
self.wxx = np.array([[[0.188389148850583, 0.0806836453984190, 0.0698113025807722, 0.0621499191745602],
[0.240993281622128, 0.0831019646519721, 0.0415130545715575, 0.155284541403192]],
[[0.190128959522795, 0.129220679033862, 0.0932213021787505, 0.193080698516532],
[0.196379692358065, 0.108549414860949, 0.0592714297292217, 0.0421945385836429]],
[[0.163043672107111, 0.152063537378127, 0.102823783410167, 0.0906028835221283],
[0.186579466868095, 0.189705690316132, 0.0990207345993082, 0.107831389238912]],
[[0.197765724699431, 0.220046257566978, 0.177876233348082, 0.261288786454262],
[0.123823472714948, 0.220514673922285, 0.102486496386323, 0.101975538893918]],
[[0.114435243444815, 0.170857634762767, 0.177327072603662, 0.135362730582518],
[0.0939211776723413,0.174291820501902, 0.125275822078525, 0.150842841725936]],
[[0.0988683809545079, 0.152323481100248, 0.185606883566286, 0.167242856061538],
[0.0760275616817939, 0.127275603247149, 0.202466168603738, 0.186580243138018]],
[[0.0473688704207573, 0.0948047647595988, 0.193333422312280, 0.0902721256884624],
[0.0822753470826286, 0.0965608324996108, 0.369966294031327, 0.255290907016382]]]) | [
"def",
"fill_with_sample_data",
"(",
"self",
")",
":",
"# replace these sample data with another dataset later",
"# this function is deprecated as soon as a common file format for this",
"# type of data will be available",
"self",
".",
"p01",
"=",
"np",
".",
"array",
"(",
"[",
"[... | This function fills the corresponding object with sample data. | [
"This",
"function",
"fills",
"the",
"corresponding",
"object",
"with",
"sample",
"data",
"."
] | ddc155c77b65f791be0021dbbaf68c6bac42ecbd | https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/cascade.py#L54-L81 | train |
gaqzi/django-emoji | emoji/models.py | Emoji.names | def names(cls):
"""A list of all emoji names without file extension."""
if not cls._files:
for f in os.listdir(cls._image_path):
if(not f.startswith('.') and
os.path.isfile(os.path.join(cls._image_path, f))):
cls._files.append(os.path.splitext(f)[0])
return cls._files | python | def names(cls):
"""A list of all emoji names without file extension."""
if not cls._files:
for f in os.listdir(cls._image_path):
if(not f.startswith('.') and
os.path.isfile(os.path.join(cls._image_path, f))):
cls._files.append(os.path.splitext(f)[0])
return cls._files | [
"def",
"names",
"(",
"cls",
")",
":",
"if",
"not",
"cls",
".",
"_files",
":",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"cls",
".",
"_image_path",
")",
":",
"if",
"(",
"not",
"f",
".",
"startswith",
"(",
"'.'",
")",
"and",
"os",
".",
"path",... | A list of all emoji names without file extension. | [
"A",
"list",
"of",
"all",
"emoji",
"names",
"without",
"file",
"extension",
"."
] | 08625d14f5b4251f4784bb5abf2620cb46bbdcab | https://github.com/gaqzi/django-emoji/blob/08625d14f5b4251f4784bb5abf2620cb46bbdcab/emoji/models.py#L114-L122 | train |
gaqzi/django-emoji | emoji/models.py | Emoji.replace | def replace(cls, replacement_string):
"""Add in valid emojis in a string where a valid emoji is between ::"""
e = cls()
def _replace_emoji(match):
val = match.group(1)
if val in e:
return e._image_string(match.group(1))
else:
return match.group(0)
return e._pattern.sub(_replace_emoji, replacement_string) | python | def replace(cls, replacement_string):
"""Add in valid emojis in a string where a valid emoji is between ::"""
e = cls()
def _replace_emoji(match):
val = match.group(1)
if val in e:
return e._image_string(match.group(1))
else:
return match.group(0)
return e._pattern.sub(_replace_emoji, replacement_string) | [
"def",
"replace",
"(",
"cls",
",",
"replacement_string",
")",
":",
"e",
"=",
"cls",
"(",
")",
"def",
"_replace_emoji",
"(",
"match",
")",
":",
"val",
"=",
"match",
".",
"group",
"(",
"1",
")",
"if",
"val",
"in",
"e",
":",
"return",
"e",
".",
"_im... | Add in valid emojis in a string where a valid emoji is between :: | [
"Add",
"in",
"valid",
"emojis",
"in",
"a",
"string",
"where",
"a",
"valid",
"emoji",
"is",
"between",
"::"
] | 08625d14f5b4251f4784bb5abf2620cb46bbdcab | https://github.com/gaqzi/django-emoji/blob/08625d14f5b4251f4784bb5abf2620cb46bbdcab/emoji/models.py#L125-L136 | train |
gaqzi/django-emoji | emoji/models.py | Emoji.replace_unicode | def replace_unicode(cls, replacement_string):
"""This method will iterate over every character in
``replacement_string`` and see if it mathces any of the
unicode codepoints that we recognize. If it does then it will
replace that codepoint with an image just like ``replace``.
NOTE: This will only work with Python versions built with wide
unicode caracter support. Python 3 should always work but
Python 2 will have to tested before deploy.
"""
e = cls()
output = []
surrogate_character = None
if settings.EMOJI_REPLACE_HTML_ENTITIES:
replacement_string = cls.replace_html_entities(replacement_string)
for i, character in enumerate(replacement_string):
if character in cls._unicode_modifiers:
continue
# Check whether this is the first character in a Unicode
# surrogate pair when Python doesn't have wide Unicode
# support.
#
# Is there any reason to do this even if Python got wide
# support enabled?
if(not UNICODE_WIDE and not surrogate_character and
ord(character) >= UNICODE_SURROGATE_MIN and
ord(character) <= UNICODE_SURROGATE_MAX):
surrogate_character = character
continue
if surrogate_character:
character = convert_unicode_surrogates(
surrogate_character + character
)
surrogate_character = None
name = e.name_for(character)
if name:
if settings.EMOJI_ALT_AS_UNICODE:
character = e._image_string(name, alt=character)
else:
character = e._image_string(name)
output.append(character)
return ''.join(output) | python | def replace_unicode(cls, replacement_string):
"""This method will iterate over every character in
``replacement_string`` and see if it mathces any of the
unicode codepoints that we recognize. If it does then it will
replace that codepoint with an image just like ``replace``.
NOTE: This will only work with Python versions built with wide
unicode caracter support. Python 3 should always work but
Python 2 will have to tested before deploy.
"""
e = cls()
output = []
surrogate_character = None
if settings.EMOJI_REPLACE_HTML_ENTITIES:
replacement_string = cls.replace_html_entities(replacement_string)
for i, character in enumerate(replacement_string):
if character in cls._unicode_modifiers:
continue
# Check whether this is the first character in a Unicode
# surrogate pair when Python doesn't have wide Unicode
# support.
#
# Is there any reason to do this even if Python got wide
# support enabled?
if(not UNICODE_WIDE and not surrogate_character and
ord(character) >= UNICODE_SURROGATE_MIN and
ord(character) <= UNICODE_SURROGATE_MAX):
surrogate_character = character
continue
if surrogate_character:
character = convert_unicode_surrogates(
surrogate_character + character
)
surrogate_character = None
name = e.name_for(character)
if name:
if settings.EMOJI_ALT_AS_UNICODE:
character = e._image_string(name, alt=character)
else:
character = e._image_string(name)
output.append(character)
return ''.join(output) | [
"def",
"replace_unicode",
"(",
"cls",
",",
"replacement_string",
")",
":",
"e",
"=",
"cls",
"(",
")",
"output",
"=",
"[",
"]",
"surrogate_character",
"=",
"None",
"if",
"settings",
".",
"EMOJI_REPLACE_HTML_ENTITIES",
":",
"replacement_string",
"=",
"cls",
".",... | This method will iterate over every character in
``replacement_string`` and see if it mathces any of the
unicode codepoints that we recognize. If it does then it will
replace that codepoint with an image just like ``replace``.
NOTE: This will only work with Python versions built with wide
unicode caracter support. Python 3 should always work but
Python 2 will have to tested before deploy. | [
"This",
"method",
"will",
"iterate",
"over",
"every",
"character",
"in",
"replacement_string",
"and",
"see",
"if",
"it",
"mathces",
"any",
"of",
"the",
"unicode",
"codepoints",
"that",
"we",
"recognize",
".",
"If",
"it",
"does",
"then",
"it",
"will",
"replac... | 08625d14f5b4251f4784bb5abf2620cb46bbdcab | https://github.com/gaqzi/django-emoji/blob/08625d14f5b4251f4784bb5abf2620cb46bbdcab/emoji/models.py#L139-L188 | train |
gaqzi/django-emoji | emoji/models.py | Emoji.replace_html_entities | def replace_html_entities(cls, replacement_string):
"""Replaces HTML escaped unicode entities with their unicode
equivalent. If the setting `EMOJI_REPLACE_HTML_ENTITIES` is
`True` then this conversation will always be done in
`replace_unicode` (default: True).
"""
def _hex_to_unicode(hex_code):
if PYTHON3:
hex_code = '{0:0>8}'.format(hex_code)
as_int = struct.unpack('>i', bytes.fromhex(hex_code))[0]
return '{0:c}'.format(as_int)
else:
return hex_to_unicode(hex_code)
def _replace_integer_entity(match):
hex_val = hex(int(match.group(1)))
return _hex_to_unicode(hex_val.replace('0x', ''))
def _replace_hex_entity(match):
return _hex_to_unicode(match.group(1))
# replace integer code points, A
replacement_string = re.sub(
cls._html_entities_integer_unicode_regex,
_replace_integer_entity,
replacement_string
)
# replace hex code points, A
replacement_string = re.sub(
cls._html_entities_hex_unicode_regex,
_replace_hex_entity,
replacement_string
)
return replacement_string | python | def replace_html_entities(cls, replacement_string):
"""Replaces HTML escaped unicode entities with their unicode
equivalent. If the setting `EMOJI_REPLACE_HTML_ENTITIES` is
`True` then this conversation will always be done in
`replace_unicode` (default: True).
"""
def _hex_to_unicode(hex_code):
if PYTHON3:
hex_code = '{0:0>8}'.format(hex_code)
as_int = struct.unpack('>i', bytes.fromhex(hex_code))[0]
return '{0:c}'.format(as_int)
else:
return hex_to_unicode(hex_code)
def _replace_integer_entity(match):
hex_val = hex(int(match.group(1)))
return _hex_to_unicode(hex_val.replace('0x', ''))
def _replace_hex_entity(match):
return _hex_to_unicode(match.group(1))
# replace integer code points, A
replacement_string = re.sub(
cls._html_entities_integer_unicode_regex,
_replace_integer_entity,
replacement_string
)
# replace hex code points, A
replacement_string = re.sub(
cls._html_entities_hex_unicode_regex,
_replace_hex_entity,
replacement_string
)
return replacement_string | [
"def",
"replace_html_entities",
"(",
"cls",
",",
"replacement_string",
")",
":",
"def",
"_hex_to_unicode",
"(",
"hex_code",
")",
":",
"if",
"PYTHON3",
":",
"hex_code",
"=",
"'{0:0>8}'",
".",
"format",
"(",
"hex_code",
")",
"as_int",
"=",
"struct",
".",
"unpa... | Replaces HTML escaped unicode entities with their unicode
equivalent. If the setting `EMOJI_REPLACE_HTML_ENTITIES` is
`True` then this conversation will always be done in
`replace_unicode` (default: True). | [
"Replaces",
"HTML",
"escaped",
"unicode",
"entities",
"with",
"their",
"unicode",
"equivalent",
".",
"If",
"the",
"setting",
"EMOJI_REPLACE_HTML_ENTITIES",
"is",
"True",
"then",
"this",
"conversation",
"will",
"always",
"be",
"done",
"in",
"replace_unicode",
"(",
... | 08625d14f5b4251f4784bb5abf2620cb46bbdcab | https://github.com/gaqzi/django-emoji/blob/08625d14f5b4251f4784bb5abf2620cb46bbdcab/emoji/models.py#L198-L234 | train |
gaqzi/django-emoji | bin/generate-unicode-aliases.py | _convert_to_unicode | def _convert_to_unicode(string):
"""This method should work with both Python 2 and 3 with the caveat
that they need to be compiled with wide unicode character support.
If there isn't wide unicode character support it'll blow up with a
warning.
"""
codepoints = []
for character in string.split('-'):
if character in BLACKLIST_UNICODE:
next
codepoints.append(
'\U{0:0>8}'.format(character).decode('unicode-escape')
)
return codepoints | python | def _convert_to_unicode(string):
"""This method should work with both Python 2 and 3 with the caveat
that they need to be compiled with wide unicode character support.
If there isn't wide unicode character support it'll blow up with a
warning.
"""
codepoints = []
for character in string.split('-'):
if character in BLACKLIST_UNICODE:
next
codepoints.append(
'\U{0:0>8}'.format(character).decode('unicode-escape')
)
return codepoints | [
"def",
"_convert_to_unicode",
"(",
"string",
")",
":",
"codepoints",
"=",
"[",
"]",
"for",
"character",
"in",
"string",
".",
"split",
"(",
"'-'",
")",
":",
"if",
"character",
"in",
"BLACKLIST_UNICODE",
":",
"next",
"codepoints",
".",
"append",
"(",
"'\\U{0... | This method should work with both Python 2 and 3 with the caveat
that they need to be compiled with wide unicode character support.
If there isn't wide unicode character support it'll blow up with a
warning. | [
"This",
"method",
"should",
"work",
"with",
"both",
"Python",
"2",
"and",
"3",
"with",
"the",
"caveat",
"that",
"they",
"need",
"to",
"be",
"compiled",
"with",
"wide",
"unicode",
"character",
"support",
"."
] | 08625d14f5b4251f4784bb5abf2620cb46bbdcab | https://github.com/gaqzi/django-emoji/blob/08625d14f5b4251f4784bb5abf2620cb46bbdcab/bin/generate-unicode-aliases.py#L32-L49 | train |
acsone/bobtemplates.odoo | bobtemplates/odoo/hooks.py | _delete_file | def _delete_file(configurator, path):
""" remove file and remove it's directories if empty """
path = os.path.join(configurator.target_directory, path)
os.remove(path)
try:
os.removedirs(os.path.dirname(path))
except OSError:
pass | python | def _delete_file(configurator, path):
""" remove file and remove it's directories if empty """
path = os.path.join(configurator.target_directory, path)
os.remove(path)
try:
os.removedirs(os.path.dirname(path))
except OSError:
pass | [
"def",
"_delete_file",
"(",
"configurator",
",",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"configurator",
".",
"target_directory",
",",
"path",
")",
"os",
".",
"remove",
"(",
"path",
")",
"try",
":",
"os",
".",
"removedirs",
... | remove file and remove it's directories if empty | [
"remove",
"file",
"and",
"remove",
"it",
"s",
"directories",
"if",
"empty"
] | 6e8c3cb12747d8b5af5a9821f995f285251e4d4d | https://github.com/acsone/bobtemplates.odoo/blob/6e8c3cb12747d8b5af5a9821f995f285251e4d4d/bobtemplates/odoo/hooks.py#L34-L41 | train |
acsone/bobtemplates.odoo | bobtemplates/odoo/hooks.py | _insert_manifest_item | def _insert_manifest_item(configurator, key, item):
""" Insert an item in the list of an existing manifest key """
with _open_manifest(configurator) as f:
manifest = f.read()
if item in ast.literal_eval(manifest).get(key, []):
return
pattern = """(["']{}["']:\\s*\\[)""".format(key)
repl = """\\1\n '{}',""".format(item)
manifest = re.sub(pattern, repl, manifest, re.MULTILINE)
with _open_manifest(configurator, "w") as f:
f.write(manifest) | python | def _insert_manifest_item(configurator, key, item):
""" Insert an item in the list of an existing manifest key """
with _open_manifest(configurator) as f:
manifest = f.read()
if item in ast.literal_eval(manifest).get(key, []):
return
pattern = """(["']{}["']:\\s*\\[)""".format(key)
repl = """\\1\n '{}',""".format(item)
manifest = re.sub(pattern, repl, manifest, re.MULTILINE)
with _open_manifest(configurator, "w") as f:
f.write(manifest) | [
"def",
"_insert_manifest_item",
"(",
"configurator",
",",
"key",
",",
"item",
")",
":",
"with",
"_open_manifest",
"(",
"configurator",
")",
"as",
"f",
":",
"manifest",
"=",
"f",
".",
"read",
"(",
")",
"if",
"item",
"in",
"ast",
".",
"literal_eval",
"(",
... | Insert an item in the list of an existing manifest key | [
"Insert",
"an",
"item",
"in",
"the",
"list",
"of",
"an",
"existing",
"manifest",
"key"
] | 6e8c3cb12747d8b5af5a9821f995f285251e4d4d | https://github.com/acsone/bobtemplates.odoo/blob/6e8c3cb12747d8b5af5a9821f995f285251e4d4d/bobtemplates/odoo/hooks.py#L58-L68 | train |
nyaruka/smartmin | setup.py | _read_requirements | def _read_requirements(filename):
"""Parses a file for pip installation requirements."""
with open(filename) as requirements_file:
contents = requirements_file.read()
return [line.strip() for line in contents.splitlines() if _is_requirement(line)] | python | def _read_requirements(filename):
"""Parses a file for pip installation requirements."""
with open(filename) as requirements_file:
contents = requirements_file.read()
return [line.strip() for line in contents.splitlines() if _is_requirement(line)] | [
"def",
"_read_requirements",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
")",
"as",
"requirements_file",
":",
"contents",
"=",
"requirements_file",
".",
"read",
"(",
")",
"return",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
... | Parses a file for pip installation requirements. | [
"Parses",
"a",
"file",
"for",
"pip",
"installation",
"requirements",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/setup.py#L17-L21 | train |
nyaruka/smartmin | smartmin/perms.py | assign_perm | def assign_perm(perm, group):
"""
Assigns a permission to a group
"""
if not isinstance(perm, Permission):
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label, codename=codename)
group.permissions.add(perm)
return perm | python | def assign_perm(perm, group):
"""
Assigns a permission to a group
"""
if not isinstance(perm, Permission):
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label, codename=codename)
group.permissions.add(perm)
return perm | [
"def",
"assign_perm",
"(",
"perm",
",",
"group",
")",
":",
"if",
"not",
"isinstance",
"(",
"perm",
",",
"Permission",
")",
":",
"try",
":",
"app_label",
",",
"codename",
"=",
"perm",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"except",
"ValueError",
":... | Assigns a permission to a group | [
"Assigns",
"a",
"permission",
"to",
"a",
"group"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/perms.py#L4-L17 | train |
nyaruka/smartmin | smartmin/perms.py | remove_perm | def remove_perm(perm, group):
"""
Removes a permission from a group
"""
if not isinstance(perm, Permission):
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label, codename=codename)
group.permissions.remove(perm)
return | python | def remove_perm(perm, group):
"""
Removes a permission from a group
"""
if not isinstance(perm, Permission):
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label, codename=codename)
group.permissions.remove(perm)
return | [
"def",
"remove_perm",
"(",
"perm",
",",
"group",
")",
":",
"if",
"not",
"isinstance",
"(",
"perm",
",",
"Permission",
")",
":",
"try",
":",
"app_label",
",",
"codename",
"=",
"perm",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"except",
"ValueError",
":... | Removes a permission from a group | [
"Removes",
"a",
"permission",
"from",
"a",
"group"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/perms.py#L20-L33 | train |
nyaruka/smartmin | smartmin/templatetags/smartmin.py | get_list_class | def get_list_class(context, list):
"""
Returns the class to use for the passed in list. We just build something up
from the object type for the list.
"""
return "list_%s_%s" % (list.model._meta.app_label, list.model._meta.model_name) | python | def get_list_class(context, list):
"""
Returns the class to use for the passed in list. We just build something up
from the object type for the list.
"""
return "list_%s_%s" % (list.model._meta.app_label, list.model._meta.model_name) | [
"def",
"get_list_class",
"(",
"context",
",",
"list",
")",
":",
"return",
"\"list_%s_%s\"",
"%",
"(",
"list",
".",
"model",
".",
"_meta",
".",
"app_label",
",",
"list",
".",
"model",
".",
"_meta",
".",
"model_name",
")"
] | Returns the class to use for the passed in list. We just build something up
from the object type for the list. | [
"Returns",
"the",
"class",
"to",
"use",
"for",
"the",
"passed",
"in",
"list",
".",
"We",
"just",
"build",
"something",
"up",
"from",
"the",
"object",
"type",
"for",
"the",
"list",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/templatetags/smartmin.py#L24-L29 | train |
nyaruka/smartmin | smartmin/templatetags/smartmin.py | format_datetime | def format_datetime(time):
"""
Formats a date, converting the time to the user timezone if one is specified
"""
user_time_zone = timezone.get_current_timezone()
if time.tzinfo is None:
time = time.replace(tzinfo=pytz.utc)
user_time_zone = pytz.timezone(getattr(settings, 'USER_TIME_ZONE', 'GMT'))
time = time.astimezone(user_time_zone)
return time.strftime("%b %d, %Y %H:%M") | python | def format_datetime(time):
"""
Formats a date, converting the time to the user timezone if one is specified
"""
user_time_zone = timezone.get_current_timezone()
if time.tzinfo is None:
time = time.replace(tzinfo=pytz.utc)
user_time_zone = pytz.timezone(getattr(settings, 'USER_TIME_ZONE', 'GMT'))
time = time.astimezone(user_time_zone)
return time.strftime("%b %d, %Y %H:%M") | [
"def",
"format_datetime",
"(",
"time",
")",
":",
"user_time_zone",
"=",
"timezone",
".",
"get_current_timezone",
"(",
")",
"if",
"time",
".",
"tzinfo",
"is",
"None",
":",
"time",
"=",
"time",
".",
"replace",
"(",
"tzinfo",
"=",
"pytz",
".",
"utc",
")",
... | Formats a date, converting the time to the user timezone if one is specified | [
"Formats",
"a",
"date",
"converting",
"the",
"time",
"to",
"the",
"user",
"timezone",
"if",
"one",
"is",
"specified"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/templatetags/smartmin.py#L32-L42 | train |
nyaruka/smartmin | smartmin/templatetags/smartmin.py | get_value_from_view | def get_value_from_view(context, field):
"""
Responsible for deriving the displayed value for the passed in 'field'.
This first checks for a particular method on the ListView, then looks for a method
on the object, then finally treats it as an attribute.
"""
view = context['view']
obj = None
if 'object' in context:
obj = context['object']
value = view.lookup_field_value(context, obj, field)
# it's a date
if type(value) == datetime:
return format_datetime(value)
return value | python | def get_value_from_view(context, field):
"""
Responsible for deriving the displayed value for the passed in 'field'.
This first checks for a particular method on the ListView, then looks for a method
on the object, then finally treats it as an attribute.
"""
view = context['view']
obj = None
if 'object' in context:
obj = context['object']
value = view.lookup_field_value(context, obj, field)
# it's a date
if type(value) == datetime:
return format_datetime(value)
return value | [
"def",
"get_value_from_view",
"(",
"context",
",",
"field",
")",
":",
"view",
"=",
"context",
"[",
"'view'",
"]",
"obj",
"=",
"None",
"if",
"'object'",
"in",
"context",
":",
"obj",
"=",
"context",
"[",
"'object'",
"]",
"value",
"=",
"view",
".",
"looku... | Responsible for deriving the displayed value for the passed in 'field'.
This first checks for a particular method on the ListView, then looks for a method
on the object, then finally treats it as an attribute. | [
"Responsible",
"for",
"deriving",
"the",
"displayed",
"value",
"for",
"the",
"passed",
"in",
"field",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/templatetags/smartmin.py#L46-L64 | train |
nyaruka/smartmin | smartmin/templatetags/smartmin.py | get_class | def get_class(context, field, obj=None):
"""
Looks up the class for this field
"""
view = context['view']
return view.lookup_field_class(field, obj, "field_" + field) | python | def get_class(context, field, obj=None):
"""
Looks up the class for this field
"""
view = context['view']
return view.lookup_field_class(field, obj, "field_" + field) | [
"def",
"get_class",
"(",
"context",
",",
"field",
",",
"obj",
"=",
"None",
")",
":",
"view",
"=",
"context",
"[",
"'view'",
"]",
"return",
"view",
".",
"lookup_field_class",
"(",
"field",
",",
"obj",
",",
"\"field_\"",
"+",
"field",
")"
] | Looks up the class for this field | [
"Looks",
"up",
"the",
"class",
"for",
"this",
"field"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/templatetags/smartmin.py#L84-L89 | train |
nyaruka/smartmin | smartmin/templatetags/smartmin.py | get_label | def get_label(context, field, obj=None):
"""
Responsible for figuring out the right label for the passed in field.
The order of precedence is:
1) if the view has a field_config and a label specified there, use that label
2) check for a form in the view, if it contains that field, use it's value
"""
view = context['view']
return view.lookup_field_label(context, field, obj) | python | def get_label(context, field, obj=None):
"""
Responsible for figuring out the right label for the passed in field.
The order of precedence is:
1) if the view has a field_config and a label specified there, use that label
2) check for a form in the view, if it contains that field, use it's value
"""
view = context['view']
return view.lookup_field_label(context, field, obj) | [
"def",
"get_label",
"(",
"context",
",",
"field",
",",
"obj",
"=",
"None",
")",
":",
"view",
"=",
"context",
"[",
"'view'",
"]",
"return",
"view",
".",
"lookup_field_label",
"(",
"context",
",",
"field",
",",
"obj",
")"
] | Responsible for figuring out the right label for the passed in field.
The order of precedence is:
1) if the view has a field_config and a label specified there, use that label
2) check for a form in the view, if it contains that field, use it's value | [
"Responsible",
"for",
"figuring",
"out",
"the",
"right",
"label",
"for",
"the",
"passed",
"in",
"field",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/templatetags/smartmin.py#L93-L102 | train |
nyaruka/smartmin | smartmin/templatetags/smartmin.py | get_field_link | def get_field_link(context, field, obj=None):
"""
Determine what the field link should be for the given field, object pair
"""
view = context['view']
return view.lookup_field_link(context, field, obj) | python | def get_field_link(context, field, obj=None):
"""
Determine what the field link should be for the given field, object pair
"""
view = context['view']
return view.lookup_field_link(context, field, obj) | [
"def",
"get_field_link",
"(",
"context",
",",
"field",
",",
"obj",
"=",
"None",
")",
":",
"view",
"=",
"context",
"[",
"'view'",
"]",
"return",
"view",
".",
"lookup_field_link",
"(",
"context",
",",
"field",
",",
"obj",
")"
] | Determine what the field link should be for the given field, object pair | [
"Determine",
"what",
"the",
"field",
"link",
"should",
"be",
"for",
"the",
"given",
"field",
"object",
"pair"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/templatetags/smartmin.py#L106-L111 | train |
nyaruka/smartmin | smartmin/management/__init__.py | get_permissions_app_name | def get_permissions_app_name():
"""
Gets the app after which smartmin permissions should be installed. This can be specified by PERMISSIONS_APP in the
Django settings or defaults to the last app with models
"""
global permissions_app_name
if not permissions_app_name:
permissions_app_name = getattr(settings, 'PERMISSIONS_APP', None)
if not permissions_app_name:
app_names_with_models = [a.name for a in apps.get_app_configs() if a.models_module is not None]
if app_names_with_models:
permissions_app_name = app_names_with_models[-1]
return permissions_app_name | python | def get_permissions_app_name():
"""
Gets the app after which smartmin permissions should be installed. This can be specified by PERMISSIONS_APP in the
Django settings or defaults to the last app with models
"""
global permissions_app_name
if not permissions_app_name:
permissions_app_name = getattr(settings, 'PERMISSIONS_APP', None)
if not permissions_app_name:
app_names_with_models = [a.name for a in apps.get_app_configs() if a.models_module is not None]
if app_names_with_models:
permissions_app_name = app_names_with_models[-1]
return permissions_app_name | [
"def",
"get_permissions_app_name",
"(",
")",
":",
"global",
"permissions_app_name",
"if",
"not",
"permissions_app_name",
":",
"permissions_app_name",
"=",
"getattr",
"(",
"settings",
",",
"'PERMISSIONS_APP'",
",",
"None",
")",
"if",
"not",
"permissions_app_name",
":",... | Gets the app after which smartmin permissions should be installed. This can be specified by PERMISSIONS_APP in the
Django settings or defaults to the last app with models | [
"Gets",
"the",
"app",
"after",
"which",
"smartmin",
"permissions",
"should",
"be",
"installed",
".",
"This",
"can",
"be",
"specified",
"by",
"PERMISSIONS_APP",
"in",
"the",
"Django",
"settings",
"or",
"defaults",
"to",
"the",
"last",
"app",
"with",
"models"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/management/__init__.py#L15-L30 | train |
nyaruka/smartmin | smartmin/management/__init__.py | check_role_permissions | def check_role_permissions(role, permissions, current_permissions):
"""
Checks the the passed in role (can be user, group or AnonymousUser) has all the passed
in permissions, granting them if necessary.
"""
role_permissions = []
# get all the current permissions, we'll remove these as we verify they should still be granted
for permission in permissions:
splits = permission.split(".")
if len(splits) != 2 and len(splits) != 3:
sys.stderr.write(" invalid permission %s, ignoring\n" % permission)
continue
app = splits[0]
codenames = []
if len(splits) == 2:
codenames.append(splits[1])
else:
(object, action) = splits[1:]
# if this is a wildcard, then query our database for all the permissions that exist on this object
if action == '*':
for perm in Permission.objects.filter(codename__startswith="%s_" % object, content_type__app_label=app):
codenames.append(perm.codename)
# otherwise, this is an error, continue
else:
sys.stderr.write(" invalid permission %s, ignoring\n" % permission)
continue
if len(codenames) == 0:
continue
for codename in codenames:
# the full codename for this permission
full_codename = "%s.%s" % (app, codename)
# this marks all the permissions which should remain
role_permissions.append(full_codename)
try:
assign_perm(full_codename, role)
except ObjectDoesNotExist:
pass
# sys.stderr.write(" unknown permission %s, ignoring\n" % permission)
# remove any that are extra
for permission in current_permissions:
if isinstance(permission, str):
key = permission
else:
key = "%s.%s" % (permission.content_type.app_label, permission.codename)
if key not in role_permissions:
remove_perm(key, role) | python | def check_role_permissions(role, permissions, current_permissions):
"""
Checks the the passed in role (can be user, group or AnonymousUser) has all the passed
in permissions, granting them if necessary.
"""
role_permissions = []
# get all the current permissions, we'll remove these as we verify they should still be granted
for permission in permissions:
splits = permission.split(".")
if len(splits) != 2 and len(splits) != 3:
sys.stderr.write(" invalid permission %s, ignoring\n" % permission)
continue
app = splits[0]
codenames = []
if len(splits) == 2:
codenames.append(splits[1])
else:
(object, action) = splits[1:]
# if this is a wildcard, then query our database for all the permissions that exist on this object
if action == '*':
for perm in Permission.objects.filter(codename__startswith="%s_" % object, content_type__app_label=app):
codenames.append(perm.codename)
# otherwise, this is an error, continue
else:
sys.stderr.write(" invalid permission %s, ignoring\n" % permission)
continue
if len(codenames) == 0:
continue
for codename in codenames:
# the full codename for this permission
full_codename = "%s.%s" % (app, codename)
# this marks all the permissions which should remain
role_permissions.append(full_codename)
try:
assign_perm(full_codename, role)
except ObjectDoesNotExist:
pass
# sys.stderr.write(" unknown permission %s, ignoring\n" % permission)
# remove any that are extra
for permission in current_permissions:
if isinstance(permission, str):
key = permission
else:
key = "%s.%s" % (permission.content_type.app_label, permission.codename)
if key not in role_permissions:
remove_perm(key, role) | [
"def",
"check_role_permissions",
"(",
"role",
",",
"permissions",
",",
"current_permissions",
")",
":",
"role_permissions",
"=",
"[",
"]",
"# get all the current permissions, we'll remove these as we verify they should still be granted",
"for",
"permission",
"in",
"permissions",
... | Checks the the passed in role (can be user, group or AnonymousUser) has all the passed
in permissions, granting them if necessary. | [
"Checks",
"the",
"the",
"passed",
"in",
"role",
"(",
"can",
"be",
"user",
"group",
"or",
"AnonymousUser",
")",
"has",
"all",
"the",
"passed",
"in",
"permissions",
"granting",
"them",
"if",
"necessary",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/management/__init__.py#L40-L95 | train |
nyaruka/smartmin | smartmin/management/__init__.py | check_all_group_permissions | def check_all_group_permissions(sender, **kwargs):
"""
Checks that all the permissions specified in our settings.py are set for our groups.
"""
if not is_permissions_app(sender):
return
config = getattr(settings, 'GROUP_PERMISSIONS', dict())
# for each of our items
for name, permissions in config.items():
# get or create the group
(group, created) = Group.objects.get_or_create(name=name)
if created:
pass
check_role_permissions(group, permissions, group.permissions.all()) | python | def check_all_group_permissions(sender, **kwargs):
"""
Checks that all the permissions specified in our settings.py are set for our groups.
"""
if not is_permissions_app(sender):
return
config = getattr(settings, 'GROUP_PERMISSIONS', dict())
# for each of our items
for name, permissions in config.items():
# get or create the group
(group, created) = Group.objects.get_or_create(name=name)
if created:
pass
check_role_permissions(group, permissions, group.permissions.all()) | [
"def",
"check_all_group_permissions",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"is_permissions_app",
"(",
"sender",
")",
":",
"return",
"config",
"=",
"getattr",
"(",
"settings",
",",
"'GROUP_PERMISSIONS'",
",",
"dict",
"(",
")",
")",
... | Checks that all the permissions specified in our settings.py are set for our groups. | [
"Checks",
"that",
"all",
"the",
"permissions",
"specified",
"in",
"our",
"settings",
".",
"py",
"are",
"set",
"for",
"our",
"groups",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/management/__init__.py#L98-L114 | train |
nyaruka/smartmin | smartmin/management/__init__.py | add_permission | def add_permission(content_type, permission):
"""
Adds the passed in permission to that content type. Note that the permission passed
in should be a single word, or verb. The proper 'codename' will be generated from that.
"""
# build our permission slug
codename = "%s_%s" % (content_type.model, permission)
# sys.stderr.write("Checking %s permission for %s\n" % (permission, content_type.name))
# does it already exist
if not Permission.objects.filter(content_type=content_type, codename=codename):
Permission.objects.create(content_type=content_type,
codename=codename,
name="Can %s %s" % (permission, content_type.name)) | python | def add_permission(content_type, permission):
"""
Adds the passed in permission to that content type. Note that the permission passed
in should be a single word, or verb. The proper 'codename' will be generated from that.
"""
# build our permission slug
codename = "%s_%s" % (content_type.model, permission)
# sys.stderr.write("Checking %s permission for %s\n" % (permission, content_type.name))
# does it already exist
if not Permission.objects.filter(content_type=content_type, codename=codename):
Permission.objects.create(content_type=content_type,
codename=codename,
name="Can %s %s" % (permission, content_type.name)) | [
"def",
"add_permission",
"(",
"content_type",
",",
"permission",
")",
":",
"# build our permission slug",
"codename",
"=",
"\"%s_%s\"",
"%",
"(",
"content_type",
".",
"model",
",",
"permission",
")",
"# sys.stderr.write(\"Checking %s permission for %s\\n\" % (permission, cont... | Adds the passed in permission to that content type. Note that the permission passed
in should be a single word, or verb. The proper 'codename' will be generated from that. | [
"Adds",
"the",
"passed",
"in",
"permission",
"to",
"that",
"content",
"type",
".",
"Note",
"that",
"the",
"permission",
"passed",
"in",
"should",
"be",
"a",
"single",
"word",
"or",
"verb",
".",
"The",
"proper",
"codename",
"will",
"be",
"generated",
"from"... | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/management/__init__.py#L117-L131 | train |
nyaruka/smartmin | smartmin/management/__init__.py | check_all_permissions | def check_all_permissions(sender, **kwargs):
"""
This syncdb checks our PERMISSIONS setting in settings.py and makes sure all those permissions
actually exit.
"""
if not is_permissions_app(sender):
return
config = getattr(settings, 'PERMISSIONS', dict())
# for each of our items
for natural_key, permissions in config.items():
# if the natural key '*' then that means add to all objects
if natural_key == '*':
# for each of our content types
for content_type in ContentType.objects.all():
for permission in permissions:
add_permission(content_type, permission)
# otherwise, this is on a specific content type, add for each of those
else:
app, model = natural_key.split('.')
try:
content_type = ContentType.objects.get_by_natural_key(app, model)
except ContentType.DoesNotExist:
continue
# add each permission
for permission in permissions:
add_permission(content_type, permission) | python | def check_all_permissions(sender, **kwargs):
"""
This syncdb checks our PERMISSIONS setting in settings.py and makes sure all those permissions
actually exit.
"""
if not is_permissions_app(sender):
return
config = getattr(settings, 'PERMISSIONS', dict())
# for each of our items
for natural_key, permissions in config.items():
# if the natural key '*' then that means add to all objects
if natural_key == '*':
# for each of our content types
for content_type in ContentType.objects.all():
for permission in permissions:
add_permission(content_type, permission)
# otherwise, this is on a specific content type, add for each of those
else:
app, model = natural_key.split('.')
try:
content_type = ContentType.objects.get_by_natural_key(app, model)
except ContentType.DoesNotExist:
continue
# add each permission
for permission in permissions:
add_permission(content_type, permission) | [
"def",
"check_all_permissions",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"is_permissions_app",
"(",
"sender",
")",
":",
"return",
"config",
"=",
"getattr",
"(",
"settings",
",",
"'PERMISSIONS'",
",",
"dict",
"(",
")",
")",
"# for each... | This syncdb checks our PERMISSIONS setting in settings.py and makes sure all those permissions
actually exit. | [
"This",
"syncdb",
"checks",
"our",
"PERMISSIONS",
"setting",
"in",
"settings",
".",
"py",
"and",
"makes",
"sure",
"all",
"those",
"permissions",
"actually",
"exit",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/management/__init__.py#L135-L164 | train |
nyaruka/smartmin | smartmin/users/views.py | UserForm.save | def save(self, commit=True):
"""
Overloaded so we can save any new password that is included.
"""
is_new_user = self.instance.pk is None
user = super(UserForm, self).save(commit)
# new users should be made active by default
if is_new_user:
user.is_active = True
# if we had a new password set, use it
new_pass = self.cleaned_data['new_password']
if new_pass:
user.set_password(new_pass)
if commit:
user.save()
return user | python | def save(self, commit=True):
"""
Overloaded so we can save any new password that is included.
"""
is_new_user = self.instance.pk is None
user = super(UserForm, self).save(commit)
# new users should be made active by default
if is_new_user:
user.is_active = True
# if we had a new password set, use it
new_pass = self.cleaned_data['new_password']
if new_pass:
user.set_password(new_pass)
if commit:
user.save()
return user | [
"def",
"save",
"(",
"self",
",",
"commit",
"=",
"True",
")",
":",
"is_new_user",
"=",
"self",
".",
"instance",
".",
"pk",
"is",
"None",
"user",
"=",
"super",
"(",
"UserForm",
",",
"self",
")",
".",
"save",
"(",
"commit",
")",
"# new users should be mad... | Overloaded so we can save any new password that is included. | [
"Overloaded",
"so",
"we",
"can",
"save",
"any",
"new",
"password",
"that",
"is",
"included",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/users/views.py#L39-L58 | train |
nyaruka/smartmin | smartmin/views.py | smart_url | def smart_url(url, obj=None):
"""
URLs that start with @ are reversed, using the passed in arguments.
Otherwise a straight % substitution is applied.
"""
if url.find("@") >= 0:
(args, value) = url.split('@')
if args:
val = getattr(obj, args, None)
return reverse(value, args=[val])
else:
return reverse(value)
else:
if obj is None:
return url
else:
return url % obj.id | python | def smart_url(url, obj=None):
"""
URLs that start with @ are reversed, using the passed in arguments.
Otherwise a straight % substitution is applied.
"""
if url.find("@") >= 0:
(args, value) = url.split('@')
if args:
val = getattr(obj, args, None)
return reverse(value, args=[val])
else:
return reverse(value)
else:
if obj is None:
return url
else:
return url % obj.id | [
"def",
"smart_url",
"(",
"url",
",",
"obj",
"=",
"None",
")",
":",
"if",
"url",
".",
"find",
"(",
"\"@\"",
")",
">=",
"0",
":",
"(",
"args",
",",
"value",
")",
"=",
"url",
".",
"split",
"(",
"'@'",
")",
"if",
"args",
":",
"val",
"=",
"getattr... | URLs that start with @ are reversed, using the passed in arguments.
Otherwise a straight % substitution is applied. | [
"URLs",
"that",
"start",
"with",
"@",
"are",
"reversed",
"using",
"the",
"passed",
"in",
"arguments",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L31-L49 | train |
nyaruka/smartmin | smartmin/views.py | derive_single_object_url_pattern | def derive_single_object_url_pattern(slug_url_kwarg, path, action):
"""
Utility function called by class methods for single object views
"""
if slug_url_kwarg:
return r'^%s/%s/(?P<%s>[^/]+)/$' % (path, action, slug_url_kwarg)
else:
return r'^%s/%s/(?P<pk>\d+)/$' % (path, action) | python | def derive_single_object_url_pattern(slug_url_kwarg, path, action):
"""
Utility function called by class methods for single object views
"""
if slug_url_kwarg:
return r'^%s/%s/(?P<%s>[^/]+)/$' % (path, action, slug_url_kwarg)
else:
return r'^%s/%s/(?P<pk>\d+)/$' % (path, action) | [
"def",
"derive_single_object_url_pattern",
"(",
"slug_url_kwarg",
",",
"path",
",",
"action",
")",
":",
"if",
"slug_url_kwarg",
":",
"return",
"r'^%s/%s/(?P<%s>[^/]+)/$'",
"%",
"(",
"path",
",",
"action",
",",
"slug_url_kwarg",
")",
"else",
":",
"return",
"r'^%s/%... | Utility function called by class methods for single object views | [
"Utility",
"function",
"called",
"by",
"class",
"methods",
"for",
"single",
"object",
"views"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L390-L397 | train |
nyaruka/smartmin | smartmin/views.py | SmartView.has_permission | def has_permission(self, request, *args, **kwargs):
"""
Figures out if the current user has permissions for this view.
"""
self.kwargs = kwargs
self.args = args
self.request = request
if not getattr(self, 'permission', None):
return True
else:
return request.user.has_perm(self.permission) | python | def has_permission(self, request, *args, **kwargs):
"""
Figures out if the current user has permissions for this view.
"""
self.kwargs = kwargs
self.args = args
self.request = request
if not getattr(self, 'permission', None):
return True
else:
return request.user.has_perm(self.permission) | [
"def",
"has_permission",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"kwargs",
"=",
"kwargs",
"self",
".",
"args",
"=",
"args",
"self",
".",
"request",
"=",
"request",
"if",
"not",
"getattr",
"(",
... | Figures out if the current user has permissions for this view. | [
"Figures",
"out",
"if",
"the",
"current",
"user",
"has",
"permissions",
"for",
"this",
"view",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L88-L99 | train |
nyaruka/smartmin | smartmin/views.py | SmartView.dispatch | def dispatch(self, request, *args, **kwargs):
"""
Overloaded to check permissions if appropriate
"""
def wrapper(request, *args, **kwargs):
if not self.has_permission(request, *args, **kwargs):
path = urlquote(request.get_full_path())
login_url = kwargs.pop('login_url', settings.LOGIN_URL)
redirect_field_name = kwargs.pop('redirect_field_name', REDIRECT_FIELD_NAME)
return HttpResponseRedirect("%s?%s=%s" % (login_url, redirect_field_name, path))
else:
response = self.pre_process(request, *args, **kwargs)
if not response:
return super(SmartView, self).dispatch(request, *args, **kwargs)
else:
return response
def dispatch(self, request, *args, **kwargs):
    """
    Overloaded to check permissions if appropriate
    """
    # the original wrapped this in an immediately-invoked closure; inlined here
    if not self.has_permission(request, *args, **kwargs):
        # not allowed: bounce to the login page, preserving the destination
        path = urlquote(request.get_full_path())
        login_url = kwargs.pop('login_url', settings.LOGIN_URL)
        redirect_field_name = kwargs.pop('redirect_field_name', REDIRECT_FIELD_NAME)
        return HttpResponseRedirect("%s?%s=%s" % (login_url, redirect_field_name, path))

    # give subclasses a chance to intercept the request before dispatching
    response = self.pre_process(request, *args, **kwargs)
    if response:
        return response
    return super(SmartView, self).dispatch(request, *args, **kwargs)
"def",
"dispatch",
"(",
"self",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"wrapper",
"(",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"has_permission",
"(",
"request",
... | Overloaded to check permissions if appropriate | [
"Overloaded",
"to",
"check",
"permissions",
"if",
"appropriate"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L101-L118 | train |
nyaruka/smartmin | smartmin/views.py | SmartView.lookup_obj_attribute | def lookup_obj_attribute(self, obj, field):
"""
Looks for a field's value from the passed in obj. Note that this will strip
leading attributes to deal with subelements if possible
"""
curr_field = field.encode('ascii', 'ignore').decode("utf-8")
rest = None
if field.find('.') >= 0:
curr_field = field.split('.')[0]
rest = '.'.join(field.split('.')[1:])
# next up is the object itself
obj_field = getattr(obj, curr_field, None)
# if it is callable, do so
if obj_field and getattr(obj_field, '__call__', None):
obj_field = obj_field()
if obj_field and rest:
return self.lookup_obj_attribute(obj_field, rest)
else:
def lookup_obj_attribute(self, obj, field):
    """
    Looks for a field's value from the passed in obj. Note that this will strip
    leading attributes to deal with subelements if possible
    """
    head = field.encode('ascii', 'ignore').decode("utf-8")
    tail = None

    # split off the first segment of a dotted path, keeping the remainder
    if field.find('.') >= 0:
        segments = field.split('.')
        head = segments[0]
        tail = '.'.join(segments[1:])

    value = getattr(obj, head, None)

    # invoke callables (e.g. model methods) to get at their value
    if value and getattr(value, '__call__', None):
        value = value()

    # recurse into the remainder of the dotted path if there is one
    if value and tail:
        return self.lookup_obj_attribute(value, tail)
    return value
"def",
"lookup_obj_attribute",
"(",
"self",
",",
"obj",
",",
"field",
")",
":",
"curr_field",
"=",
"field",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
"rest",
"=",
"None",
"if",
"field",
".",
"find",
"(",... | Looks for a field's value from the passed in obj. Note that this will strip
leading attributes to deal with subelements if possible | [
"Looks",
"for",
"a",
"field",
"s",
"value",
"from",
"the",
"passed",
"in",
"obj",
".",
"Note",
"that",
"this",
"will",
"strip",
"leading",
"attributes",
"to",
"deal",
"with",
"subelements",
"if",
"possible"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L130-L152 | train |
nyaruka/smartmin | smartmin/views.py | SmartView.lookup_field_value | def lookup_field_value(self, context, obj, field):
"""
Looks up the field value for the passed in object and field name.
Note that this method is actually called from a template, but this provides a hook
for subclasses to modify behavior if they wish to do so.
This may be used for example to change the display value of a variable depending on
other variables within our context.
"""
curr_field = field.encode('ascii', 'ignore').decode("utf-8")
# if this isn't a subfield, check the view to see if it has a get_ method
if field.find('.') == -1:
# view supercedes all, does it have a 'get_' method for this obj
view_method = getattr(self, 'get_%s' % curr_field, None)
if view_method:
return view_method(obj)
def lookup_field_value(self, context, obj, field):
    """
    Looks up the field value for the passed in object and field name.

    Note that this method is actually called from a template, but this provides a hook
    for subclasses to modify behavior if they wish to do so.

    This may be used for example to change the display value of a variable depending on
    other variables within our context.
    """
    clean = field.encode('ascii', 'ignore').decode("utf-8")

    # for plain (non-dotted) fields the view takes precedence: a get_<field>
    # method defined on the view overrides the object's own attribute
    if '.' not in field:
        getter = getattr(self, 'get_%s' % clean, None)
        if getter:
            return getter(obj)

    return self.lookup_obj_attribute(obj, field)
"def",
"lookup_field_value",
"(",
"self",
",",
"context",
",",
"obj",
",",
"field",
")",
":",
"curr_field",
"=",
"field",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
"# if this isn't a subfield, check the view to se... | Looks up the field value for the passed in object and field name.
Note that this method is actually called from a template, but this provides a hook
for subclasses to modify behavior if they wish to do so.
This may be used for example to change the display value of a variable depending on
other variables within our context. | [
"Looks",
"up",
"the",
"field",
"value",
"for",
"the",
"passed",
"in",
"object",
"and",
"field",
"name",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L154-L173 | train |
nyaruka/smartmin | smartmin/views.py | SmartView.lookup_field_label | def lookup_field_label(self, context, field, default=None):
"""
Figures out what the field label should be for the passed in field name.
Our heuristic is as follows:
1) we check to see if our field_config has a label specified
2) if not, then we derive a field value from the field name
"""
# if this is a subfield, strip off everything but the last field name
if field.find('.') >= 0:
return self.lookup_field_label(context, field.split('.')[-1], default)
label = None
# is there a label specified for this field
if field in self.field_config and 'label' in self.field_config[field]:
label = self.field_config[field]['label']
# if we were given a default, use that
elif default:
label = default
# check our model
else:
for model_field in self.model._meta.fields:
if model_field.name == field:
return model_field.verbose_name.title()
# otherwise, derive it from our field name
if label is None:
label = self.derive_field_label(field)
def lookup_field_label(self, context, field, default=None):
    """
    Figures out what the field label should be for the passed in field name.

    Our heuristic is as follows:
        1) we check to see if our field_config has a label specified
        2) if not, then we derive a field value from the field name
    """
    # dotted fields are labelled by their final segment
    if '.' in field:
        return self.lookup_field_label(context, field.split('.')[-1], default)

    config = self.field_config.get(field, {})
    label = None

    if 'label' in config:
        # explicit configuration wins
        label = config['label']
    elif default:
        # then any default supplied by the caller
        label = default
    else:
        # then the model field's verbose name, if there is a matching field
        for model_field in self.model._meta.fields:
            if model_field.name == field:
                return model_field.verbose_name.title()

    # otherwise, derive it from our field name
    if label is None:
        label = self.derive_field_label(field)
    return label
"def",
"lookup_field_label",
"(",
"self",
",",
"context",
",",
"field",
",",
"default",
"=",
"None",
")",
":",
"# if this is a subfield, strip off everything but the last field name",
"if",
"field",
".",
"find",
"(",
"'.'",
")",
">=",
"0",
":",
"return",
"self",
... | Figures out what the field label should be for the passed in field name.
Our heuristic is as follows:
1) we check to see if our field_config has a label specified
2) if not, then we derive a field value from the field name | [
"Figures",
"out",
"what",
"the",
"field",
"label",
"should",
"be",
"for",
"the",
"passed",
"in",
"field",
"name",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L175-L207 | train |
nyaruka/smartmin | smartmin/views.py | SmartView.lookup_field_help | def lookup_field_help(self, field, default=None):
"""
Looks up the help text for the passed in field.
"""
help = None
# is there a label specified for this field
if field in self.field_config and 'help' in self.field_config[field]:
help = self.field_config[field]['help']
# if we were given a default, use that
elif default:
help = default
# try to see if there is a description on our model
elif hasattr(self, 'model'):
for model_field in self.model._meta.fields:
if model_field.name == field:
help = model_field.help_text
break
def lookup_field_help(self, field, default=None):
    """
    Looks up the help text for the passed in field.
    """
    # explicit configuration wins (note: renamed local to avoid shadowing builtin help)
    if field in self.field_config and 'help' in self.field_config[field]:
        return self.field_config[field]['help']

    # then any default supplied by the caller
    if default:
        return default

    # finally fall back to the model field's help_text, if we have a model
    if hasattr(self, 'model'):
        for model_field in self.model._meta.fields:
            if model_field.name == field:
                return model_field.help_text

    return None
"def",
"lookup_field_help",
"(",
"self",
",",
"field",
",",
"default",
"=",
"None",
")",
":",
"help",
"=",
"None",
"# is there a label specified for this field",
"if",
"field",
"in",
"self",
".",
"field_config",
"and",
"'help'",
"in",
"self",
".",
"field_config"... | Looks up the help text for the passed in field. | [
"Looks",
"up",
"the",
"help",
"text",
"for",
"the",
"passed",
"in",
"field",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L209-L230 | train |
nyaruka/smartmin | smartmin/views.py | SmartView.lookup_field_class | def lookup_field_class(self, field, obj=None, default=None):
"""
Looks up any additional class we should include when rendering this field
"""
css = ""
# is there a class specified for this field
if field in self.field_config and 'class' in self.field_config[field]:
css = self.field_config[field]['class']
# if we were given a default, use that
elif default:
css = default
def lookup_field_class(self, field, obj=None, default=None):
    """
    Looks up any additional class we should include when rendering this field
    """
    config = self.field_config.get(field, {})

    # an explicitly configured class wins, then the caller's default, then nothing
    if 'class' in config:
        return config['class']
    if default:
        return default
    return ""
"def",
"lookup_field_class",
"(",
"self",
",",
"field",
",",
"obj",
"=",
"None",
",",
"default",
"=",
"None",
")",
":",
"css",
"=",
"\"\"",
"# is there a class specified for this field",
"if",
"field",
"in",
"self",
".",
"field_config",
"and",
"'class'",
"in",... | Looks up any additional class we should include when rendering this field | [
"Looks",
"up",
"any",
"additional",
"class",
"we",
"should",
"include",
"when",
"rendering",
"this",
"field"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L232-L246 | train |
nyaruka/smartmin | smartmin/views.py | SmartView.get_template_names | def get_template_names(self):
"""
Returns the name of the template to use to render this request.
Smartmin provides default templates as fallbacks, so appends it's own templates names to the end
of whatever list is built by the generic views.
Subclasses can override this by setting a 'template_name' variable on the class.
"""
templates = []
if getattr(self, 'template_name', None):
templates.append(self.template_name)
if getattr(self, 'default_template', None):
templates.append(self.default_template)
else:
templates = super(SmartView, self).get_template_names()
def get_template_names(self):
    """
    Returns the name of the template to use to render this request.

    Smartmin provides default templates as fallbacks, so appends it's own templates names to the end
    of whatever list is built by the generic views.

    Subclasses can override this by setting a 'template_name' variable on the class.
    """
    # NOTE(review): when default_template is unset, any template_name is
    # discarded in favor of the generic view's list -- preserved as-is
    if getattr(self, 'default_template', None):
        names = []
        if getattr(self, 'template_name', None):
            names.append(self.template_name)
        names.append(self.default_template)
        return names

    return super(SmartView, self).get_template_names()
"def",
"get_template_names",
"(",
"self",
")",
":",
"templates",
"=",
"[",
"]",
"if",
"getattr",
"(",
"self",
",",
"'template_name'",
",",
"None",
")",
":",
"templates",
".",
"append",
"(",
"self",
".",
"template_name",
")",
"if",
"getattr",
"(",
"self",... | Returns the name of the template to use to render this request.
Smartmin provides default templates as fallbacks, so appends it's own templates names to the end
of whatever list is built by the generic views.
Subclasses can override this by setting a 'template_name' variable on the class. | [
"Returns",
"the",
"name",
"of",
"the",
"template",
"to",
"use",
"to",
"render",
"this",
"request",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L263-L281 | train |
nyaruka/smartmin | smartmin/views.py | SmartView.derive_fields | def derive_fields(self):
"""
Default implementation
"""
fields = []
if self.fields:
fields.append(self.fields)
def derive_fields(self):
    """
    Default implementation
    """
    # note: self.fields is appended as a single element (nested), not extended
    return [self.fields] if self.fields else []
"def",
"derive_fields",
"(",
"self",
")",
":",
"fields",
"=",
"[",
"]",
"if",
"self",
".",
"fields",
":",
"fields",
".",
"append",
"(",
"self",
".",
"fields",
")",
"return",
"fields"
] | Default implementation | [
"Default",
"implementation"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L283-L291 | train |
nyaruka/smartmin | smartmin/views.py | SmartView.get_context_data | def get_context_data(self, **kwargs):
"""
We supplement the normal context data by adding our fields and labels.
"""
context = super(SmartView, self).get_context_data(**kwargs)
# derive our field config
self.field_config = self.derive_field_config()
# add our fields
self.fields = self.derive_fields()
# build up our current parameter string, EXCLUSIVE of our page. These
# are used to build pagination URLs
url_params = "?"
order_params = ""
for key in self.request.GET.keys():
if key != 'page' and key != 'pjax' and (len(key) == 0 or key[0] != '_'):
for value in self.request.GET.getlist(key):
url_params += "%s=%s&" % (key, urlquote(value))
elif key == '_order':
order_params = "&".join(["%s=%s" % (key, _) for _ in self.request.GET.getlist(key)])
context['url_params'] = url_params
context['order_params'] = order_params + "&"
context['pjax'] = self.pjax
# set our blocks
context['blocks'] = dict()
# stuff it all in our context
context['fields'] = self.fields
context['view'] = self
context['field_config'] = self.field_config
context['title'] = self.derive_title()
# and any extra context the user specified
context.update(self.extra_context)
# by default, our base is 'base.html', but we might be pjax
base_template = "base.html"
if 'pjax' in self.request.GET or 'pjax' in self.request.POST:
base_template = "smartmin/pjax.html"
if 'HTTP_X_PJAX' in self.request.META:
base_template = "smartmin/pjax.html"
context['base_template'] = base_template
# set our refresh if we have one
refresh = self.derive_refresh()
if refresh:
context['refresh'] = refresh
def get_context_data(self, **kwargs):
    """
    We supplement the normal context data by adding our fields and labels.
    """
    context = super(SmartView, self).get_context_data(**kwargs)

    # work out the field configuration and the fields we will display
    self.field_config = self.derive_field_config()
    self.fields = self.derive_fields()

    # build up our current parameter string, EXCLUSIVE of our page. These
    # are used to build pagination URLs
    url_params = "?"
    order_params = ""
    for key in self.request.GET.keys():
        # page/pjax and underscore-prefixed keys are excluded from pagination URLs
        excluded = key in ('page', 'pjax') or (len(key) > 0 and key[0] == '_')
        if not excluded:
            for value in self.request.GET.getlist(key):
                url_params += "%s=%s&" % (key, urlquote(value))
        elif key == '_order':
            order_params = "&".join("%s=%s" % (key, v) for v in self.request.GET.getlist(key))

    context['url_params'] = url_params
    context['order_params'] = order_params + "&"
    context['pjax'] = self.pjax

    # set our blocks
    context['blocks'] = dict()

    # stuff it all in our context
    context['fields'] = self.fields
    context['view'] = self
    context['field_config'] = self.field_config
    context['title'] = self.derive_title()

    # and any extra context the user specified
    context.update(self.extra_context)

    # by default, our base is 'base.html', but we might be pjax
    if 'pjax' in self.request.GET or 'pjax' in self.request.POST or 'HTTP_X_PJAX' in self.request.META:
        context['base_template'] = "smartmin/pjax.html"
    else:
        context['base_template'] = "base.html"

    # set our refresh if we have one
    refresh = self.derive_refresh()
    if refresh:
        context['refresh'] = refresh

    return context
"def",
"get_context_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"context",
"=",
"super",
"(",
"SmartView",
",",
"self",
")",
".",
"get_context_data",
"(",
"*",
"*",
"kwargs",
")",
"# derive our field config",
"self",
".",
"field_config",
"=",
"s... | We supplement the normal context data by adding our fields and labels. | [
"We",
"supplement",
"the",
"normal",
"context",
"data",
"by",
"adding",
"our",
"fields",
"and",
"labels",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L309-L364 | train |
nyaruka/smartmin | smartmin/views.py | SmartView.render_to_response | def render_to_response(self, context, **response_kwargs):
"""
Overloaded to deal with _format arguments.
"""
# should we actually render in json?
if '_format' in self.request.GET and self.request.GET['_format'] == 'json':
return JsonResponse(self.as_json(context), safe=False)
# otherwise, return normally
else:
def render_to_response(self, context, **response_kwargs):
    """
    Overloaded to deal with _format arguments.
    """
    # a '_format=json' query parameter switches us to a JSON response
    if self.request.GET.get('_format') == 'json':
        return JsonResponse(self.as_json(context), safe=False)

    # otherwise, render the template as usual
    return super(SmartView, self).render_to_response(context)
"def",
"render_to_response",
"(",
"self",
",",
"context",
",",
"*",
"*",
"response_kwargs",
")",
":",
"# should we actually render in json?",
"if",
"'_format'",
"in",
"self",
".",
"request",
".",
"GET",
"and",
"self",
".",
"request",
".",
"GET",
"[",
"'_format... | Overloaded to deal with _format arguments. | [
"Overloaded",
"to",
"deal",
"with",
"_format",
"arguments",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L373-L383 | train |
nyaruka/smartmin | smartmin/views.py | SmartReadView.derive_fields | def derive_fields(self):
"""
Derives our fields. We first default to using our 'fields' variable if available,
otherwise we figure it out from our object.
"""
if self.fields:
return list(self.fields)
else:
fields = []
for field in self.object._meta.fields:
fields.append(field.name)
# only exclude? then remove those items there
exclude = self.derive_exclude()
# remove any excluded fields
fields = [field for field in fields if field not in exclude]
def derive_fields(self):
    """
    Derives our fields. We first default to using our 'fields' variable if available,
    otherwise we figure it out from our object.
    """
    if self.fields:
        return list(self.fields)

    # no explicit fields: use every model field that isn't excluded
    excluded = self.derive_exclude()
    return [f.name for f in self.object._meta.fields if f.name not in excluded]
"def",
"derive_fields",
"(",
"self",
")",
":",
"if",
"self",
".",
"fields",
":",
"return",
"list",
"(",
"self",
".",
"fields",
")",
"else",
":",
"fields",
"=",
"[",
"]",
"for",
"field",
"in",
"self",
".",
"object",
".",
"_meta",
".",
"fields",
":",... | Derives our fields. We first default to using our 'fields' variable if available,
otherwise we figure it out from our object. | [
"Derives",
"our",
"fields",
".",
"We",
"first",
"default",
"to",
"using",
"our",
"fields",
"variable",
"if",
"available",
"otherwise",
"we",
"figure",
"it",
"out",
"from",
"our",
"object",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L435-L454 | train |
nyaruka/smartmin | smartmin/views.py | SmartDeleteView.get_context_data | def get_context_data(self, **kwargs):
""" Add in the field to use for the name field """
context = super(SmartDeleteView, self).get_context_data(**kwargs)
context['name_field'] = self.name_field
context['cancel_url'] = self.get_cancel_url()
def get_context_data(self, **kwargs):
    """ Add in the field to use for the name field """
    context = super(SmartDeleteView, self).get_context_data(**kwargs)
    context.update(name_field=self.name_field, cancel_url=self.get_cancel_url())
    return context
"def",
"get_context_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"context",
"=",
"super",
"(",
"SmartDeleteView",
",",
"self",
")",
".",
"get_context_data",
"(",
"*",
"*",
"kwargs",
")",
"context",
"[",
"'name_field'",
"]",
"=",
"self",
".",
... | Add in the field to use for the name field | [
"Add",
"in",
"the",
"field",
"to",
"use",
"for",
"the",
"name",
"field"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L498-L503 | train |
nyaruka/smartmin | smartmin/views.py | SmartListView.derive_title | def derive_title(self):
"""
Derives our title from our list
"""
title = super(SmartListView, self).derive_title()
if not title:
return force_text(self.model._meta.verbose_name_plural).title()
else:
def derive_title(self):
    """
    Derives our title from our list
    """
    # prefer any explicitly derived title; otherwise use the plural model name
    return super(SmartListView, self).derive_title() or force_text(self.model._meta.verbose_name_plural).title()
"def",
"derive_title",
"(",
"self",
")",
":",
"title",
"=",
"super",
"(",
"SmartListView",
",",
"self",
")",
".",
"derive_title",
"(",
")",
"if",
"not",
"title",
":",
"return",
"force_text",
"(",
"self",
".",
"model",
".",
"_meta",
".",
"verbose_name_plu... | Derives our title from our list | [
"Derives",
"our",
"title",
"from",
"our",
"list"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L531-L540 | train |
nyaruka/smartmin | smartmin/views.py | SmartListView.derive_link_fields | def derive_link_fields(self, context):
"""
Used to derive which fields should be linked. This should return a set() containing
the names of those fields which should be linkable.
"""
if self.link_fields is not None:
return self.link_fields
else:
link_fields = set()
if self.fields:
for field in self.fields:
if field != 'is_active':
link_fields.add(field)
break
def derive_link_fields(self, context):
    """
    Used to derive which fields should be linked. This should return a set() containing
    the names of those fields which should be linkable.
    """
    # an explicit link_fields setting (even an empty collection) wins
    if self.link_fields is not None:
        return self.link_fields

    # default to linking only the first field that isn't 'is_active'
    linked = set()
    for name in self.fields or ():
        if name != 'is_active':
            linked.add(name)
            break
    return linked
"def",
"derive_link_fields",
"(",
"self",
",",
"context",
")",
":",
"if",
"self",
".",
"link_fields",
"is",
"not",
"None",
":",
"return",
"self",
".",
"link_fields",
"else",
":",
"link_fields",
"=",
"set",
"(",
")",
"if",
"self",
".",
"fields",
":",
"f... | Used to derive which fields should be linked. This should return a set() containing
the names of those fields which should be linkable. | [
"Used",
"to",
"derive",
"which",
"fields",
"should",
"be",
"linked",
".",
"This",
"should",
"return",
"a",
"set",
"()",
"containing",
"the",
"names",
"of",
"those",
"fields",
"which",
"should",
"be",
"linkable",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L542-L558 | train |
nyaruka/smartmin | smartmin/views.py | SmartListView.lookup_field_orderable | def lookup_field_orderable(self, field):
"""
Returns whether the passed in field is sortable or not, by default all 'raw' fields, that
is fields that are part of the model are sortable.
"""
try:
self.model._meta.get_field_by_name(field)
return True
except Exception:
# that field doesn't exist, so not sortable
def lookup_field_orderable(self, field):
    """
    Returns whether the passed in field is sortable or not, by default all 'raw' fields, that
    is fields that are part of the model are sortable.
    """
    # NOTE(review): get_field_by_name was removed in Django 1.10 -- on newer
    # Django this raises AttributeError and every field reports as unsortable;
    # confirm the supported Django version
    try:
        self.model._meta.get_field_by_name(field)
    except Exception:
        # that field doesn't exist, so not sortable
        return False
    return True
"def",
"lookup_field_orderable",
"(",
"self",
",",
"field",
")",
":",
"try",
":",
"self",
".",
"model",
".",
"_meta",
".",
"get_field_by_name",
"(",
"field",
")",
"return",
"True",
"except",
"Exception",
":",
"# that field doesn't exist, so not sortable",
"return"... | Returns whether the passed in field is sortable or not, by default all 'raw' fields, that
is fields that are part of the model are sortable. | [
"Returns",
"whether",
"the",
"passed",
"in",
"field",
"is",
"sortable",
"or",
"not",
"by",
"default",
"all",
"raw",
"fields",
"that",
"is",
"fields",
"that",
"are",
"part",
"of",
"the",
"model",
"are",
"sortable",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L566-L576 | train |
nyaruka/smartmin | smartmin/views.py | SmartListView.get_context_data | def get_context_data(self, **kwargs):
"""
Add in what fields are linkable
"""
context = super(SmartListView, self).get_context_data(**kwargs)
# our linkable fields
self.link_fields = self.derive_link_fields(context)
# stuff it all in our context
context['link_fields'] = self.link_fields
# our search term if any
if 'search' in self.request.GET:
context['search'] = self.request.GET['search']
# our ordering field if any
order = self.derive_ordering()
if order:
if order[0] == '-':
context['order'] = order[1:]
context['order_asc'] = False
else:
context['order'] = order
context['order_asc'] = True
def get_context_data(self, **kwargs):
    """
    Add in what fields are linkable
    """
    context = super(SmartListView, self).get_context_data(**kwargs)

    # work out and expose which of our fields are linkable
    self.link_fields = self.derive_link_fields(context)
    context['link_fields'] = self.link_fields

    # our search term if any
    if 'search' in self.request.GET:
        context['search'] = self.request.GET['search']

    # our ordering field if any; a leading '-' marks a descending sort
    order = self.derive_ordering()
    if order:
        descending = order[0] == '-'
        context['order'] = order[1:] if descending else order
        context['order_asc'] = not descending

    return context
"def",
"get_context_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"context",
"=",
"super",
"(",
"SmartListView",
",",
"self",
")",
".",
"get_context_data",
"(",
"*",
"*",
"kwargs",
")",
"# our linkable fields",
"self",
".",
"link_fields",
"=",
"se... | Add in what fields are linkable | [
"Add",
"in",
"what",
"fields",
"are",
"linkable"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L578-L604 | train |
nyaruka/smartmin | smartmin/views.py | SmartListView.derive_queryset | def derive_queryset(self, **kwargs):
"""
Derives our queryset.
"""
# get our parent queryset
queryset = super(SmartListView, self).get_queryset(**kwargs)
# apply any filtering
search_fields = self.derive_search_fields()
search_query = self.request.GET.get('search')
if search_fields and search_query:
term_queries = []
for term in search_query.split(' '):
field_queries = []
for field in search_fields:
field_queries.append(Q(**{field: term}))
term_queries.append(reduce(operator.or_, field_queries))
queryset = queryset.filter(reduce(operator.and_, term_queries))
# add any select related
related = self.derive_select_related()
if related:
queryset = queryset.select_related(*related)
# return our queryset
def derive_queryset(self, **kwargs):
    """
    Derives our queryset.
    """
    # get our parent queryset
    queryset = super(SmartListView, self).get_queryset(**kwargs)

    # apply any search filtering: each whitespace-separated term must match
    # at least one of the search fields (OR within a term, AND across terms)
    search_fields = self.derive_search_fields()
    search_query = self.request.GET.get('search')
    if search_fields and search_query:
        term_queries = []
        for term in search_query.split(' '):
            field_queries = [Q(**{field: term}) for field in search_fields]
            term_queries.append(reduce(operator.or_, field_queries))
        queryset = queryset.filter(reduce(operator.and_, term_queries))

    # add any select related
    related = self.derive_select_related()
    if related:
        queryset = queryset.select_related(*related)

    return queryset
"def",
"derive_queryset",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# get our parent queryset",
"queryset",
"=",
"super",
"(",
"SmartListView",
",",
"self",
")",
".",
"get_queryset",
"(",
"*",
"*",
"kwargs",
")",
"# apply any filtering",
"search_fields",
... | Derives our queryset. | [
"Derives",
"our",
"queryset",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L609-L635 | train |
nyaruka/smartmin | smartmin/views.py | SmartListView.get_queryset | def get_queryset(self, **kwargs):
"""
Gets our queryset. This takes care of filtering if there are any
fields to filter by.
"""
queryset = self.derive_queryset(**kwargs)
def get_queryset(self, **kwargs):
    """
    Gets our queryset. This takes care of filtering if there are any
    fields to filter by.
    """
    # derive first, then order -- both hooks may be overridden by subclasses
    return self.order_queryset(self.derive_queryset(**kwargs))
"def",
"get_queryset",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"queryset",
"=",
"self",
".",
"derive_queryset",
"(",
"*",
"*",
"kwargs",
")",
"return",
"self",
".",
"order_queryset",
"(",
"queryset",
")"
] | Gets our queryset. This takes care of filtering if there are any
fields to filter by. | [
"Gets",
"our",
"queryset",
".",
"This",
"takes",
"care",
"of",
"filtering",
"if",
"there",
"are",
"any",
"fields",
"to",
"filter",
"by",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L637-L644 | train |
nyaruka/smartmin | smartmin/views.py | SmartListView.derive_ordering | def derive_ordering(self):
"""
Returns what field should be used for ordering (using a prepended '-' to indicate descending sort).
If the default order of the queryset should be used, returns None
"""
if '_order' in self.request.GET:
return self.request.GET['_order']
elif self.default_order:
return self.default_order
else:
return None | python | def derive_ordering(self):
"""
Returns what field should be used for ordering (using a prepended '-' to indicate descending sort).
If the default order of the queryset should be used, returns None
"""
if '_order' in self.request.GET:
return self.request.GET['_order']
elif self.default_order:
return self.default_order
else:
return None | [
"def",
"derive_ordering",
"(",
"self",
")",
":",
"if",
"'_order'",
"in",
"self",
".",
"request",
".",
"GET",
":",
"return",
"self",
".",
"request",
".",
"GET",
"[",
"'_order'",
"]",
"elif",
"self",
".",
"default_order",
":",
"return",
"self",
".",
"def... | Returns what field should be used for ordering (using a prepended '-' to indicate descending sort).
If the default order of the queryset should be used, returns None | [
"Returns",
"what",
"field",
"should",
"be",
"used",
"for",
"ordering",
"(",
"using",
"a",
"prepended",
"-",
"to",
"indicate",
"descending",
"sort",
")",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L646-L657 | train |
nyaruka/smartmin | smartmin/views.py | SmartListView.order_queryset | def order_queryset(self, queryset):
"""
Orders the passed in queryset, returning a new queryset in response. By default uses the _order query
parameter.
"""
order = self.derive_ordering()
# if we get our order from the request
# make sure it is a valid field in the list
if '_order' in self.request.GET:
if order.lstrip('-') not in self.derive_fields():
order = None
if order:
# if our order is a single string, convert to a simple list
if isinstance(order, str):
order = (order,)
queryset = queryset.order_by(*order)
return queryset | python | def order_queryset(self, queryset):
"""
Orders the passed in queryset, returning a new queryset in response. By default uses the _order query
parameter.
"""
order = self.derive_ordering()
# if we get our order from the request
# make sure it is a valid field in the list
if '_order' in self.request.GET:
if order.lstrip('-') not in self.derive_fields():
order = None
if order:
# if our order is a single string, convert to a simple list
if isinstance(order, str):
order = (order,)
queryset = queryset.order_by(*order)
return queryset | [
"def",
"order_queryset",
"(",
"self",
",",
"queryset",
")",
":",
"order",
"=",
"self",
".",
"derive_ordering",
"(",
")",
"# if we get our order from the request",
"# make sure it is a valid field in the list",
"if",
"'_order'",
"in",
"self",
".",
"request",
".",
"GET"... | Orders the passed in queryset, returning a new queryset in response. By default uses the _order query
parameter. | [
"Orders",
"the",
"passed",
"in",
"queryset",
"returning",
"a",
"new",
"queryset",
"in",
"response",
".",
"By",
"default",
"uses",
"the",
"_order",
"query",
"parameter",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L659-L679 | train |
nyaruka/smartmin | smartmin/views.py | SmartListView.derive_fields | def derive_fields(self):
"""
Derives our fields.
"""
if self.fields:
return self.fields
else:
fields = []
for field in self.object_list.model._meta.fields:
if field.name != 'id':
fields.append(field.name)
return fields | python | def derive_fields(self):
"""
Derives our fields.
"""
if self.fields:
return self.fields
else:
fields = []
for field in self.object_list.model._meta.fields:
if field.name != 'id':
fields.append(field.name)
return fields | [
"def",
"derive_fields",
"(",
"self",
")",
":",
"if",
"self",
".",
"fields",
":",
"return",
"self",
".",
"fields",
"else",
":",
"fields",
"=",
"[",
"]",
"for",
"field",
"in",
"self",
".",
"object_list",
".",
"model",
".",
"_meta",
".",
"fields",
":",
... | Derives our fields. | [
"Derives",
"our",
"fields",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L681-L693 | train |
nyaruka/smartmin | smartmin/views.py | SmartListView.render_to_response | def render_to_response(self, context, **response_kwargs):
"""
Overloaded to deal with _format arguments.
"""
# is this a select2 format response?
if self.request.GET.get('_format', 'html') == 'select2':
results = []
for obj in context['object_list']:
result = None
if hasattr(obj, 'as_select2'):
result = obj.as_select2()
if not result:
result = dict(id=obj.pk, text="%s" % obj)
results.append(result)
json_data = dict(results=results, err='nil', more=context['page_obj'].has_next())
return JsonResponse(json_data)
# otherwise, return normally
else:
return super(SmartListView, self).render_to_response(context) | python | def render_to_response(self, context, **response_kwargs):
"""
Overloaded to deal with _format arguments.
"""
# is this a select2 format response?
if self.request.GET.get('_format', 'html') == 'select2':
results = []
for obj in context['object_list']:
result = None
if hasattr(obj, 'as_select2'):
result = obj.as_select2()
if not result:
result = dict(id=obj.pk, text="%s" % obj)
results.append(result)
json_data = dict(results=results, err='nil', more=context['page_obj'].has_next())
return JsonResponse(json_data)
# otherwise, return normally
else:
return super(SmartListView, self).render_to_response(context) | [
"def",
"render_to_response",
"(",
"self",
",",
"context",
",",
"*",
"*",
"response_kwargs",
")",
":",
"# is this a select2 format response?",
"if",
"self",
".",
"request",
".",
"GET",
".",
"get",
"(",
"'_format'",
",",
"'html'",
")",
"==",
"'select2'",
":",
... | Overloaded to deal with _format arguments. | [
"Overloaded",
"to",
"deal",
"with",
"_format",
"arguments",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L707-L729 | train |
nyaruka/smartmin | smartmin/views.py | SmartFormMixin.get_form | def get_form(self):
"""
Returns an instance of the form to be used in this view.
"""
self.form = super(SmartFormMixin, self).get_form()
fields = list(self.derive_fields())
# apply our field filtering on our form class
exclude = self.derive_exclude()
exclude += self.derive_readonly()
# remove any excluded fields
for field in exclude:
if field in self.form.fields:
del self.form.fields[field]
if fields is not None:
# filter out our form fields
remove = [name for name in self.form.fields.keys() if name not in fields]
for name in remove:
del self.form.fields[name]
# stuff in our referer as the default location for where to return
location = forms.CharField(widget=forms.widgets.HiddenInput(), required=False)
if ('HTTP_REFERER' in self.request.META):
location.initial = self.request.META['HTTP_REFERER']
# add the location to our form fields
self.form.fields['loc'] = location
if fields:
fields.append('loc')
# provides a hook to programmatically customize fields before rendering
for (name, field) in self.form.fields.items():
field = self.customize_form_field(name, field)
self.form.fields[name] = field
return self.form | python | def get_form(self):
"""
Returns an instance of the form to be used in this view.
"""
self.form = super(SmartFormMixin, self).get_form()
fields = list(self.derive_fields())
# apply our field filtering on our form class
exclude = self.derive_exclude()
exclude += self.derive_readonly()
# remove any excluded fields
for field in exclude:
if field in self.form.fields:
del self.form.fields[field]
if fields is not None:
# filter out our form fields
remove = [name for name in self.form.fields.keys() if name not in fields]
for name in remove:
del self.form.fields[name]
# stuff in our referer as the default location for where to return
location = forms.CharField(widget=forms.widgets.HiddenInput(), required=False)
if ('HTTP_REFERER' in self.request.META):
location.initial = self.request.META['HTTP_REFERER']
# add the location to our form fields
self.form.fields['loc'] = location
if fields:
fields.append('loc')
# provides a hook to programmatically customize fields before rendering
for (name, field) in self.form.fields.items():
field = self.customize_form_field(name, field)
self.form.fields[name] = field
return self.form | [
"def",
"get_form",
"(",
"self",
")",
":",
"self",
".",
"form",
"=",
"super",
"(",
"SmartFormMixin",
",",
"self",
")",
".",
"get_form",
"(",
")",
"fields",
"=",
"list",
"(",
"self",
".",
"derive_fields",
"(",
")",
")",
"# apply our field filtering on our fo... | Returns an instance of the form to be used in this view. | [
"Returns",
"an",
"instance",
"of",
"the",
"form",
"to",
"be",
"used",
"in",
"this",
"view",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L825-L865 | train |
nyaruka/smartmin | smartmin/views.py | SmartFormMixin.customize_form_field | def customize_form_field(self, name, field):
"""
Allows views to customize their form fields. By default, Smartmin replaces the plain textbox
date input with it's own DatePicker implementation.
"""
if isinstance(field, forms.fields.DateField) and isinstance(field.widget, forms.widgets.DateInput):
field.widget = widgets.DatePickerWidget()
field.input_formats = [field.widget.input_format[1]] + list(field.input_formats)
if isinstance(field, forms.fields.ImageField) and isinstance(field.widget, forms.widgets.ClearableFileInput):
field.widget = widgets.ImageThumbnailWidget()
return field | python | def customize_form_field(self, name, field):
"""
Allows views to customize their form fields. By default, Smartmin replaces the plain textbox
date input with it's own DatePicker implementation.
"""
if isinstance(field, forms.fields.DateField) and isinstance(field.widget, forms.widgets.DateInput):
field.widget = widgets.DatePickerWidget()
field.input_formats = [field.widget.input_format[1]] + list(field.input_formats)
if isinstance(field, forms.fields.ImageField) and isinstance(field.widget, forms.widgets.ClearableFileInput):
field.widget = widgets.ImageThumbnailWidget()
return field | [
"def",
"customize_form_field",
"(",
"self",
",",
"name",
",",
"field",
")",
":",
"if",
"isinstance",
"(",
"field",
",",
"forms",
".",
"fields",
".",
"DateField",
")",
"and",
"isinstance",
"(",
"field",
".",
"widget",
",",
"forms",
".",
"widgets",
".",
... | Allows views to customize their form fields. By default, Smartmin replaces the plain textbox
date input with it's own DatePicker implementation. | [
"Allows",
"views",
"to",
"customize",
"their",
"form",
"fields",
".",
"By",
"default",
"Smartmin",
"replaces",
"the",
"plain",
"textbox",
"date",
"input",
"with",
"it",
"s",
"own",
"DatePicker",
"implementation",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L867-L879 | train |
nyaruka/smartmin | smartmin/views.py | SmartFormMixin.lookup_field_label | def lookup_field_label(self, context, field, default=None):
"""
Figures out what the field label should be for the passed in field name.
We overload this so as to use our form to see if there is label set there. If so
then we'll pass that as the default instead of having our parent derive
the field from the name.
"""
default = None
for form_field in self.form:
if form_field.name == field:
default = form_field.label
break
return super(SmartFormMixin, self).lookup_field_label(context, field, default=default) | python | def lookup_field_label(self, context, field, default=None):
"""
Figures out what the field label should be for the passed in field name.
We overload this so as to use our form to see if there is label set there. If so
then we'll pass that as the default instead of having our parent derive
the field from the name.
"""
default = None
for form_field in self.form:
if form_field.name == field:
default = form_field.label
break
return super(SmartFormMixin, self).lookup_field_label(context, field, default=default) | [
"def",
"lookup_field_label",
"(",
"self",
",",
"context",
",",
"field",
",",
"default",
"=",
"None",
")",
":",
"default",
"=",
"None",
"for",
"form_field",
"in",
"self",
".",
"form",
":",
"if",
"form_field",
".",
"name",
"==",
"field",
":",
"default",
... | Figures out what the field label should be for the passed in field name.
We overload this so as to use our form to see if there is label set there. If so
then we'll pass that as the default instead of having our parent derive
the field from the name. | [
"Figures",
"out",
"what",
"the",
"field",
"label",
"should",
"be",
"for",
"the",
"passed",
"in",
"field",
"name",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L881-L896 | train |
nyaruka/smartmin | smartmin/views.py | SmartFormMixin.lookup_field_help | def lookup_field_help(self, field, default=None):
"""
Looks up the help text for the passed in field.
This is overloaded so that we can check whether our form has help text set
explicitely. If so, we will pass this as the default to our parent function.
"""
default = None
for form_field in self.form:
if form_field.name == field:
default = form_field.help_text
break
return super(SmartFormMixin, self).lookup_field_help(field, default=default) | python | def lookup_field_help(self, field, default=None):
"""
Looks up the help text for the passed in field.
This is overloaded so that we can check whether our form has help text set
explicitely. If so, we will pass this as the default to our parent function.
"""
default = None
for form_field in self.form:
if form_field.name == field:
default = form_field.help_text
break
return super(SmartFormMixin, self).lookup_field_help(field, default=default) | [
"def",
"lookup_field_help",
"(",
"self",
",",
"field",
",",
"default",
"=",
"None",
")",
":",
"default",
"=",
"None",
"for",
"form_field",
"in",
"self",
".",
"form",
":",
"if",
"form_field",
".",
"name",
"==",
"field",
":",
"default",
"=",
"form_field",
... | Looks up the help text for the passed in field.
This is overloaded so that we can check whether our form has help text set
explicitely. If so, we will pass this as the default to our parent function. | [
"Looks",
"up",
"the",
"help",
"text",
"for",
"the",
"passed",
"in",
"field",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L898-L912 | train |
nyaruka/smartmin | smartmin/views.py | SmartFormMixin.derive_readonly | def derive_readonly(self):
"""
Figures out what fields should be readonly. We iterate our field_config to find all
that have a readonly of true
"""
readonly = list(self.readonly)
for key, value in self.field_config.items():
if 'readonly' in value and value['readonly']:
readonly.append(key)
return readonly | python | def derive_readonly(self):
"""
Figures out what fields should be readonly. We iterate our field_config to find all
that have a readonly of true
"""
readonly = list(self.readonly)
for key, value in self.field_config.items():
if 'readonly' in value and value['readonly']:
readonly.append(key)
return readonly | [
"def",
"derive_readonly",
"(",
"self",
")",
":",
"readonly",
"=",
"list",
"(",
"self",
".",
"readonly",
")",
"for",
"key",
",",
"value",
"in",
"self",
".",
"field_config",
".",
"items",
"(",
")",
":",
"if",
"'readonly'",
"in",
"value",
"and",
"value",
... | Figures out what fields should be readonly. We iterate our field_config to find all
that have a readonly of true | [
"Figures",
"out",
"what",
"fields",
"should",
"be",
"readonly",
".",
"We",
"iterate",
"our",
"field_config",
"to",
"find",
"all",
"that",
"have",
"a",
"readonly",
"of",
"true"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L914-L924 | train |
nyaruka/smartmin | smartmin/views.py | SmartFormMixin.derive_fields | def derive_fields(self):
"""
Derives our fields.
"""
if self.fields is not None:
fields = list(self.fields)
else:
form = self.form
fields = []
for field in form:
fields.append(field.name)
# this is slightly confusing but we add in readonly fields here because they will still
# need to be displayed
readonly = self.derive_readonly()
if readonly:
fields += readonly
# remove any excluded fields
for exclude in self.derive_exclude():
if exclude in fields:
fields.remove(exclude)
return fields | python | def derive_fields(self):
"""
Derives our fields.
"""
if self.fields is not None:
fields = list(self.fields)
else:
form = self.form
fields = []
for field in form:
fields.append(field.name)
# this is slightly confusing but we add in readonly fields here because they will still
# need to be displayed
readonly = self.derive_readonly()
if readonly:
fields += readonly
# remove any excluded fields
for exclude in self.derive_exclude():
if exclude in fields:
fields.remove(exclude)
return fields | [
"def",
"derive_fields",
"(",
"self",
")",
":",
"if",
"self",
".",
"fields",
"is",
"not",
"None",
":",
"fields",
"=",
"list",
"(",
"self",
".",
"fields",
")",
"else",
":",
"form",
"=",
"self",
".",
"form",
"fields",
"=",
"[",
"]",
"for",
"field",
... | Derives our fields. | [
"Derives",
"our",
"fields",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L926-L949 | train |
nyaruka/smartmin | smartmin/views.py | SmartFormMixin.get_form_class | def get_form_class(self):
"""
Returns the form class to use in this view
"""
if self.form_class:
form_class = self.form_class
else:
if self.model is not None:
# If a model has been explicitly provided, use it
model = self.model
elif hasattr(self, 'object') and self.object is not None:
# If this view is operating on a single object, use
# the class of that object
model = self.object.__class__
else:
# Try to get a queryset and extract the model class
# from that
model = self.get_queryset().model
# run time parameters when building our form
factory_kwargs = self.get_factory_kwargs()
form_class = model_forms.modelform_factory(model, **factory_kwargs)
return form_class | python | def get_form_class(self):
"""
Returns the form class to use in this view
"""
if self.form_class:
form_class = self.form_class
else:
if self.model is not None:
# If a model has been explicitly provided, use it
model = self.model
elif hasattr(self, 'object') and self.object is not None:
# If this view is operating on a single object, use
# the class of that object
model = self.object.__class__
else:
# Try to get a queryset and extract the model class
# from that
model = self.get_queryset().model
# run time parameters when building our form
factory_kwargs = self.get_factory_kwargs()
form_class = model_forms.modelform_factory(model, **factory_kwargs)
return form_class | [
"def",
"get_form_class",
"(",
"self",
")",
":",
"if",
"self",
".",
"form_class",
":",
"form_class",
"=",
"self",
".",
"form_class",
"else",
":",
"if",
"self",
".",
"model",
"is",
"not",
"None",
":",
"# If a model has been explicitly provided, use it",
"model",
... | Returns the form class to use in this view | [
"Returns",
"the",
"form",
"class",
"to",
"use",
"in",
"this",
"view"
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L951-L975 | train |
nyaruka/smartmin | smartmin/views.py | SmartFormMixin.get_factory_kwargs | def get_factory_kwargs(self):
"""
Let's us specify any extra parameters we might want to call for our form factory.
These can include: 'form', 'fields', 'exclude' or 'formfield_callback'
"""
params = dict()
exclude = self.derive_exclude()
exclude += self.derive_readonly()
if self.fields:
fields = list(self.fields)
for ex in exclude:
if ex in fields:
fields.remove(ex)
params['fields'] = fields
if exclude:
params['exclude'] = exclude
return params | python | def get_factory_kwargs(self):
"""
Let's us specify any extra parameters we might want to call for our form factory.
These can include: 'form', 'fields', 'exclude' or 'formfield_callback'
"""
params = dict()
exclude = self.derive_exclude()
exclude += self.derive_readonly()
if self.fields:
fields = list(self.fields)
for ex in exclude:
if ex in fields:
fields.remove(ex)
params['fields'] = fields
if exclude:
params['exclude'] = exclude
return params | [
"def",
"get_factory_kwargs",
"(",
"self",
")",
":",
"params",
"=",
"dict",
"(",
")",
"exclude",
"=",
"self",
".",
"derive_exclude",
"(",
")",
"exclude",
"+=",
"self",
".",
"derive_readonly",
"(",
")",
"if",
"self",
".",
"fields",
":",
"fields",
"=",
"l... | Let's us specify any extra parameters we might want to call for our form factory.
These can include: 'form', 'fields', 'exclude' or 'formfield_callback' | [
"Let",
"s",
"us",
"specify",
"any",
"extra",
"parameters",
"we",
"might",
"want",
"to",
"call",
"for",
"our",
"form",
"factory",
"."
] | 488a676a4960555e4d216a7b95d6e01a4ad4efd8 | https://github.com/nyaruka/smartmin/blob/488a676a4960555e4d216a7b95d6e01a4ad4efd8/smartmin/views.py#L977-L999 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.