repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
lobocv/pyperform | pyperform/benchmark.py | Benchmark.write_log | python | def write_log(self, fs=None):
log = StringIO.StringIO()
log.write(self.setup_src)
# If the function is not bound, write the test score to the log
if not self.is_class_method:
time_avg = convert_time_units(self.time_average_seconds)
log.write("\nAverage time: {0} \n".format(time_avg))
if fs:
with open(fs, 'w') as _f:
_f.write(log.getvalue()) | Write the results of the benchmark to a log file.
:param fs: file-like object. | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/benchmark.py#L68-L83 | [
"def convert_time_units(t):\n \"\"\" Convert time in seconds into reasonable time units. \"\"\"\n if t == 0:\n return '0 s'\n order = log10(t)\n if -9 < order < -6:\n time_units = 'ns'\n factor = 1000000000\n elif -6 <= order < -3:\n time_units = 'us'\n factor = 1000000\n elif -3 <= order < -1:\n time_units = 'ms'\n factor = 1000.\n elif -1 <= order:\n time_units = 's'\n factor = 1\n return \"{:.3f} {}\".format(factor * t, time_units)\n"
] | class Benchmark(object):
enable = True
def __init__(self, setup=None, classname=None, timeit_repeat=3, timeit_number=1000, largs=None, kwargs=None):
self.setup = setup
self.timeit_repeat = timeit_repeat
self.timeit_number = timeit_number
self.classname = classname
self.group = None
self.is_class_method = None
if largs is not None and type(largs) is tuple:
self._args = largs[:]
else:
self._args = ()
self._kwargs = kwargs.copy() if kwargs is not None else {}
self.setup_src = ''
self.callable = None
self._is_function = None
self.log = StringIO.StringIO()
self.time_average_seconds = None
def __call__(self, caller):
if self.enable:
self.callable = caller
self._is_function = isinstance(caller, FunctionType)
fp = inspect.getfile(caller)
imports = get_tagged_imports(fp)
func_src = remove_decorators(globalize_indentation(inspect.getsource(caller)))
# Determine if the function is bound. If it is, keep track of it so we can run the benchmark after the class
# benchmark has been initialized.
src_lines = func_src.splitlines()
self.is_class_method = 'def' in src_lines[0] and 'self' in src_lines[0]
if self.is_class_method and self.classname:
from .benchmarkedclass import BenchmarkedClass
try:
BenchmarkedClass.bound_functions[self.classname].append(self)
except KeyError:
BenchmarkedClass.bound_functions[self.classname] = [self]
if callable(self.setup):
setup_func = inspect.getsource(self.setup)
setup_src = globalize_indentation(setup_func[setup_func.index('\n') + 1:])
elif type(self.setup) == str:
setup_src = self.setup
else:
setup_src = ''
src = '\n'.join([imports, setup_src, func_src])
self.setup_src = src + '\n'
self.log.write(self.setup_src)
self.stmt = generate_call_statement(caller, self.is_class_method, *self._args, **self._kwargs)
return caller
def run_timeit(self, stmt, setup):
""" Create the function call statement as a string used for timeit. """
_timer = timeit.Timer(stmt=stmt, setup=setup)
trials = _timer.repeat(self.timeit_repeat, self.timeit_number)
self.time_average_seconds = sum(trials) / len(trials) / self.timeit_number
# Convert into reasonable time units
time_avg = convert_time_units(self.time_average_seconds)
return time_avg
|
lobocv/pyperform | pyperform/benchmark.py | Benchmark.run_timeit | python | def run_timeit(self, stmt, setup):
_timer = timeit.Timer(stmt=stmt, setup=setup)
trials = _timer.repeat(self.timeit_repeat, self.timeit_number)
self.time_average_seconds = sum(trials) / len(trials) / self.timeit_number
# Convert into reasonable time units
time_avg = convert_time_units(self.time_average_seconds)
return time_avg | Create the function call statement as a string used for timeit. | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/benchmark.py#L85-L92 | [
"def convert_time_units(t):\n \"\"\" Convert time in seconds into reasonable time units. \"\"\"\n if t == 0:\n return '0 s'\n order = log10(t)\n if -9 < order < -6:\n time_units = 'ns'\n factor = 1000000000\n elif -6 <= order < -3:\n time_units = 'us'\n factor = 1000000\n elif -3 <= order < -1:\n time_units = 'ms'\n factor = 1000.\n elif -1 <= order:\n time_units = 's'\n factor = 1\n return \"{:.3f} {}\".format(factor * t, time_units)\n"
] | class Benchmark(object):
enable = True
def __init__(self, setup=None, classname=None, timeit_repeat=3, timeit_number=1000, largs=None, kwargs=None):
self.setup = setup
self.timeit_repeat = timeit_repeat
self.timeit_number = timeit_number
self.classname = classname
self.group = None
self.is_class_method = None
if largs is not None and type(largs) is tuple:
self._args = largs[:]
else:
self._args = ()
self._kwargs = kwargs.copy() if kwargs is not None else {}
self.setup_src = ''
self.callable = None
self._is_function = None
self.log = StringIO.StringIO()
self.time_average_seconds = None
def __call__(self, caller):
if self.enable:
self.callable = caller
self._is_function = isinstance(caller, FunctionType)
fp = inspect.getfile(caller)
imports = get_tagged_imports(fp)
func_src = remove_decorators(globalize_indentation(inspect.getsource(caller)))
# Determine if the function is bound. If it is, keep track of it so we can run the benchmark after the class
# benchmark has been initialized.
src_lines = func_src.splitlines()
self.is_class_method = 'def' in src_lines[0] and 'self' in src_lines[0]
if self.is_class_method and self.classname:
from .benchmarkedclass import BenchmarkedClass
try:
BenchmarkedClass.bound_functions[self.classname].append(self)
except KeyError:
BenchmarkedClass.bound_functions[self.classname] = [self]
if callable(self.setup):
setup_func = inspect.getsource(self.setup)
setup_src = globalize_indentation(setup_func[setup_func.index('\n') + 1:])
elif type(self.setup) == str:
setup_src = self.setup
else:
setup_src = ''
src = '\n'.join([imports, setup_src, func_src])
self.setup_src = src + '\n'
self.log.write(self.setup_src)
self.stmt = generate_call_statement(caller, self.is_class_method, *self._args, **self._kwargs)
return caller
def write_log(self, fs=None):
"""
Write the results of the benchmark to a log file.
:param fs: file-like object.
"""
log = StringIO.StringIO()
log.write(self.setup_src)
# If the function is not bound, write the test score to the log
if not self.is_class_method:
time_avg = convert_time_units(self.time_average_seconds)
log.write("\nAverage time: {0} \n".format(time_avg))
if fs:
with open(fs, 'w') as _f:
_f.write(log.getvalue())
|
lobocv/pyperform | pyperform/cprofile_parser.py | cProfileFuncStat.from_dict | python | def from_dict(cls, d):
stats = []
for (filename, lineno, name), stat_values in d.iteritems():
if len(stat_values) == 5:
ncalls, ncall_nr, total_time, cum_time, subcall_stats = stat_values
else:
ncalls, ncall_nr, total_time, cum_time = stat_values
subcall_stats = None
stat = cProfileFuncStat(filename, lineno, name, ncalls, ncall_nr, total_time, cum_time, subcall_stats)
stats.append(stat)
return stats | Used to create an instance of this class from a pstats dict item | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/cprofile_parser.py#L62-L74 | null | class cProfileFuncStat(object):
"""
Class that represents a item in the pstats dictionary
"""
stats = {}
run_time_s = 0
n_decimal_percentages = 2
def __init__(self, filename, lineno, name, ncalls, ncall_nr, total_time, cum_time, subcall_stats=None):
self.filename = filename
self.line_number = lineno
self.name = name
self.ncalls = ncalls
self.nonrecursive_calls = ncall_nr
self.own_time_s = total_time
self.cummulative_time_s = cum_time
self.exclude = False
self.parent = None
if (filename, lineno, name) not in cProfileFuncStat.stats:
cProfileFuncStat.stats[(filename, lineno, name)] = self
cProfileFuncStat.run_time_s += self.own_time_s
if subcall_stats:
self.subcall = cProfileFuncStat.from_dict(subcall_stats)
for s in self.subcall:
s.parent = self
else:
self.subcall = subcall_stats
@property
def total_time(self):
return convert_time_units(self.own_time_s)
@property
def cummulative_time(self):
return convert_time_units(self.cummulative_time_s)
@property
def percentage_cummulative(self):
return round(100 * self.cummulative_time_s / cProfileFuncStat.run_time_s, cProfileFuncStat.n_decimal_percentages)
@property
def percentage_own(self):
return round(100 * self.own_time_s / cProfileFuncStat.run_time_s, cProfileFuncStat.n_decimal_percentages)
@property
def per_call_time(self):
return self.own_time_s / self.ncalls
@property
def per_call_time_non_recursive(self):
return self.own_time_s / self.nonrecursive_calls
@classmethod
def to_dict(self):
"""Convert back to the pstats dictionary representation (used for saving back as pstats binary file)"""
if self.subcall is not None:
if isinstance(self.subcall, dict):
subcalls = self.subcall
else:
subcalls = {}
for s in self.subcall:
subcalls.update(s.to_dict())
return {(self.filename, self.line_number, self.name): \
(self.ncalls, self.nonrecursive_calls, self.own_time_s, self.cummulative_time_s, subcalls)}
else:
return {(self.filename, self.line_number, self.name): \
(self.ncalls, self.nonrecursive_calls, self.own_time_s, self.cummulative_time_s)}
def __repr__(self):
return "{s.name}: total={s.total_time}, cum={s.cummulative_time}" \
" N={s.ncalls}, N_nr={s.nonrecursive_calls}".format(s=self)
|
lobocv/pyperform | pyperform/cprofile_parser.py | cProfileFuncStat.to_dict | python | def to_dict(self):
if self.subcall is not None:
if isinstance(self.subcall, dict):
subcalls = self.subcall
else:
subcalls = {}
for s in self.subcall:
subcalls.update(s.to_dict())
return {(self.filename, self.line_number, self.name): \
(self.ncalls, self.nonrecursive_calls, self.own_time_s, self.cummulative_time_s, subcalls)}
else:
return {(self.filename, self.line_number, self.name): \
(self.ncalls, self.nonrecursive_calls, self.own_time_s, self.cummulative_time_s)} | Convert back to the pstats dictionary representation (used for saving back as pstats binary file) | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/cprofile_parser.py#L76-L89 | null | class cProfileFuncStat(object):
"""
Class that represents a item in the pstats dictionary
"""
stats = {}
run_time_s = 0
n_decimal_percentages = 2
def __init__(self, filename, lineno, name, ncalls, ncall_nr, total_time, cum_time, subcall_stats=None):
self.filename = filename
self.line_number = lineno
self.name = name
self.ncalls = ncalls
self.nonrecursive_calls = ncall_nr
self.own_time_s = total_time
self.cummulative_time_s = cum_time
self.exclude = False
self.parent = None
if (filename, lineno, name) not in cProfileFuncStat.stats:
cProfileFuncStat.stats[(filename, lineno, name)] = self
cProfileFuncStat.run_time_s += self.own_time_s
if subcall_stats:
self.subcall = cProfileFuncStat.from_dict(subcall_stats)
for s in self.subcall:
s.parent = self
else:
self.subcall = subcall_stats
@property
def total_time(self):
return convert_time_units(self.own_time_s)
@property
def cummulative_time(self):
return convert_time_units(self.cummulative_time_s)
@property
def percentage_cummulative(self):
return round(100 * self.cummulative_time_s / cProfileFuncStat.run_time_s, cProfileFuncStat.n_decimal_percentages)
@property
def percentage_own(self):
return round(100 * self.own_time_s / cProfileFuncStat.run_time_s, cProfileFuncStat.n_decimal_percentages)
@property
def per_call_time(self):
return self.own_time_s / self.ncalls
@property
def per_call_time_non_recursive(self):
return self.own_time_s / self.nonrecursive_calls
@classmethod
def from_dict(cls, d):
"""Used to create an instance of this class from a pstats dict item"""
stats = []
for (filename, lineno, name), stat_values in d.iteritems():
if len(stat_values) == 5:
ncalls, ncall_nr, total_time, cum_time, subcall_stats = stat_values
else:
ncalls, ncall_nr, total_time, cum_time = stat_values
subcall_stats = None
stat = cProfileFuncStat(filename, lineno, name, ncalls, ncall_nr, total_time, cum_time, subcall_stats)
stats.append(stat)
return stats
def __repr__(self):
return "{s.name}: total={s.total_time}, cum={s.cummulative_time}" \
" N={s.ncalls}, N_nr={s.nonrecursive_calls}".format(s=self)
|
lobocv/pyperform | pyperform/cprofile_parser.py | cProfileParser.exclude_functions | python | def exclude_functions(self, *funcs):
for f in funcs:
f.exclude = True
run_time_s = sum(0 if s.exclude else s.own_time_s for s in self.stats)
cProfileFuncStat.run_time_s = run_time_s | Excludes the contributions from the following functions. | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/cprofile_parser.py#L111-L118 | null | class cProfileParser(object):
"""
A manager class that reads in a pstats file and allows futher decontruction of the statistics.
"""
def __init__(self, pstats_file):
self.path = pstats_file
with open(stat_file, 'rb') as _f:
self.raw_stats = _f.read()
with open(stat_file, 'r') as _f:
self.stats_dict = marshal.load(_f)
self.stats = cProfileFuncStat.from_dict(self.stats_dict)
def get_top(self, stat, n):
"""Return the top n values when sorting by 'stat'"""
return sorted(self.stats, key=lambda x: getattr(x, stat), reverse=True)[:n]
def save_pstat(self, path):
"""
Save the modified pstats file
"""
stats = {}
for s in self.stats:
if not s.exclude:
stats.update(s.to_dict())
with open(path, 'wb') as f:
marshal.dump(stats, f)
|
lobocv/pyperform | pyperform/cprofile_parser.py | cProfileParser.get_top | python | def get_top(self, stat, n):
"""Return the top n values when sorting by 'stat'"""
return sorted(self.stats, key=lambda x: getattr(x, stat), reverse=True)[:n] | Return the top n values when sorting by 'stat | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/cprofile_parser.py#L120-L122 | null | class cProfileParser(object):
"""
A manager class that reads in a pstats file and allows futher decontruction of the statistics.
"""
def __init__(self, pstats_file):
self.path = pstats_file
with open(stat_file, 'rb') as _f:
self.raw_stats = _f.read()
with open(stat_file, 'r') as _f:
self.stats_dict = marshal.load(_f)
self.stats = cProfileFuncStat.from_dict(self.stats_dict)
def exclude_functions(self, *funcs):
"""
Excludes the contributions from the following functions.
"""
for f in funcs:
f.exclude = True
run_time_s = sum(0 if s.exclude else s.own_time_s for s in self.stats)
cProfileFuncStat.run_time_s = run_time_s
def save_pstat(self, path):
"""
Save the modified pstats file
"""
stats = {}
for s in self.stats:
if not s.exclude:
stats.update(s.to_dict())
with open(path, 'wb') as f:
marshal.dump(stats, f)
|
lobocv/pyperform | pyperform/cprofile_parser.py | cProfileParser.save_pstat | python | def save_pstat(self, path):
stats = {}
for s in self.stats:
if not s.exclude:
stats.update(s.to_dict())
with open(path, 'wb') as f:
marshal.dump(stats, f) | Save the modified pstats file | train | https://github.com/lobocv/pyperform/blob/97d87e8b9ddb35bd8f2a6782965fd7735ab0349f/pyperform/cprofile_parser.py#L124-L134 | null | class cProfileParser(object):
"""
A manager class that reads in a pstats file and allows futher decontruction of the statistics.
"""
def __init__(self, pstats_file):
self.path = pstats_file
with open(stat_file, 'rb') as _f:
self.raw_stats = _f.read()
with open(stat_file, 'r') as _f:
self.stats_dict = marshal.load(_f)
self.stats = cProfileFuncStat.from_dict(self.stats_dict)
def exclude_functions(self, *funcs):
"""
Excludes the contributions from the following functions.
"""
for f in funcs:
f.exclude = True
run_time_s = sum(0 if s.exclude else s.own_time_s for s in self.stats)
cProfileFuncStat.run_time_s = run_time_s
def get_top(self, stat, n):
"""Return the top n values when sorting by 'stat'"""
return sorted(self.stats, key=lambda x: getattr(x, stat), reverse=True)[:n]
|
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/i2c_utility.py | TCA_select | python | def TCA_select(bus, addr, channel):
"""
This function will write to the control register of the
TCA module to select the channel that will be
exposed on the TCA module.
After doing this, the desired module can be used as it would be normally.
(The caller should use the address of the I2C sensor module.
The TCA module is only written to when the channel is switched.)
addr contains address of the TCA module
channel specifies the desired channel on the TCA that will be used.
Usage - Enable a channel
TCA_select(bus, self.mux_addr, channel_to_enable)
Channel to enable begins at 0 (enables first channel)
ends at 3 (enables fourth channel)
Usage - Disable all channels
TCA_select(bus, self.mux_addr, "off")
This call must be made whenever the sensor node is no longer
being accessed.
If this is not done, there will be addressing conflicts.
"""
if addr < 0x70 or addr > 0x77:
print("The TCA address(" + str(addr) + ") is invalid. Aborting")
return False
if channel == "off":
bus.write_byte(addr, 0)
elif channel < 0 or channel > 3:
print("The requested channel does not exist.")
return False
else:
bus.write_byte(addr, 1 << channel)
status = bus.read_byte(addr)
return status | This function will write to the control register of the
TCA module to select the channel that will be
exposed on the TCA module.
After doing this, the desired module can be used as it would be normally.
(The caller should use the address of the I2C sensor module.
The TCA module is only written to when the channel is switched.)
addr contains address of the TCA module
channel specifies the desired channel on the TCA that will be used.
Usage - Enable a channel
TCA_select(bus, self.mux_addr, channel_to_enable)
Channel to enable begins at 0 (enables first channel)
ends at 3 (enables fourth channel)
Usage - Disable all channels
TCA_select(bus, self.mux_addr, "off")
This call must be made whenever the sensor node is no longer
being accessed.
If this is not done, there will be addressing conflicts. | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/i2c_utility.py#L6-L40 | null | #!/usr/bin/python
# This file contains utility functions used to select
# channels via the I2C Multiplexer or the ADC
def TCA_select(bus, addr, channel):
"""
This function will write to the control register of the
TCA module to select the channel that will be
exposed on the TCA module.
After doing this, the desired module can be used as it would be normally.
(The caller should use the address of the I2C sensor module.
The TCA module is only written to when the channel is switched.)
addr contains address of the TCA module
channel specifies the desired channel on the TCA that will be used.
Usage - Enable a channel
TCA_select(bus, self.mux_addr, channel_to_enable)
Channel to enable begins at 0 (enables first channel)
ends at 3 (enables fourth channel)
Usage - Disable all channels
TCA_select(bus, self.mux_addr, "off")
This call must be made whenever the sensor node is no longer
being accessed.
If this is not done, there will be addressing conflicts.
"""
if addr < 0x70 or addr > 0x77:
print("The TCA address(" + str(addr) + ") is invalid. Aborting")
return False
if channel == "off":
bus.write_byte(addr, 0)
elif channel < 0 or channel > 3:
print("The requested channel does not exist.")
return False
else:
bus.write_byte(addr, 1 << channel)
status = bus.read_byte(addr)
return status
def get_ADC_value(bus, addr, channel):
"""
This method selects a channel and initiates conversion
The ADC operates at 240 SPS (12 bits) with 1x gain
One shot conversions are used, meaning a wait period is needed
in order to acquire new data. This is done via a constant poll
of the ready bit.
Upon completion, a voltage value is returned to the caller.
Usage - ADC_start(bus, SensorCluster.ADC_addr, channel_to_read)
IMPORTANT NOTE:
The ADC uses a 2.048V voltage reference
"""
if channel == 1:
INIT = 0b10000000
elif channel == 2:
INIT = 0b10100000
elif channel == 3:
INIT = 0b11000000
elif channel == 4:
INIT = 0b11100000
bus.write_byte(addr, INIT)
data = bus.read_i2c_block_data(addr, 0, 3)
status = (data[2] & 0b10000000) >> 7
while(status == 1):
data = bus.read_i2c_block_data(addr, 0, 3)
status = (data[2] & 0b10000000) >> 7
sign = data[0] & 0b00001000
val = ((data[0] & 0b0000111) << 8) | (data[1])
if (sign == 1):
val = (val ^ 0x3ff) + 1 # compute 2s complement for 12 bit val
# Convert val to a ratiomerical ADC reading
return float(val) * 2.048 / float(2047)
def IO_expander_output(bus, addr, bank, mask):
"""
Method for controlling the GPIO expander via I2C
which accepts a bank - A(0) or B(1) and a mask
to push to the pins of the expander.
The method also assumes the the expander is operating
in sequential mode. If this mode is not used,
the register addresses will need to be changed.
Usage:
GPIO_out(bus, GPIO_addr, 0, 0b00011111)
This call would turn on A0 through A4.
"""
IODIR_map = [0x00, 0x01]
output_map = [0x14, 0x15]
if (bank != 0) and (bank != 1):
print()
raise InvalidIOUsage("An invalid IO bank has been selected")
IO_direction = IODIR_map[bank]
output_reg = output_map[bank]
current_status = bus.read_byte_data(addr, output_reg)
if current_status == mask:
# This means nothing needs to happen
print("Current control status matches requested controls. " +
"No action is required.")
return True
bus.write_byte_data(addr, IO_direction, 0)
bus.write_byte_data(addr, output_reg, mask)
def get_IO_reg(bus, addr, bank):
"""
Method retrieves the register corresponding to respective bank (0 or 1)
"""
output_map = [0x14, 0x15]
if (bank != 0) and (bank != 1):
print()
raise InvalidIOUsage("An invalid IO bank has been selected")
output_reg = output_map[bank]
current_status = bus.read_byte_data(addr, output_reg)
return current_status
def import_i2c_addr(bus, opt="sensors"):
""" import_i2c_addresses will return a list of the
currently connected I2C devices.
This can be used a means to automatically detect
the number of connected sensor modules.
Modules are between int(112) and int(119)
By default, the method will return a list
of sensor addresses.
"""
i2c_list = []
for device in range(128):
try:
bus.read_byte(device)
i2c_list.append((device))
except IOError:
pass
if opt == "sensors":
sensor_list = []
for module in range(112,120):
try:
indx = i2c_list.index(module)
sensor_list.append(module)
except ValueError:
pass
return sensor_list
else:
return i2c_list
class InvalidIOUsage(Exception):
pass |
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/i2c_utility.py | get_ADC_value | python | def get_ADC_value(bus, addr, channel):
"""
This method selects a channel and initiates conversion
The ADC operates at 240 SPS (12 bits) with 1x gain
One shot conversions are used, meaning a wait period is needed
in order to acquire new data. This is done via a constant poll
of the ready bit.
Upon completion, a voltage value is returned to the caller.
Usage - ADC_start(bus, SensorCluster.ADC_addr, channel_to_read)
IMPORTANT NOTE:
The ADC uses a 2.048V voltage reference
"""
if channel == 1:
INIT = 0b10000000
elif channel == 2:
INIT = 0b10100000
elif channel == 3:
INIT = 0b11000000
elif channel == 4:
INIT = 0b11100000
bus.write_byte(addr, INIT)
data = bus.read_i2c_block_data(addr, 0, 3)
status = (data[2] & 0b10000000) >> 7
while(status == 1):
data = bus.read_i2c_block_data(addr, 0, 3)
status = (data[2] & 0b10000000) >> 7
sign = data[0] & 0b00001000
val = ((data[0] & 0b0000111) << 8) | (data[1])
if (sign == 1):
val = (val ^ 0x3ff) + 1 # compute 2s complement for 12 bit val
# Convert val to a ratiomerical ADC reading
return float(val) * 2.048 / float(2047) | This method selects a channel and initiates conversion
The ADC operates at 240 SPS (12 bits) with 1x gain
One shot conversions are used, meaning a wait period is needed
in order to acquire new data. This is done via a constant poll
of the ready bit.
Upon completion, a voltage value is returned to the caller.
Usage - ADC_start(bus, SensorCluster.ADC_addr, channel_to_read)
IMPORTANT NOTE:
The ADC uses a 2.048V voltage reference | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/i2c_utility.py#L43-L77 | null | #!/usr/bin/python
# This file contains utility functions used to select
# channels via the I2C Multiplexer or the ADC
def TCA_select(bus, addr, channel):
"""
This function will write to the control register of the
TCA module to select the channel that will be
exposed on the TCA module.
After doing this, the desired module can be used as it would be normally.
(The caller should use the address of the I2C sensor module.
The TCA module is only written to when the channel is switched.)
addr contains address of the TCA module
channel specifies the desired channel on the TCA that will be used.
Usage - Enable a channel
TCA_select(bus, self.mux_addr, channel_to_enable)
Channel to enable begins at 0 (enables first channel)
ends at 3 (enables fourth channel)
Usage - Disable all channels
TCA_select(bus, self.mux_addr, "off")
This call must be made whenever the sensor node is no longer
being accessed.
If this is not done, there will be addressing conflicts.
"""
if addr < 0x70 or addr > 0x77:
print("The TCA address(" + str(addr) + ") is invalid. Aborting")
return False
if channel == "off":
bus.write_byte(addr, 0)
elif channel < 0 or channel > 3:
print("The requested channel does not exist.")
return False
else:
bus.write_byte(addr, 1 << channel)
status = bus.read_byte(addr)
return status
def get_ADC_value(bus, addr, channel):
"""
This method selects a channel and initiates conversion
The ADC operates at 240 SPS (12 bits) with 1x gain
One shot conversions are used, meaning a wait period is needed
in order to acquire new data. This is done via a constant poll
of the ready bit.
Upon completion, a voltage value is returned to the caller.
Usage - ADC_start(bus, SensorCluster.ADC_addr, channel_to_read)
IMPORTANT NOTE:
The ADC uses a 2.048V voltage reference
"""
if channel == 1:
INIT = 0b10000000
elif channel == 2:
INIT = 0b10100000
elif channel == 3:
INIT = 0b11000000
elif channel == 4:
INIT = 0b11100000
bus.write_byte(addr, INIT)
data = bus.read_i2c_block_data(addr, 0, 3)
status = (data[2] & 0b10000000) >> 7
while(status == 1):
data = bus.read_i2c_block_data(addr, 0, 3)
status = (data[2] & 0b10000000) >> 7
sign = data[0] & 0b00001000
val = ((data[0] & 0b0000111) << 8) | (data[1])
if (sign == 1):
val = (val ^ 0x3ff) + 1 # compute 2s complement for 12 bit val
# Convert val to a ratiomerical ADC reading
return float(val) * 2.048 / float(2047)
def IO_expander_output(bus, addr, bank, mask):
"""
Method for controlling the GPIO expander via I2C
which accepts a bank - A(0) or B(1) and a mask
to push to the pins of the expander.
The method also assumes the the expander is operating
in sequential mode. If this mode is not used,
the register addresses will need to be changed.
Usage:
GPIO_out(bus, GPIO_addr, 0, 0b00011111)
This call would turn on A0 through A4.
"""
IODIR_map = [0x00, 0x01]
output_map = [0x14, 0x15]
if (bank != 0) and (bank != 1):
print()
raise InvalidIOUsage("An invalid IO bank has been selected")
IO_direction = IODIR_map[bank]
output_reg = output_map[bank]
current_status = bus.read_byte_data(addr, output_reg)
if current_status == mask:
# This means nothing needs to happen
print("Current control status matches requested controls. " +
"No action is required.")
return True
bus.write_byte_data(addr, IO_direction, 0)
bus.write_byte_data(addr, output_reg, mask)
def get_IO_reg(bus, addr, bank):
"""
Method retrieves the register corresponding to respective bank (0 or 1)
"""
output_map = [0x14, 0x15]
if (bank != 0) and (bank != 1):
print()
raise InvalidIOUsage("An invalid IO bank has been selected")
output_reg = output_map[bank]
current_status = bus.read_byte_data(addr, output_reg)
return current_status
def import_i2c_addr(bus, opt="sensors"):
""" import_i2c_addresses will return a list of the
currently connected I2C devices.
This can be used a means to automatically detect
the number of connected sensor modules.
Modules are between int(112) and int(119)
By default, the method will return a list
of sensor addresses.
"""
i2c_list = []
for device in range(128):
try:
bus.read_byte(device)
i2c_list.append((device))
except IOError:
pass
if opt == "sensors":
sensor_list = []
for module in range(112,120):
try:
indx = i2c_list.index(module)
sensor_list.append(module)
except ValueError:
pass
return sensor_list
else:
return i2c_list
class InvalidIOUsage(Exception):
pass |
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/i2c_utility.py | IO_expander_output | python | def IO_expander_output(bus, addr, bank, mask):
"""
Method for controlling the GPIO expander via I2C
which accepts a bank - A(0) or B(1) and a mask
to push to the pins of the expander.
The method also assumes the the expander is operating
in sequential mode. If this mode is not used,
the register addresses will need to be changed.
Usage:
GPIO_out(bus, GPIO_addr, 0, 0b00011111)
This call would turn on A0 through A4.
"""
IODIR_map = [0x00, 0x01]
output_map = [0x14, 0x15]
if (bank != 0) and (bank != 1):
print()
raise InvalidIOUsage("An invalid IO bank has been selected")
IO_direction = IODIR_map[bank]
output_reg = output_map[bank]
current_status = bus.read_byte_data(addr, output_reg)
if current_status == mask:
# This means nothing needs to happen
print("Current control status matches requested controls. " +
"No action is required.")
return True
bus.write_byte_data(addr, IO_direction, 0)
bus.write_byte_data(addr, output_reg, mask) | Method for controlling the GPIO expander via I2C
which accepts a bank - A(0) or B(1) and a mask
to push to the pins of the expander.
The method also assumes the the expander is operating
in sequential mode. If this mode is not used,
the register addresses will need to be changed.
Usage:
GPIO_out(bus, GPIO_addr, 0, 0b00011111)
This call would turn on A0 through A4. | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/i2c_utility.py#L80-L114 | null | #!/usr/bin/python
# This file contains utility functions used to select
# channels via the I2C Multiplexer or the ADC
def TCA_select(bus, addr, channel):
"""
This function will write to the control register of the
TCA module to select the channel that will be
exposed on the TCA module.
After doing this, the desired module can be used as it would be normally.
(The caller should use the address of the I2C sensor module.
The TCA module is only written to when the channel is switched.)
addr contains address of the TCA module
channel specifies the desired channel on the TCA that will be used.
Usage - Enable a channel
TCA_select(bus, self.mux_addr, channel_to_enable)
Channel to enable begins at 0 (enables first channel)
ends at 3 (enables fourth channel)
Usage - Disable all channels
TCA_select(bus, self.mux_addr, "off")
This call must be made whenever the sensor node is no longer
being accessed.
If this is not done, there will be addressing conflicts.
"""
if addr < 0x70 or addr > 0x77:
print("The TCA address(" + str(addr) + ") is invalid. Aborting")
return False
if channel == "off":
bus.write_byte(addr, 0)
elif channel < 0 or channel > 3:
print("The requested channel does not exist.")
return False
else:
bus.write_byte(addr, 1 << channel)
status = bus.read_byte(addr)
return status
def get_ADC_value(bus, addr, channel):
    """Run one one-shot conversion on the MCP342x ADC and return volts.

    Selects ``channel`` (1-4), starts a one-shot conversion at 240 SPS
    (12 bits, 1x gain) and polls the ready flag until fresh data is
    available.  The raw count is scaled against the chip's internal
    2.048 V reference.

    bus     -- open SMBus object
    addr    -- I2C address of the ADC
    channel -- input channel, 1 through 4

    Raises ValueError for a channel outside 1-4 (previously this fell
    through to an UnboundLocalError).
    """
    # Config word: RDY=1 (start one-shot) | channel select | 240 SPS, 1x gain.
    INIT_WORDS = {1: 0b10000000, 2: 0b10100000, 3: 0b11000000, 4: 0b11100000}
    try:
        init = INIT_WORDS[channel]
    except KeyError:
        raise ValueError("ADC channel must be 1-4, got %r" % (channel,))
    bus.write_byte(addr, init)
    data = bus.read_i2c_block_data(addr, 0, 3)
    # Bit 7 of the config byte reads 1 while the conversion is still running.
    status = (data[2] & 0b10000000) >> 7
    while status == 1:
        data = bus.read_i2c_block_data(addr, 0, 3)
        status = (data[2] & 0b10000000) >> 7
    sign = data[0] & 0b00001000
    # NOTE(review): the 3-bit mask and the 0x3ff complement below look
    # inconsistent for a 12-bit result -- confirm against the MCP342x
    # datasheet before trusting readings near full scale.
    val = ((data[0] & 0b0000111) << 8) | (data[1])
    if sign:  # bug fix: the sign bit masks to 8, so `sign == 1` never fired
        val = (val ^ 0x3ff) + 1  # two's complement magnitude
    # Convert the raw count to a ratiometric voltage.
    return float(val) * 2.048 / float(2047)
def IO_expander_output(bus, addr, bank, mask):
    """Drive a full output bank of the MCP23017 GPIO expander.

    bank -- 0 for bank A, 1 for bank B
    mask -- byte latched onto the bank's outputs (bit N drives pin N)

    The expander is assumed to be in sequential register mode; if a
    different mode is used, the register addresses below must change.
    The whole bank is configured as outputs (IODIR = 0) before the mask
    is latched.  Returns True; when the latch already holds ``mask``
    the redundant write is skipped.

    Raises InvalidIOUsage when ``bank`` is neither 0 nor 1.

    Usage:
        IO_expander_output(bus, GPIO_addr, 0, 0b00011111)  # A0-A4 on
    """
    IODIR_map = [0x00, 0x01]   # direction registers for banks A/B
    output_map = [0x14, 0x15]  # output latch registers for banks A/B
    if bank not in (0, 1):
        # bug fix: dropped a stray empty print() that preceded this raise
        raise InvalidIOUsage("An invalid IO bank has been selected")
    IO_direction = IODIR_map[bank]
    output_reg = output_map[bank]
    current_status = bus.read_byte_data(addr, output_reg)
    if current_status == mask:
        # Latch already matches -- avoid redundant bus transactions.
        print("Current control status matches requested controls. " +
              "No action is required.")
        return True
    bus.write_byte_data(addr, IO_direction, 0)  # whole bank as outputs
    bus.write_byte_data(addr, output_reg, mask)
    return True  # bug fix: previously returned None after a real write
def get_IO_reg(bus, addr, bank):
    """Read back the output-latch register of one MCP23017 bank.

    bank 0 maps to register 0x14 (bank A latch), bank 1 to 0x15
    (bank B).  Returns the raw register byte; raises InvalidIOUsage
    for any other bank value.
    """
    latch_registers = (0x14, 0x15)
    if bank not in (0, 1):
        print()
        raise InvalidIOUsage("An invalid IO bank has been selected")
    return bus.read_byte_data(addr, latch_registers[bank])
def import_i2c_addr(bus, opt="sensors"):
    """Probe the I2C bus and return a list of responding device addresses.

    Every address 0-127 is probed with a one-byte read; devices that do
    not ACK raise IOError and are skipped.  With the default
    ``opt="sensors"`` only addresses 112-119 (0x70-0x77, the sensor-node
    multiplexer range) are returned; any other ``opt`` value returns the
    full list.  Results are in ascending address order either way.
    """
    present = []
    for device in range(128):
        try:
            bus.read_byte(device)
        except IOError:
            continue  # no device ACKed this address
        present.append(device)
    if opt == "sensors":
        # Sensor-node muxes live at 0x70-0x77 (112-119 decimal).
        # (Idiom fix: membership test replaces the old index()/ValueError
        # dance with its unused result.)
        return [device for device in present if 112 <= device <= 119]
    return present
class InvalidIOUsage(Exception):
    """Raised when an IO-expander call names a bank other than 0 or 1."""
    pass
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/i2c_utility.py | get_IO_reg | python | def get_IO_reg(bus, addr, bank):
"""
Method retrieves the register corresponding to respective bank (0 or 1)
"""
output_map = [0x14, 0x15]
if (bank != 0) and (bank != 1):
print()
raise InvalidIOUsage("An invalid IO bank has been selected")
output_reg = output_map[bank]
current_status = bus.read_byte_data(addr, output_reg)
return current_status | Method retrieves the register corresponding to respective bank (0 or 1) | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/i2c_utility.py#L116-L127 | null | #!/usr/bin/python
# This file contains utility functions used to select
# channels via the I2C Multiplexer or the ADC
def TCA_select(bus, addr, channel):
"""
This function will write to the control register of the
TCA module to select the channel that will be
exposed on the TCA module.
After doing this, the desired module can be used as it would be normally.
(The caller should use the address of the I2C sensor module.
The TCA module is only written to when the channel is switched.)
addr contains address of the TCA module
channel specifies the desired channel on the TCA that will be used.
Usage - Enable a channel
TCA_select(bus, self.mux_addr, channel_to_enable)
Channel to enable begins at 0 (enables first channel)
ends at 3 (enables fourth channel)
Usage - Disable all channels
TCA_select(bus, self.mux_addr, "off")
This call must be made whenever the sensor node is no longer
being accessed.
If this is not done, there will be addressing conflicts.
"""
if addr < 0x70 or addr > 0x77:
print("The TCA address(" + str(addr) + ") is invalid. Aborting")
return False
if channel == "off":
bus.write_byte(addr, 0)
elif channel < 0 or channel > 3:
print("The requested channel does not exist.")
return False
else:
bus.write_byte(addr, 1 << channel)
status = bus.read_byte(addr)
return status
def get_ADC_value(bus, addr, channel):
"""
This method selects a channel and initiates conversion
The ADC operates at 240 SPS (12 bits) with 1x gain
One shot conversions are used, meaning a wait period is needed
in order to acquire new data. This is done via a constant poll
of the ready bit.
Upon completion, a voltage value is returned to the caller.
Usage - ADC_start(bus, SensorCluster.ADC_addr, channel_to_read)
IMPORTANT NOTE:
The ADC uses a 2.048V voltage reference
"""
if channel == 1:
INIT = 0b10000000
elif channel == 2:
INIT = 0b10100000
elif channel == 3:
INIT = 0b11000000
elif channel == 4:
INIT = 0b11100000
bus.write_byte(addr, INIT)
data = bus.read_i2c_block_data(addr, 0, 3)
status = (data[2] & 0b10000000) >> 7
while(status == 1):
data = bus.read_i2c_block_data(addr, 0, 3)
status = (data[2] & 0b10000000) >> 7
sign = data[0] & 0b00001000
val = ((data[0] & 0b0000111) << 8) | (data[1])
if (sign == 1):
val = (val ^ 0x3ff) + 1 # compute 2s complement for 12 bit val
# Convert val to a ratiomerical ADC reading
return float(val) * 2.048 / float(2047)
def IO_expander_output(bus, addr, bank, mask):
"""
Method for controlling the GPIO expander via I2C
which accepts a bank - A(0) or B(1) and a mask
to push to the pins of the expander.
The method also assumes the the expander is operating
in sequential mode. If this mode is not used,
the register addresses will need to be changed.
Usage:
GPIO_out(bus, GPIO_addr, 0, 0b00011111)
This call would turn on A0 through A4.
"""
IODIR_map = [0x00, 0x01]
output_map = [0x14, 0x15]
if (bank != 0) and (bank != 1):
print()
raise InvalidIOUsage("An invalid IO bank has been selected")
IO_direction = IODIR_map[bank]
output_reg = output_map[bank]
current_status = bus.read_byte_data(addr, output_reg)
if current_status == mask:
# This means nothing needs to happen
print("Current control status matches requested controls. " +
"No action is required.")
return True
bus.write_byte_data(addr, IO_direction, 0)
bus.write_byte_data(addr, output_reg, mask)
def get_IO_reg(bus, addr, bank):
"""
Method retrieves the register corresponding to respective bank (0 or 1)
"""
output_map = [0x14, 0x15]
if (bank != 0) and (bank != 1):
print()
raise InvalidIOUsage("An invalid IO bank has been selected")
output_reg = output_map[bank]
current_status = bus.read_byte_data(addr, output_reg)
return current_status
def import_i2c_addr(bus, opt="sensors"):
""" import_i2c_addresses will return a list of the
currently connected I2C devices.
This can be used a means to automatically detect
the number of connected sensor modules.
Modules are between int(112) and int(119)
By default, the method will return a list
of sensor addresses.
"""
i2c_list = []
for device in range(128):
try:
bus.read_byte(device)
i2c_list.append((device))
except IOError:
pass
if opt == "sensors":
sensor_list = []
for module in range(112,120):
try:
indx = i2c_list.index(module)
sensor_list.append(module)
except ValueError:
pass
return sensor_list
else:
return i2c_list
class InvalidIOUsage(Exception):
pass |
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/i2c_utility.py | import_i2c_addr | python | def import_i2c_addr(bus, opt="sensors"):
""" import_i2c_addresses will return a list of the
currently connected I2C devices.
This can be used a means to automatically detect
the number of connected sensor modules.
Modules are between int(112) and int(119)
By default, the method will return a list
of sensor addresses.
"""
i2c_list = []
for device in range(128):
try:
bus.read_byte(device)
i2c_list.append((device))
except IOError:
pass
if opt == "sensors":
sensor_list = []
for module in range(112,120):
try:
indx = i2c_list.index(module)
sensor_list.append(module)
except ValueError:
pass
return sensor_list
else:
return i2c_list | import_i2c_addresses will return a list of the
currently connected I2C devices.
This can be used a means to automatically detect
the number of connected sensor modules.
Modules are between int(112) and int(119)
By default, the method will return a list
of sensor addresses. | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/i2c_utility.py#L129-L160 | null | #!/usr/bin/python
# This file contains utility functions used to select
# channels via the I2C Multiplexer or the ADC
def TCA_select(bus, addr, channel):
"""
This function will write to the control register of the
TCA module to select the channel that will be
exposed on the TCA module.
After doing this, the desired module can be used as it would be normally.
(The caller should use the address of the I2C sensor module.
The TCA module is only written to when the channel is switched.)
addr contains address of the TCA module
channel specifies the desired channel on the TCA that will be used.
Usage - Enable a channel
TCA_select(bus, self.mux_addr, channel_to_enable)
Channel to enable begins at 0 (enables first channel)
ends at 3 (enables fourth channel)
Usage - Disable all channels
TCA_select(bus, self.mux_addr, "off")
This call must be made whenever the sensor node is no longer
being accessed.
If this is not done, there will be addressing conflicts.
"""
if addr < 0x70 or addr > 0x77:
print("The TCA address(" + str(addr) + ") is invalid. Aborting")
return False
if channel == "off":
bus.write_byte(addr, 0)
elif channel < 0 or channel > 3:
print("The requested channel does not exist.")
return False
else:
bus.write_byte(addr, 1 << channel)
status = bus.read_byte(addr)
return status
def get_ADC_value(bus, addr, channel):
"""
This method selects a channel and initiates conversion
The ADC operates at 240 SPS (12 bits) with 1x gain
One shot conversions are used, meaning a wait period is needed
in order to acquire new data. This is done via a constant poll
of the ready bit.
Upon completion, a voltage value is returned to the caller.
Usage - ADC_start(bus, SensorCluster.ADC_addr, channel_to_read)
IMPORTANT NOTE:
The ADC uses a 2.048V voltage reference
"""
if channel == 1:
INIT = 0b10000000
elif channel == 2:
INIT = 0b10100000
elif channel == 3:
INIT = 0b11000000
elif channel == 4:
INIT = 0b11100000
bus.write_byte(addr, INIT)
data = bus.read_i2c_block_data(addr, 0, 3)
status = (data[2] & 0b10000000) >> 7
while(status == 1):
data = bus.read_i2c_block_data(addr, 0, 3)
status = (data[2] & 0b10000000) >> 7
sign = data[0] & 0b00001000
val = ((data[0] & 0b0000111) << 8) | (data[1])
if (sign == 1):
val = (val ^ 0x3ff) + 1 # compute 2s complement for 12 bit val
# Convert val to a ratiomerical ADC reading
return float(val) * 2.048 / float(2047)
def IO_expander_output(bus, addr, bank, mask):
"""
Method for controlling the GPIO expander via I2C
which accepts a bank - A(0) or B(1) and a mask
to push to the pins of the expander.
The method also assumes the the expander is operating
in sequential mode. If this mode is not used,
the register addresses will need to be changed.
Usage:
GPIO_out(bus, GPIO_addr, 0, 0b00011111)
This call would turn on A0 through A4.
"""
IODIR_map = [0x00, 0x01]
output_map = [0x14, 0x15]
if (bank != 0) and (bank != 1):
print()
raise InvalidIOUsage("An invalid IO bank has been selected")
IO_direction = IODIR_map[bank]
output_reg = output_map[bank]
current_status = bus.read_byte_data(addr, output_reg)
if current_status == mask:
# This means nothing needs to happen
print("Current control status matches requested controls. " +
"No action is required.")
return True
bus.write_byte_data(addr, IO_direction, 0)
bus.write_byte_data(addr, output_reg, mask)
def get_IO_reg(bus, addr, bank):
"""
Method retrieves the register corresponding to respective bank (0 or 1)
"""
output_map = [0x14, 0x15]
if (bank != 0) and (bank != 1):
print()
raise InvalidIOUsage("An invalid IO bank has been selected")
output_reg = output_map[bank]
current_status = bus.read_byte_data(addr, output_reg)
return current_status
def import_i2c_addr(bus, opt="sensors"):
""" import_i2c_addresses will return a list of the
currently connected I2C devices.
This can be used a means to automatically detect
the number of connected sensor modules.
Modules are between int(112) and int(119)
By default, the method will return a list
of sensor addresses.
"""
i2c_list = []
for device in range(128):
try:
bus.read_byte(device)
i2c_list.append((device))
except IOError:
pass
if opt == "sensors":
sensor_list = []
for module in range(112,120):
try:
indx = i2c_list.index(module)
sensor_list.append(module)
except ValueError:
pass
return sensor_list
else:
return i2c_list
class InvalidIOUsage(Exception):
pass |
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/sense.py | get_lux_count | python | def get_lux_count(lux_byte):
""" Method to convert data from the TSL2550D lux sensor
into more easily usable ADC count values.
"""
LUX_VALID_MASK = 0b10000000
LUX_CHORD_MASK = 0b01110000
LUX_STEP_MASK = 0b00001111
valid = lux_byte & LUX_VALID_MASK
if valid != 0:
step_num = (lux_byte & LUX_STEP_MASK)
# Shift to normalize value
chord_num = (lux_byte & LUX_CHORD_MASK) >> 4
step_val = 2**chord_num
chord_val = int(16.5 * (step_val - 1))
count = chord_val + step_val * step_num
return count
else:
raise SensorError("Invalid lux sensor data.") | Method to convert data from the TSL2550D lux sensor
into more easily usable ADC count values. | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L282-L300 | null | #!/usr/bin/python
# Contains class information for sensor nodes.
# Each plant is treated as a base, and each plant contains multiple sensors.
# Basic usage:
# Create a plant record using:
# plant1 = Plant(temp_addr, humidity_addr, lux_addr, adc_addr)
# Updating individual sensor values can be done with
# Note that SMBus must be imported and initiated
# in order to use these classes.
import smbus
from control import ControlCluster
from i2c_utility import TCA_select, get_ADC_value, import_i2c_addr
from i2c_utility import IO_expander_output, get_IO_reg
from time import sleep, time # needed to force a delay in humidity module
from math import e
class IterList(type):
    """Metaclass that makes a class iterable over its registered instances.

    Classes using this metaclass keep every created instance in a
    class-level ``_list``; iterating the class itself then walks those
    instances (e.g. ``for sensor in SensorCluster``).
    """
    def __iter__(cls):
        # Delegate to the instance registry maintained by __init__.
        return iter(cls._list)
class SensorCluster(object):
'Base class for each individual plant containing sensor info'
__metaclass__ = IterList
_list = []
analog_power_pin = 0
power_bank = 0 # bank and pin used to toggle analog sensor power
temp_addr = 0x48
temp_chan = 3
humidity_addr = 0x27
humidity_chan = 1
lux_addr = 0x39
lux_chan = 0
adc_addr = 0x68
adc_chan = 2
moisture_chan = 1
tank_adc_adr = 0x6c
tank_adc_chan = 0
bus = None
    def __init__(self, ID, mux_addr=None):
        """Create a sensor cluster for plant number ``ID`` (1-based).

        Probes the I2C bus for attached sensor-node multiplexers and,
        when ``mux_addr`` is not given, adopts the ID-th discovered
        address.  Raises I2CBusError when ``ID`` does not index a
        discovered node.
        NOTE(review): this assumes SensorCluster.bus was already set to
        an open SMBus object before instantiation -- confirm call order.
        """
        # Initializes cluster, enumeration, and sets up address info
        sensor_addr = import_i2c_addr(SensorCluster.bus)
        if (ID < 1 or ID > len(sensor_addr)):
            raise I2CBusError("Plant ID out of range.")
        self.mux_addr = mux_addr or (sensor_addr[ID-1])
        self.ID = ID  # Plant number specified by caller
        # Cached sensor readings; populated by the update_* methods.
        self.temp = 0
        self.humidity = 0
        self.lux = 0
        self.light_ratio = 0
        self.soil_moisture = 0
        self.acidity = 0
        self.timestamp = time()  # record time at instantiation
        # Register this instance so iterating the class finds it.
        self._list.append(self)
        self.update_count = 0
    def update_lux(self, extend=0):
        """Read the TSL2550D and update ``self.lux`` / ``self.light_ratio``.

        Powers the sensor up, runs an ADC cycle on both photodiode
        channels and combines the counts into a compensated lux value.
        Contains roughly 0.8 s of delay in standard mode; pass
        ``extend=1`` for extended mode (~0.16 s delay, 5x coarser counts).

        Returns the mux status from deselecting the sensor head, or
        raises SensorError when the sensor does not acknowledge power-up.
        NOTE(review): ``ratio = count1 / (count0 - count1)`` divides by
        zero when both channels read equal counts, and truncates under
        Python 2 integer division -- confirm the intended interpreter.
        """
        DEVICE_REG_OUT = 0x1d  # NOTE(review): unused -- candidate for removal
        LUX_PWR_ON = 0x03
        if extend == 1:
            # Extended range: shorter integration window, 5x scale factor.
            LUX_MODE = 0x1d
            delay = .08
            scale = 5
        else:
            LUX_MODE = 0x18
            delay = .4
            scale = 1
        LUX_READ_CH0 = 0x43
        LUX_READ_CH1 = 0x83
        # Select correct I2C mux channel on TCA module
        TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan)
        # Make sure lux sensor is powered up.
        SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON)
        lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON)
        # Check for successful powerup
        if (lux_on == LUX_PWR_ON):
            # Send command to initiate ADC on each channel
            # Read each channel after the new data is ready
            SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE)
            SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0)
            sleep(delay)
            adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
            count0 = get_lux_count(adc_ch0) * scale  # 5x for extended mode
            SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1)
            sleep(delay)
            adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
            count1 = get_lux_count(adc_ch1) * scale  # 5x for extended mode
            # Empirical compensation from the two-channel counts.
            ratio = count1 / (count0 - count1)
            lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2))
            self.light_ratio = float(count1)/float(count0)
            print("Light ratio Ch1/Ch0: ", self.light_ratio)
            self.lux = round(lux, 3)
            return TCA_select(SensorCluster.bus, self.mux_addr, "off")
        else:
            raise SensorError("The lux sensor is powered down.")
    def update_humidity_temp(self):
        """Read the HIH7xxx; update ``self.humidity`` and ``self.temp``.

        A write-quick triggers a measurement, then four bytes are read:
        two status bits + 14-bit humidity, followed by 14-bit
        temperature.  Humidity is stored as %RH; temperature is
        converted to Fahrenheit.  Raises I2CBusError when the status
        bits flag unusable data (currently never, as the inline comment
        notes).
        NOTE(review): ``* 9/5`` truncates to ``* 1`` under Python 2
        integer division -- confirm the target interpreter or use 9.0/5.
        """
        # Create mask for STATUS (first two bits of 64 bit wide result)
        STATUS = 0b11 << 6
        TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.humidity_chan)
        SensorCluster.bus.write_quick(SensorCluster.humidity_addr)  # Begin conversion
        sleep(.25)
        # wait 100ms to make sure the conversion takes place.
        data = SensorCluster.bus.read_i2c_block_data(SensorCluster.humidity_addr, 0, 4)
        status = (data[0] & STATUS) >> 6
        if status == 0 or status == 1:  # will always pass for now.
            # 14-bit humidity count spans 0..(2**14 - 2) -> 0..100 %RH.
            humidity = round((((data[0] & 0x3f) << 8) |
                              data[1]) * 100.0 / (2**14 - 2), 3)
            self.humidity = humidity
            # 14-bit temperature -> Celsius (-40..125 range) -> Fahrenheit.
            self.temp = (round((((data[2] << 6) + ((data[3] & 0xfc) >> 2))
                                * 165.0 / 16382.0 - 40.0), 3) * 9/5) + 32
            return TCA_select(SensorCluster.bus, self.mux_addr, "off")
        else:
            raise I2CBusError("Unable to retrieve humidity")
    def update_soil_moisture(self):
        """Digitize the soil-moisture probe; update ``self.soil_moisture``.

        Powers the shared analog sensor rail, lets the probe settle,
        reads it through the cluster ADC, then powers the rail back
        down and deselects the mux.  Returns the mux status from
        deselection; raises SensorError on a negative reading.
        NOTE(review): ``moisture / 2.048`` yields a 0..1 fraction, not
        the percentage the inline comment claims -- confirm consumers.
        """
        SensorCluster.analog_sensor_power(SensorCluster.bus, "on")  # turn on sensor
        sleep(.2)  # allow the analog front-end to stabilize
        TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.adc_chan)
        moisture = get_ADC_value(
            SensorCluster.bus, SensorCluster.adc_addr, SensorCluster.moisture_chan)
        status = TCA_select(SensorCluster.bus, self.mux_addr, "off")  # Turn off mux.
        SensorCluster.analog_sensor_power(SensorCluster.bus, "off")  # turn off sensor
        if (moisture >= 0):
            soil_moisture = moisture/2.048  # Scale to a percentage value
            self.soil_moisture = round(soil_moisture,3)
        else:
            raise SensorError(
                "The soil moisture meter is not configured correctly.")
        return status
    def update_instance_sensors(self, opt=None):
        """Refresh every sensor on this cluster and stamp the update time.

        Always updates lux and humidity/temperature; pass ``opt="all"``
        to also read soil moisture (a failed moisture read is
        tolerated).  The I2C multiplexer is deselected afterwards to
        avoid address conflicts; raises I2CBusError if deselection
        fails.
        """
        self.update_count += 1
        self.update_lux()
        self.update_humidity_temp()
        if opt == "all":
            try:
                self.update_soil_moisture()
            except SensorError:
                # This could be handled with a repeat request later.
                pass
        self.timestamp = time()
        # disable sensor module
        tca_status = TCA_select(SensorCluster.bus, self.mux_addr, "off")
        if tca_status != 0:
            raise I2CBusError(
                "Bus multiplexer was unable to switch off to prevent conflicts")
def sensor_values(self):
"""
Returns the values of all sensors for this cluster
"""
self.update_instance_sensors(opt="all")
return {
"light": self.lux,
"water": self.soil_moisture,
"humidity": self.humidity,
"temperature": self.temp
}
    @classmethod
    def update_all_sensors(cls, opt=None):
        """Refresh cached readings on every registered SensorCluster.

        Iterates the class itself (enabled by the IterList metaclass)
        and calls ``update_instance_sensors(opt)`` on each instance;
        pass ``opt="all"`` to include the analog soil-moisture reading.
        """
        for sensorobj in cls:
            sensorobj.update_instance_sensors(opt)
    @classmethod
    def analog_sensor_power(cls, bus, operation):
        """Switch the shared analog sensor supply rail "on" or "off".

        All soil-moisture probes hang off one GPIO-expander pin
        (``cls.analog_power_pin`` on bank ``cls.power_bank`` at address
        0x20), so this is a class-level operation.  The current bank
        state is read back first so only the power bit changes.  Leave
        the rail on for at least ~100 ms before sampling so the probes
        stabilize.  Raises SensorError for any other ``operation``.
        """
        # Set appropriate analog sensor power bit in GPIO mask
        # using the ControlCluster bank_mask to avoid overwriting any data
        reg_data = get_IO_reg(bus, 0x20, cls.power_bank)
        if operation == "on":
            reg_data = reg_data | 1 << cls.analog_power_pin
        elif operation == "off":
            reg_data = reg_data & (0b11111111 ^ (1 << cls.analog_power_pin))
        else:
            raise SensorError(
                "Invalid command used while enabling analog sensors")
        # Send updated IO mask to output
        IO_expander_output(bus, 0x20, cls.power_bank, reg_data)
    @classmethod
    def get_water_level(cls):
        """Measure tank depth; return the fill level as a 0..1 fraction.

        Averages five ADC samples of the resistive depth sensor (wired
        as a divider against ``rref``), converts resistance to depth
        with an empirically fitted linear map, and stores the fraction
        on ``cls.water_remaining`` as a side effect.  Depths under 1 cm
        are clamped to 0 (below the sensor's trustworthy range).
        """
        # ----------
        # These values should be updated based on the real system parameters
        vref = 4.95
        tank_height = 17.5  # in centimeters (height of container)
        rref = 2668  # Reference resistor
        # ----------
        val = 0
        for i in range(5):
            # Take five readings and do an average
            # Fetch value from ADC (0x69 - ch1)
            val = get_ADC_value(cls.bus, 0x6c, 1) + val
        avg = val / 5
        # Back out the sensor resistance from the voltage-divider equation.
        water_sensor_res = rref * avg/(vref - avg)
        depth_cm = water_sensor_res * \
            (-.0163) + 28.127  # measured transfer adjusted offset
        if depth_cm < 1.0:  # Below 1cm, the values should not be trusted.
            depth_cm = 0
        cls.water_remaining = depth_cm / tank_height
        # Return the current depth in case the user is interested in
        # that parameter alone. (IE for automatic shut-off)
        return depth_cm/tank_height
def get_lux_count(lux_byte):
    """Decode one TSL2550D data byte into a linear ADC count.

    The sensor packs readings as bit 7 = valid flag, bits 6..4 = chord
    number and bits 3..0 = step within the chord.  Raises SensorError
    when the valid flag is clear.
    """
    if not (lux_byte & 0b10000000):  # valid flag (bit 7) must be set
        raise SensorError("Invalid lux sensor data.")
    chord = (lux_byte >> 4) & 0b0111
    step = lux_byte & 0b00001111
    step_size = 1 << chord
    chord_base = int(16.5 * (step_size - 1))
    return chord_base + step_size * step
class SensorError(Exception):
    """Non-fatal sensor fault.

    Raised when a sensor is powered down, unplugged from its slot, or
    returns invalid data.  All I2C devices within the affected sensor
    cluster should be deselected before doing anything else.
    """
    pass
class I2CBusError(Exception):
    """Typically fatal bus-level failure.

    Raised when something on the bus has become unresponsive -- for
    example when the I2C multiplexer cannot be disabled after
    successive updates, which would leave conflicting addresses live.
    """
    pass
|
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/sense.py | SensorCluster.update_lux | python | def update_lux(self, extend=0):
""" Communicates with the TSL2550D light sensor and returns a
lux value.
Note that this method contains approximately 1 second of total delay.
This delay is necessary in order to obtain full resolution
compensated lux values.
Alternatively, the device could be put in extended mode,
which drops some resolution in favor of shorter delays.
"""
DEVICE_REG_OUT = 0x1d
LUX_PWR_ON = 0x03
if extend == 1:
LUX_MODE = 0x1d
delay = .08
scale = 5
else:
LUX_MODE = 0x18
delay = .4
scale = 1
LUX_READ_CH0 = 0x43
LUX_READ_CH1 = 0x83
# Select correct I2C mux channel on TCA module
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan)
# Make sure lux sensor is powered up.
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON)
lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON)
# Check for successful powerup
if (lux_on == LUX_PWR_ON):
# Send command to initiate ADC on each channel
# Read each channel after the new data is ready
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE)
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0)
sleep(delay)
adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count0 = get_lux_count(adc_ch0) * scale # 5x for extended mode
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1)
sleep(delay)
adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count1 = get_lux_count(adc_ch1) * scale # 5x for extended mode
ratio = count1 / (count0 - count1)
lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2))
self.light_ratio = float(count1)/float(count0)
print("Light ratio Ch1/Ch0: ", self.light_ratio)
self.lux = round(lux, 3)
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise SensorError("The lux sensor is powered down.") | Communicates with the TSL2550D light sensor and returns a
lux value.
Note that this method contains approximately 1 second of total delay.
This delay is necessary in order to obtain full resolution
compensated lux values.
Alternatively, the device could be put in extended mode,
which drops some resolution in favor of shorter delays. | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L64-L115 | [
"def TCA_select(bus, addr, channel):\n \"\"\"\n This function will write to the control register of the\n TCA module to select the channel that will be\n exposed on the TCA module.\n After doing this, the desired module can be used as it would be normally.\n (The caller should use the address of the I2C sensor module.\n The TCA module is only written to when the channel is switched.)\n addr contains address of the TCA module\n channel specifies the desired channel on the TCA that will be used.\n\n Usage - Enable a channel\n TCA_select(bus, self.mux_addr, channel_to_enable)\n Channel to enable begins at 0 (enables first channel)\n ends at 3 (enables fourth channel)\n\n Usage - Disable all channels\n TCA_select(bus, self.mux_addr, \"off\")\n This call must be made whenever the sensor node is no longer\n being accessed.\n If this is not done, there will be addressing conflicts.\n \"\"\"\n if addr < 0x70 or addr > 0x77:\n print(\"The TCA address(\" + str(addr) + \") is invalid. Aborting\")\n return False\n if channel == \"off\":\n bus.write_byte(addr, 0)\n elif channel < 0 or channel > 3:\n print(\"The requested channel does not exist.\")\n return False\n else:\n bus.write_byte(addr, 1 << channel)\n\n status = bus.read_byte(addr)\n return status\n"
] | class SensorCluster(object):
'Base class for each individual plant containing sensor info'
__metaclass__ = IterList
_list = []
analog_power_pin = 0
power_bank = 0 # bank and pin used to toggle analog sensor power
temp_addr = 0x48
temp_chan = 3
humidity_addr = 0x27
humidity_chan = 1
lux_addr = 0x39
lux_chan = 0
adc_addr = 0x68
adc_chan = 2
moisture_chan = 1
tank_adc_adr = 0x6c
tank_adc_chan = 0
bus = None
def __init__(self, ID, mux_addr=None):
# Initializes cluster, enumeration, and sets up address info
sensor_addr = import_i2c_addr(SensorCluster.bus)
if (ID < 1 or ID > len(sensor_addr)):
raise I2CBusError("Plant ID out of range.")
self.mux_addr = mux_addr or (sensor_addr[ID-1])
self.ID = ID # Plant number specified by caller
self.temp = 0
self.humidity = 0
self.lux = 0
self.light_ratio = 0
self.soil_moisture = 0
self.acidity = 0
self.timestamp = time() # record time at instantiation
self._list.append(self)
self.update_count = 0
def update_lux(self, extend=0):
""" Communicates with the TSL2550D light sensor and returns a
lux value.
Note that this method contains approximately 1 second of total delay.
This delay is necessary in order to obtain full resolution
compensated lux values.
Alternatively, the device could be put in extended mode,
which drops some resolution in favor of shorter delays.
"""
DEVICE_REG_OUT = 0x1d
LUX_PWR_ON = 0x03
if extend == 1:
LUX_MODE = 0x1d
delay = .08
scale = 5
else:
LUX_MODE = 0x18
delay = .4
scale = 1
LUX_READ_CH0 = 0x43
LUX_READ_CH1 = 0x83
# Select correct I2C mux channel on TCA module
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan)
# Make sure lux sensor is powered up.
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON)
lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON)
# Check for successful powerup
if (lux_on == LUX_PWR_ON):
# Send command to initiate ADC on each channel
# Read each channel after the new data is ready
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE)
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0)
sleep(delay)
adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count0 = get_lux_count(adc_ch0) * scale # 5x for extended mode
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1)
sleep(delay)
adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count1 = get_lux_count(adc_ch1) * scale # 5x for extended mode
ratio = count1 / (count0 - count1)
lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2))
self.light_ratio = float(count1)/float(count0)
print("Light ratio Ch1/Ch0: ", self.light_ratio)
self.lux = round(lux, 3)
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise SensorError("The lux sensor is powered down.")
def update_humidity_temp(self):
""" This method utilizes the HIH7xxx sensor to read
humidity and temperature in one call.
"""
# Create mask for STATUS (first two bits of 64 bit wide result)
STATUS = 0b11 << 6
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.humidity_chan)
SensorCluster.bus.write_quick(SensorCluster.humidity_addr) # Begin conversion
sleep(.25)
# wait 100ms to make sure the conversion takes place.
data = SensorCluster.bus.read_i2c_block_data(SensorCluster.humidity_addr, 0, 4)
status = (data[0] & STATUS) >> 6
if status == 0 or status == 1: # will always pass for now.
humidity = round((((data[0] & 0x3f) << 8) |
data[1]) * 100.0 / (2**14 - 2), 3)
self.humidity = humidity
self.temp = (round((((data[2] << 6) + ((data[3] & 0xfc) >> 2))
* 165.0 / 16382.0 - 40.0), 3) * 9/5) + 32
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise I2CBusError("Unable to retrieve humidity")
def update_soil_moisture(self):
""" Method will select the ADC module,
turn on the analog sensor, wait for voltage settle,
and then digitize the sensor voltage.
Voltage division/signal loss is accounted for by
scaling up the sensor output.
This may need to be adjusted if a different sensor is used
"""
SensorCluster.analog_sensor_power(SensorCluster.bus, "on") # turn on sensor
sleep(.2)
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.adc_chan)
moisture = get_ADC_value(
SensorCluster.bus, SensorCluster.adc_addr, SensorCluster.moisture_chan)
status = TCA_select(SensorCluster.bus, self.mux_addr, "off") # Turn off mux.
SensorCluster.analog_sensor_power(SensorCluster.bus, "off") # turn off sensor
if (moisture >= 0):
soil_moisture = moisture/2.048 # Scale to a percentage value
self.soil_moisture = round(soil_moisture,3)
else:
raise SensorError(
"The soil moisture meter is not configured correctly.")
return status
def update_instance_sensors(self, opt=None):
""" Method runs through all sensor modules and updates
to the latest sensor values.
After running through each sensor module,
The sensor head (the I2C multiplexer), is disabled
in order to avoid address conflicts.
Usage:
plant_sensor_object.updateAllSensors(bus_object)
"""
self.update_count += 1
self.update_lux()
self.update_humidity_temp()
if opt == "all":
try:
self.update_soil_moisture()
except SensorError:
# This could be handled with a repeat request later.
pass
self.timestamp = time()
# disable sensor module
tca_status = TCA_select(SensorCluster.bus, self.mux_addr, "off")
if tca_status != 0:
raise I2CBusError(
"Bus multiplexer was unable to switch off to prevent conflicts")
def sensor_values(self):
"""
Returns the values of all sensors for this cluster
"""
self.update_instance_sensors(opt="all")
return {
"light": self.lux,
"water": self.soil_moisture,
"humidity": self.humidity,
"temperature": self.temp
}
@classmethod
def update_all_sensors(cls, opt=None):
""" Method iterates over all SensorCluster objects and updates
each sensor value and saves the values to the plant record.
- Note that it must receive an open bus object.
Usage:
Update all sensors exluding analog sensors that need power.
- update_all_sensors()
Update all sensors including soil moisture.
- update_all_sensors("all")
"""
for sensorobj in cls:
sensorobj.update_instance_sensors(opt)
@classmethod
def analog_sensor_power(cls, bus, operation):
""" Method that turns on all of the analog sensor modules
Includes all attached soil moisture sensors
Note that all of the SensorCluster object should be attached
in parallel and only 1 GPIO pin is available
to toggle analog sensor power.
The sensor power should be left on for at least 100ms
in order to allow the sensors to stabilize before reading.
Usage: SensorCluster.analog_sensor_power(bus,"high")
OR SensorCluster.analog_sensor_power(bus,"low")
This method should be removed if an off-board GPIO extender is used.
"""
# Set appropriate analog sensor power bit in GPIO mask
# using the ControlCluster bank_mask to avoid overwriting any data
reg_data = get_IO_reg(bus, 0x20, cls.power_bank)
if operation == "on":
reg_data = reg_data | 1 << cls.analog_power_pin
elif operation == "off":
reg_data = reg_data & (0b11111111 ^ (1 << cls.analog_power_pin))
else:
raise SensorError(
"Invalid command used while enabling analog sensors")
# Send updated IO mask to output
IO_expander_output(bus, 0x20, cls.power_bank, reg_data)
@classmethod
def get_water_level(cls):
""" This method uses the ADC on the control module to measure
the current water tank level and returns the water volume
remaining in the tank.
For this method, it is assumed that a simple voltage divider
is used to interface the sensor to the ADC module.
Testing shows that the sensor response is not completely linear,
though it is quite close. To make the results more accurate,
a mapping method approximated by a linear fit to data is used.
"""
# ----------
# These values should be updated based on the real system parameters
vref = 4.95
tank_height = 17.5 # in centimeters (height of container)
rref = 2668 # Reference resistor
# ----------
val = 0
for i in range(5):
# Take five readings and do an average
# Fetch value from ADC (0x69 - ch1)
val = get_ADC_value(cls.bus, 0x6c, 1) + val
avg = val / 5
water_sensor_res = rref * avg/(vref - avg)
depth_cm = water_sensor_res * \
(-.0163) + 28.127 # measured transfer adjusted offset
if depth_cm < 1.0: # Below 1cm, the values should not be trusted.
depth_cm = 0
cls.water_remaining = depth_cm / tank_height
# Return the current depth in case the user is interested in
# that parameter alone. (IE for automatic shut-off)
return depth_cm/tank_height
|
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/sense.py | SensorCluster.update_humidity_temp | python | def update_humidity_temp(self):
""" This method utilizes the HIH7xxx sensor to read
humidity and temperature in one call.
"""
# Create mask for STATUS (first two bits of 64 bit wide result)
STATUS = 0b11 << 6
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.humidity_chan)
SensorCluster.bus.write_quick(SensorCluster.humidity_addr) # Begin conversion
sleep(.25)
# wait 100ms to make sure the conversion takes place.
data = SensorCluster.bus.read_i2c_block_data(SensorCluster.humidity_addr, 0, 4)
status = (data[0] & STATUS) >> 6
if status == 0 or status == 1: # will always pass for now.
humidity = round((((data[0] & 0x3f) << 8) |
data[1]) * 100.0 / (2**14 - 2), 3)
self.humidity = humidity
self.temp = (round((((data[2] << 6) + ((data[3] & 0xfc) >> 2))
* 165.0 / 16382.0 - 40.0), 3) * 9/5) + 32
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise I2CBusError("Unable to retrieve humidity") | This method utilizes the HIH7xxx sensor to read
humidity and temperature in one call. | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L117-L139 | [
"def TCA_select(bus, addr, channel):\n \"\"\"\n This function will write to the control register of the\n TCA module to select the channel that will be\n exposed on the TCA module.\n After doing this, the desired module can be used as it would be normally.\n (The caller should use the address of the I2C sensor module.\n The TCA module is only written to when the channel is switched.)\n addr contains address of the TCA module\n channel specifies the desired channel on the TCA that will be used.\n\n Usage - Enable a channel\n TCA_select(bus, self.mux_addr, channel_to_enable)\n Channel to enable begins at 0 (enables first channel)\n ends at 3 (enables fourth channel)\n\n Usage - Disable all channels\n TCA_select(bus, self.mux_addr, \"off\")\n This call must be made whenever the sensor node is no longer\n being accessed.\n If this is not done, there will be addressing conflicts.\n \"\"\"\n if addr < 0x70 or addr > 0x77:\n print(\"The TCA address(\" + str(addr) + \") is invalid. Aborting\")\n return False\n if channel == \"off\":\n bus.write_byte(addr, 0)\n elif channel < 0 or channel > 3:\n print(\"The requested channel does not exist.\")\n return False\n else:\n bus.write_byte(addr, 1 << channel)\n\n status = bus.read_byte(addr)\n return status\n"
] | class SensorCluster(object):
'Base class for each individual plant containing sensor info'
__metaclass__ = IterList
_list = []
analog_power_pin = 0
power_bank = 0 # bank and pin used to toggle analog sensor power
temp_addr = 0x48
temp_chan = 3
humidity_addr = 0x27
humidity_chan = 1
lux_addr = 0x39
lux_chan = 0
adc_addr = 0x68
adc_chan = 2
moisture_chan = 1
tank_adc_adr = 0x6c
tank_adc_chan = 0
bus = None
def __init__(self, ID, mux_addr=None):
# Initializes cluster, enumeration, and sets up address info
sensor_addr = import_i2c_addr(SensorCluster.bus)
if (ID < 1 or ID > len(sensor_addr)):
raise I2CBusError("Plant ID out of range.")
self.mux_addr = mux_addr or (sensor_addr[ID-1])
self.ID = ID # Plant number specified by caller
self.temp = 0
self.humidity = 0
self.lux = 0
self.light_ratio = 0
self.soil_moisture = 0
self.acidity = 0
self.timestamp = time() # record time at instantiation
self._list.append(self)
self.update_count = 0
def update_lux(self, extend=0):
""" Communicates with the TSL2550D light sensor and returns a
lux value.
Note that this method contains approximately 1 second of total delay.
This delay is necessary in order to obtain full resolution
compensated lux values.
Alternatively, the device could be put in extended mode,
which drops some resolution in favor of shorter delays.
"""
DEVICE_REG_OUT = 0x1d
LUX_PWR_ON = 0x03
if extend == 1:
LUX_MODE = 0x1d
delay = .08
scale = 5
else:
LUX_MODE = 0x18
delay = .4
scale = 1
LUX_READ_CH0 = 0x43
LUX_READ_CH1 = 0x83
# Select correct I2C mux channel on TCA module
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan)
# Make sure lux sensor is powered up.
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON)
lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON)
# Check for successful powerup
if (lux_on == LUX_PWR_ON):
# Send command to initiate ADC on each channel
# Read each channel after the new data is ready
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE)
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0)
sleep(delay)
adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count0 = get_lux_count(adc_ch0) * scale # 5x for extended mode
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1)
sleep(delay)
adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count1 = get_lux_count(adc_ch1) * scale # 5x for extended mode
ratio = count1 / (count0 - count1)
lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2))
self.light_ratio = float(count1)/float(count0)
print("Light ratio Ch1/Ch0: ", self.light_ratio)
self.lux = round(lux, 3)
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise SensorError("The lux sensor is powered down.")
def update_humidity_temp(self):
""" This method utilizes the HIH7xxx sensor to read
humidity and temperature in one call.
"""
# Create mask for STATUS (first two bits of 64 bit wide result)
STATUS = 0b11 << 6
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.humidity_chan)
SensorCluster.bus.write_quick(SensorCluster.humidity_addr) # Begin conversion
sleep(.25)
# wait 100ms to make sure the conversion takes place.
data = SensorCluster.bus.read_i2c_block_data(SensorCluster.humidity_addr, 0, 4)
status = (data[0] & STATUS) >> 6
if status == 0 or status == 1: # will always pass for now.
humidity = round((((data[0] & 0x3f) << 8) |
data[1]) * 100.0 / (2**14 - 2), 3)
self.humidity = humidity
self.temp = (round((((data[2] << 6) + ((data[3] & 0xfc) >> 2))
* 165.0 / 16382.0 - 40.0), 3) * 9/5) + 32
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise I2CBusError("Unable to retrieve humidity")
def update_soil_moisture(self):
""" Method will select the ADC module,
turn on the analog sensor, wait for voltage settle,
and then digitize the sensor voltage.
Voltage division/signal loss is accounted for by
scaling up the sensor output.
This may need to be adjusted if a different sensor is used
"""
SensorCluster.analog_sensor_power(SensorCluster.bus, "on") # turn on sensor
sleep(.2)
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.adc_chan)
moisture = get_ADC_value(
SensorCluster.bus, SensorCluster.adc_addr, SensorCluster.moisture_chan)
status = TCA_select(SensorCluster.bus, self.mux_addr, "off") # Turn off mux.
SensorCluster.analog_sensor_power(SensorCluster.bus, "off") # turn off sensor
if (moisture >= 0):
soil_moisture = moisture/2.048 # Scale to a percentage value
self.soil_moisture = round(soil_moisture,3)
else:
raise SensorError(
"The soil moisture meter is not configured correctly.")
return status
def update_instance_sensors(self, opt=None):
""" Method runs through all sensor modules and updates
to the latest sensor values.
After running through each sensor module,
The sensor head (the I2C multiplexer), is disabled
in order to avoid address conflicts.
Usage:
plant_sensor_object.updateAllSensors(bus_object)
"""
self.update_count += 1
self.update_lux()
self.update_humidity_temp()
if opt == "all":
try:
self.update_soil_moisture()
except SensorError:
# This could be handled with a repeat request later.
pass
self.timestamp = time()
# disable sensor module
tca_status = TCA_select(SensorCluster.bus, self.mux_addr, "off")
if tca_status != 0:
raise I2CBusError(
"Bus multiplexer was unable to switch off to prevent conflicts")
def sensor_values(self):
"""
Returns the values of all sensors for this cluster
"""
self.update_instance_sensors(opt="all")
return {
"light": self.lux,
"water": self.soil_moisture,
"humidity": self.humidity,
"temperature": self.temp
}
@classmethod
def update_all_sensors(cls, opt=None):
""" Method iterates over all SensorCluster objects and updates
each sensor value and saves the values to the plant record.
- Note that it must receive an open bus object.
Usage:
Update all sensors exluding analog sensors that need power.
- update_all_sensors()
Update all sensors including soil moisture.
- update_all_sensors("all")
"""
for sensorobj in cls:
sensorobj.update_instance_sensors(opt)
@classmethod
def analog_sensor_power(cls, bus, operation):
""" Method that turns on all of the analog sensor modules
Includes all attached soil moisture sensors
Note that all of the SensorCluster object should be attached
in parallel and only 1 GPIO pin is available
to toggle analog sensor power.
The sensor power should be left on for at least 100ms
in order to allow the sensors to stabilize before reading.
Usage: SensorCluster.analog_sensor_power(bus,"high")
OR SensorCluster.analog_sensor_power(bus,"low")
This method should be removed if an off-board GPIO extender is used.
"""
# Set appropriate analog sensor power bit in GPIO mask
# using the ControlCluster bank_mask to avoid overwriting any data
reg_data = get_IO_reg(bus, 0x20, cls.power_bank)
if operation == "on":
reg_data = reg_data | 1 << cls.analog_power_pin
elif operation == "off":
reg_data = reg_data & (0b11111111 ^ (1 << cls.analog_power_pin))
else:
raise SensorError(
"Invalid command used while enabling analog sensors")
# Send updated IO mask to output
IO_expander_output(bus, 0x20, cls.power_bank, reg_data)
@classmethod
def get_water_level(cls):
""" This method uses the ADC on the control module to measure
the current water tank level and returns the water volume
remaining in the tank.
For this method, it is assumed that a simple voltage divider
is used to interface the sensor to the ADC module.
Testing shows that the sensor response is not completely linear,
though it is quite close. To make the results more accurate,
a mapping method approximated by a linear fit to data is used.
"""
# ----------
# These values should be updated based on the real system parameters
vref = 4.95
tank_height = 17.5 # in centimeters (height of container)
rref = 2668 # Reference resistor
# ----------
val = 0
for i in range(5):
# Take five readings and do an average
# Fetch value from ADC (0x69 - ch1)
val = get_ADC_value(cls.bus, 0x6c, 1) + val
avg = val / 5
water_sensor_res = rref * avg/(vref - avg)
depth_cm = water_sensor_res * \
(-.0163) + 28.127 # measured transfer adjusted offset
if depth_cm < 1.0: # Below 1cm, the values should not be trusted.
depth_cm = 0
cls.water_remaining = depth_cm / tank_height
# Return the current depth in case the user is interested in
# that parameter alone. (IE for automatic shut-off)
return depth_cm/tank_height
|
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/sense.py | SensorCluster.update_soil_moisture | python | def update_soil_moisture(self):
""" Method will select the ADC module,
turn on the analog sensor, wait for voltage settle,
and then digitize the sensor voltage.
Voltage division/signal loss is accounted for by
scaling up the sensor output.
This may need to be adjusted if a different sensor is used
"""
SensorCluster.analog_sensor_power(SensorCluster.bus, "on") # turn on sensor
sleep(.2)
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.adc_chan)
moisture = get_ADC_value(
SensorCluster.bus, SensorCluster.adc_addr, SensorCluster.moisture_chan)
status = TCA_select(SensorCluster.bus, self.mux_addr, "off") # Turn off mux.
SensorCluster.analog_sensor_power(SensorCluster.bus, "off") # turn off sensor
if (moisture >= 0):
soil_moisture = moisture/2.048 # Scale to a percentage value
self.soil_moisture = round(soil_moisture,3)
else:
raise SensorError(
"The soil moisture meter is not configured correctly.")
return status | Method will select the ADC module,
turn on the analog sensor, wait for voltage settle,
and then digitize the sensor voltage.
Voltage division/signal loss is accounted for by
scaling up the sensor output.
This may need to be adjusted if a different sensor is used | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L141-L162 | [
"def get_ADC_value(bus, addr, channel):\n \"\"\"\n This method selects a channel and initiates conversion\n The ADC operates at 240 SPS (12 bits) with 1x gain\n One shot conversions are used, meaning a wait period is needed\n in order to acquire new data. This is done via a constant poll\n of the ready bit.\n Upon completion, a voltage value is returned to the caller.\n\n Usage - ADC_start(bus, SensorCluster.ADC_addr, channel_to_read)\n\n IMPORTANT NOTE:\n The ADC uses a 2.048V voltage reference\n\n \"\"\"\n if channel == 1:\n INIT = 0b10000000\n elif channel == 2:\n INIT = 0b10100000\n elif channel == 3:\n INIT = 0b11000000\n elif channel == 4:\n INIT = 0b11100000\n bus.write_byte(addr, INIT)\n data = bus.read_i2c_block_data(addr, 0, 3)\n status = (data[2] & 0b10000000) >> 7\n while(status == 1):\n data = bus.read_i2c_block_data(addr, 0, 3)\n status = (data[2] & 0b10000000) >> 7\n sign = data[0] & 0b00001000\n val = ((data[0] & 0b0000111) << 8) | (data[1])\n if (sign == 1):\n val = (val ^ 0x3ff) + 1 # compute 2s complement for 12 bit val\n # Convert val to a ratiomerical ADC reading\n return float(val) * 2.048 / float(2047)\n",
"def TCA_select(bus, addr, channel):\n \"\"\"\n This function will write to the control register of the\n TCA module to select the channel that will be\n exposed on the TCA module.\n After doing this, the desired module can be used as it would be normally.\n (The caller should use the address of the I2C sensor module.\n The TCA module is only written to when the channel is switched.)\n addr contains address of the TCA module\n channel specifies the desired channel on the TCA that will be used.\n\n Usage - Enable a channel\n TCA_select(bus, self.mux_addr, channel_to_enable)\n Channel to enable begins at 0 (enables first channel)\n ends at 3 (enables fourth channel)\n\n Usage - Disable all channels\n TCA_select(bus, self.mux_addr, \"off\")\n This call must be made whenever the sensor node is no longer\n being accessed.\n If this is not done, there will be addressing conflicts.\n \"\"\"\n if addr < 0x70 or addr > 0x77:\n print(\"The TCA address(\" + str(addr) + \") is invalid. Aborting\")\n return False\n if channel == \"off\":\n bus.write_byte(addr, 0)\n elif channel < 0 or channel > 3:\n print(\"The requested channel does not exist.\")\n return False\n else:\n bus.write_byte(addr, 1 << channel)\n\n status = bus.read_byte(addr)\n return status\n",
"def analog_sensor_power(cls, bus, operation):\n \"\"\" Method that turns on all of the analog sensor modules\n Includes all attached soil moisture sensors\n Note that all of the SensorCluster object should be attached\n in parallel and only 1 GPIO pin is available\n to toggle analog sensor power.\n The sensor power should be left on for at least 100ms\n in order to allow the sensors to stabilize before reading. \n Usage: SensorCluster.analog_sensor_power(bus,\"high\")\n OR SensorCluster.analog_sensor_power(bus,\"low\")\n This method should be removed if an off-board GPIO extender is used.\n \"\"\"\n # Set appropriate analog sensor power bit in GPIO mask\n # using the ControlCluster bank_mask to avoid overwriting any data\n reg_data = get_IO_reg(bus, 0x20, cls.power_bank)\n\n if operation == \"on\":\n reg_data = reg_data | 1 << cls.analog_power_pin\n elif operation == \"off\":\n reg_data = reg_data & (0b11111111 ^ (1 << cls.analog_power_pin))\n else:\n raise SensorError(\n \"Invalid command used while enabling analog sensors\")\n # Send updated IO mask to output\n IO_expander_output(bus, 0x20, cls.power_bank, reg_data)\n"
] | class SensorCluster(object):
'Base class for each individual plant containing sensor info'
__metaclass__ = IterList
_list = []
analog_power_pin = 0
power_bank = 0 # bank and pin used to toggle analog sensor power
temp_addr = 0x48
temp_chan = 3
humidity_addr = 0x27
humidity_chan = 1
lux_addr = 0x39
lux_chan = 0
adc_addr = 0x68
adc_chan = 2
moisture_chan = 1
tank_adc_adr = 0x6c
tank_adc_chan = 0
bus = None
def __init__(self, ID, mux_addr=None):
# Initializes cluster, enumeration, and sets up address info
sensor_addr = import_i2c_addr(SensorCluster.bus)
if (ID < 1 or ID > len(sensor_addr)):
raise I2CBusError("Plant ID out of range.")
self.mux_addr = mux_addr or (sensor_addr[ID-1])
self.ID = ID # Plant number specified by caller
self.temp = 0
self.humidity = 0
self.lux = 0
self.light_ratio = 0
self.soil_moisture = 0
self.acidity = 0
self.timestamp = time() # record time at instantiation
self._list.append(self)
self.update_count = 0
def update_lux(self, extend=0):
""" Communicates with the TSL2550D light sensor and returns a
lux value.
Note that this method contains approximately 1 second of total delay.
This delay is necessary in order to obtain full resolution
compensated lux values.
Alternatively, the device could be put in extended mode,
which drops some resolution in favor of shorter delays.
"""
DEVICE_REG_OUT = 0x1d
LUX_PWR_ON = 0x03
if extend == 1:
LUX_MODE = 0x1d
delay = .08
scale = 5
else:
LUX_MODE = 0x18
delay = .4
scale = 1
LUX_READ_CH0 = 0x43
LUX_READ_CH1 = 0x83
# Select correct I2C mux channel on TCA module
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan)
# Make sure lux sensor is powered up.
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON)
lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON)
# Check for successful powerup
if (lux_on == LUX_PWR_ON):
# Send command to initiate ADC on each channel
# Read each channel after the new data is ready
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE)
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0)
sleep(delay)
adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count0 = get_lux_count(adc_ch0) * scale # 5x for extended mode
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1)
sleep(delay)
adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count1 = get_lux_count(adc_ch1) * scale # 5x for extended mode
ratio = count1 / (count0 - count1)
lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2))
self.light_ratio = float(count1)/float(count0)
print("Light ratio Ch1/Ch0: ", self.light_ratio)
self.lux = round(lux, 3)
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise SensorError("The lux sensor is powered down.")
def update_humidity_temp(self):
""" This method utilizes the HIH7xxx sensor to read
humidity and temperature in one call.
"""
# Create mask for STATUS (first two bits of 64 bit wide result)
STATUS = 0b11 << 6
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.humidity_chan)
SensorCluster.bus.write_quick(SensorCluster.humidity_addr) # Begin conversion
sleep(.25)
# wait 100ms to make sure the conversion takes place.
data = SensorCluster.bus.read_i2c_block_data(SensorCluster.humidity_addr, 0, 4)
status = (data[0] & STATUS) >> 6
if status == 0 or status == 1: # will always pass for now.
humidity = round((((data[0] & 0x3f) << 8) |
data[1]) * 100.0 / (2**14 - 2), 3)
self.humidity = humidity
self.temp = (round((((data[2] << 6) + ((data[3] & 0xfc) >> 2))
* 165.0 / 16382.0 - 40.0), 3) * 9/5) + 32
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise I2CBusError("Unable to retrieve humidity")
def update_soil_moisture(self):
""" Method will select the ADC module,
turn on the analog sensor, wait for voltage settle,
and then digitize the sensor voltage.
Voltage division/signal loss is accounted for by
scaling up the sensor output.
This may need to be adjusted if a different sensor is used
"""
SensorCluster.analog_sensor_power(SensorCluster.bus, "on") # turn on sensor
sleep(.2)
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.adc_chan)
moisture = get_ADC_value(
SensorCluster.bus, SensorCluster.adc_addr, SensorCluster.moisture_chan)
status = TCA_select(SensorCluster.bus, self.mux_addr, "off") # Turn off mux.
SensorCluster.analog_sensor_power(SensorCluster.bus, "off") # turn off sensor
if (moisture >= 0):
soil_moisture = moisture/2.048 # Scale to a percentage value
self.soil_moisture = round(soil_moisture,3)
else:
raise SensorError(
"The soil moisture meter is not configured correctly.")
return status
def update_instance_sensors(self, opt=None):
""" Method runs through all sensor modules and updates
to the latest sensor values.
After running through each sensor module,
The sensor head (the I2C multiplexer), is disabled
in order to avoid address conflicts.
Usage:
plant_sensor_object.updateAllSensors(bus_object)
"""
self.update_count += 1
self.update_lux()
self.update_humidity_temp()
if opt == "all":
try:
self.update_soil_moisture()
except SensorError:
# This could be handled with a repeat request later.
pass
self.timestamp = time()
# disable sensor module
tca_status = TCA_select(SensorCluster.bus, self.mux_addr, "off")
if tca_status != 0:
raise I2CBusError(
"Bus multiplexer was unable to switch off to prevent conflicts")
def sensor_values(self):
"""
Returns the values of all sensors for this cluster
"""
self.update_instance_sensors(opt="all")
return {
"light": self.lux,
"water": self.soil_moisture,
"humidity": self.humidity,
"temperature": self.temp
}
@classmethod
def update_all_sensors(cls, opt=None):
""" Method iterates over all SensorCluster objects and updates
each sensor value and saves the values to the plant record.
- Note that it must receive an open bus object.
Usage:
Update all sensors exluding analog sensors that need power.
- update_all_sensors()
Update all sensors including soil moisture.
- update_all_sensors("all")
"""
for sensorobj in cls:
sensorobj.update_instance_sensors(opt)
@classmethod
def analog_sensor_power(cls, bus, operation):
""" Method that turns on all of the analog sensor modules
Includes all attached soil moisture sensors
Note that all of the SensorCluster object should be attached
in parallel and only 1 GPIO pin is available
to toggle analog sensor power.
The sensor power should be left on for at least 100ms
in order to allow the sensors to stabilize before reading.
Usage: SensorCluster.analog_sensor_power(bus,"high")
OR SensorCluster.analog_sensor_power(bus,"low")
This method should be removed if an off-board GPIO extender is used.
"""
# Set appropriate analog sensor power bit in GPIO mask
# using the ControlCluster bank_mask to avoid overwriting any data
reg_data = get_IO_reg(bus, 0x20, cls.power_bank)
if operation == "on":
reg_data = reg_data | 1 << cls.analog_power_pin
elif operation == "off":
reg_data = reg_data & (0b11111111 ^ (1 << cls.analog_power_pin))
else:
raise SensorError(
"Invalid command used while enabling analog sensors")
# Send updated IO mask to output
IO_expander_output(bus, 0x20, cls.power_bank, reg_data)
@classmethod
def get_water_level(cls):
""" This method uses the ADC on the control module to measure
the current water tank level and returns the water volume
remaining in the tank.
For this method, it is assumed that a simple voltage divider
is used to interface the sensor to the ADC module.
Testing shows that the sensor response is not completely linear,
though it is quite close. To make the results more accurate,
a mapping method approximated by a linear fit to data is used.
"""
# ----------
# These values should be updated based on the real system parameters
vref = 4.95
tank_height = 17.5 # in centimeters (height of container)
rref = 2668 # Reference resistor
# ----------
val = 0
for i in range(5):
# Take five readings and do an average
# Fetch value from ADC (0x69 - ch1)
val = get_ADC_value(cls.bus, 0x6c, 1) + val
avg = val / 5
water_sensor_res = rref * avg/(vref - avg)
depth_cm = water_sensor_res * \
(-.0163) + 28.127 # measured transfer adjusted offset
if depth_cm < 1.0: # Below 1cm, the values should not be trusted.
depth_cm = 0
cls.water_remaining = depth_cm / tank_height
# Return the current depth in case the user is interested in
# that parameter alone. (IE for automatic shut-off)
return depth_cm/tank_height
|
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/sense.py | SensorCluster.update_instance_sensors | python | def update_instance_sensors(self, opt=None):
""" Method runs through all sensor modules and updates
to the latest sensor values.
After running through each sensor module,
The sensor head (the I2C multiplexer), is disabled
in order to avoid address conflicts.
Usage:
plant_sensor_object.updateAllSensors(bus_object)
"""
self.update_count += 1
self.update_lux()
self.update_humidity_temp()
if opt == "all":
try:
self.update_soil_moisture()
except SensorError:
# This could be handled with a repeat request later.
pass
self.timestamp = time()
# disable sensor module
tca_status = TCA_select(SensorCluster.bus, self.mux_addr, "off")
if tca_status != 0:
raise I2CBusError(
"Bus multiplexer was unable to switch off to prevent conflicts") | Method runs through all sensor modules and updates
to the latest sensor values.
After running through each sensor module,
The sensor head (the I2C multiplexer), is disabled
in order to avoid address conflicts.
Usage:
plant_sensor_object.updateAllSensors(bus_object) | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L164-L189 | [
"def TCA_select(bus, addr, channel):\n \"\"\"\n This function will write to the control register of the\n TCA module to select the channel that will be\n exposed on the TCA module.\n After doing this, the desired module can be used as it would be normally.\n (The caller should use the address of the I2C sensor module.\n The TCA module is only written to when the channel is switched.)\n addr contains address of the TCA module\n channel specifies the desired channel on the TCA that will be used.\n\n Usage - Enable a channel\n TCA_select(bus, self.mux_addr, channel_to_enable)\n Channel to enable begins at 0 (enables first channel)\n ends at 3 (enables fourth channel)\n\n Usage - Disable all channels\n TCA_select(bus, self.mux_addr, \"off\")\n This call must be made whenever the sensor node is no longer\n being accessed.\n If this is not done, there will be addressing conflicts.\n \"\"\"\n if addr < 0x70 or addr > 0x77:\n print(\"The TCA address(\" + str(addr) + \") is invalid. Aborting\")\n return False\n if channel == \"off\":\n bus.write_byte(addr, 0)\n elif channel < 0 or channel > 3:\n print(\"The requested channel does not exist.\")\n return False\n else:\n bus.write_byte(addr, 1 << channel)\n\n status = bus.read_byte(addr)\n return status\n",
"def update_lux(self, extend=0):\n \"\"\" Communicates with the TSL2550D light sensor and returns a \n lux value. \n\n Note that this method contains approximately 1 second of total delay.\n This delay is necessary in order to obtain full resolution\n compensated lux values.\n\n Alternatively, the device could be put in extended mode, \n which drops some resolution in favor of shorter delays.\n\n \"\"\"\n DEVICE_REG_OUT = 0x1d\n LUX_PWR_ON = 0x03\n if extend == 1:\n LUX_MODE = 0x1d\n delay = .08\n scale = 5\n else:\n LUX_MODE = 0x18\n delay = .4\n scale = 1\n LUX_READ_CH0 = 0x43\n LUX_READ_CH1 = 0x83\n # Select correct I2C mux channel on TCA module\n\n TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan)\n # Make sure lux sensor is powered up.\n SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON)\n lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON)\n\n # Check for successful powerup\n if (lux_on == LUX_PWR_ON):\n # Send command to initiate ADC on each channel\n # Read each channel after the new data is ready\n SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE)\n SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0)\n sleep(delay)\n adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)\n count0 = get_lux_count(adc_ch0) * scale # 5x for extended mode\n SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1)\n sleep(delay)\n adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)\n count1 = get_lux_count(adc_ch1) * scale # 5x for extended mode\n ratio = count1 / (count0 - count1)\n lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2))\n self.light_ratio = float(count1)/float(count0)\n print(\"Light ratio Ch1/Ch0: \", self.light_ratio)\n self.lux = round(lux, 3)\n return TCA_select(SensorCluster.bus, self.mux_addr, \"off\")\n else:\n raise SensorError(\"The lux sensor is powered down.\")\n",
"def update_humidity_temp(self):\n \"\"\" This method utilizes the HIH7xxx sensor to read\n humidity and temperature in one call. \n \"\"\"\n # Create mask for STATUS (first two bits of 64 bit wide result)\n STATUS = 0b11 << 6\n\n TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.humidity_chan)\n SensorCluster.bus.write_quick(SensorCluster.humidity_addr) # Begin conversion\n sleep(.25)\n # wait 100ms to make sure the conversion takes place.\n data = SensorCluster.bus.read_i2c_block_data(SensorCluster.humidity_addr, 0, 4)\n status = (data[0] & STATUS) >> 6\n\n if status == 0 or status == 1: # will always pass for now.\n humidity = round((((data[0] & 0x3f) << 8) |\n data[1]) * 100.0 / (2**14 - 2), 3)\n self.humidity = humidity\n self.temp = (round((((data[2] << 6) + ((data[3] & 0xfc) >> 2))\n * 165.0 / 16382.0 - 40.0), 3) * 9/5) + 32\n return TCA_select(SensorCluster.bus, self.mux_addr, \"off\")\n else:\n raise I2CBusError(\"Unable to retrieve humidity\")\n",
"def update_soil_moisture(self):\n \"\"\" Method will select the ADC module,\n turn on the analog sensor, wait for voltage settle, \n and then digitize the sensor voltage. \n Voltage division/signal loss is accounted for by \n scaling up the sensor output.\n This may need to be adjusted if a different sensor is used\n \"\"\"\n SensorCluster.analog_sensor_power(SensorCluster.bus, \"on\") # turn on sensor\n sleep(.2)\n TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.adc_chan)\n moisture = get_ADC_value(\n SensorCluster.bus, SensorCluster.adc_addr, SensorCluster.moisture_chan)\n status = TCA_select(SensorCluster.bus, self.mux_addr, \"off\") # Turn off mux.\n SensorCluster.analog_sensor_power(SensorCluster.bus, \"off\") # turn off sensor\n if (moisture >= 0):\n soil_moisture = moisture/2.048 # Scale to a percentage value \n self.soil_moisture = round(soil_moisture,3)\n else:\n raise SensorError(\n \"The soil moisture meter is not configured correctly.\")\n return status\n"
] | class SensorCluster(object):
'Base class for each individual plant containing sensor info'
__metaclass__ = IterList
_list = []
analog_power_pin = 0
power_bank = 0 # bank and pin used to toggle analog sensor power
temp_addr = 0x48
temp_chan = 3
humidity_addr = 0x27
humidity_chan = 1
lux_addr = 0x39
lux_chan = 0
adc_addr = 0x68
adc_chan = 2
moisture_chan = 1
tank_adc_adr = 0x6c
tank_adc_chan = 0
bus = None
def __init__(self, ID, mux_addr=None):
# Initializes cluster, enumeration, and sets up address info
sensor_addr = import_i2c_addr(SensorCluster.bus)
if (ID < 1 or ID > len(sensor_addr)):
raise I2CBusError("Plant ID out of range.")
self.mux_addr = mux_addr or (sensor_addr[ID-1])
self.ID = ID # Plant number specified by caller
self.temp = 0
self.humidity = 0
self.lux = 0
self.light_ratio = 0
self.soil_moisture = 0
self.acidity = 0
self.timestamp = time() # record time at instantiation
self._list.append(self)
self.update_count = 0
def update_lux(self, extend=0):
""" Communicates with the TSL2550D light sensor and returns a
lux value.
Note that this method contains approximately 1 second of total delay.
This delay is necessary in order to obtain full resolution
compensated lux values.
Alternatively, the device could be put in extended mode,
which drops some resolution in favor of shorter delays.
"""
DEVICE_REG_OUT = 0x1d
LUX_PWR_ON = 0x03
if extend == 1:
LUX_MODE = 0x1d
delay = .08
scale = 5
else:
LUX_MODE = 0x18
delay = .4
scale = 1
LUX_READ_CH0 = 0x43
LUX_READ_CH1 = 0x83
# Select correct I2C mux channel on TCA module
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan)
# Make sure lux sensor is powered up.
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON)
lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON)
# Check for successful powerup
if (lux_on == LUX_PWR_ON):
# Send command to initiate ADC on each channel
# Read each channel after the new data is ready
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE)
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0)
sleep(delay)
adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count0 = get_lux_count(adc_ch0) * scale # 5x for extended mode
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1)
sleep(delay)
adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count1 = get_lux_count(adc_ch1) * scale # 5x for extended mode
ratio = count1 / (count0 - count1)
lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2))
self.light_ratio = float(count1)/float(count0)
print("Light ratio Ch1/Ch0: ", self.light_ratio)
self.lux = round(lux, 3)
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise SensorError("The lux sensor is powered down.")
def update_humidity_temp(self):
""" This method utilizes the HIH7xxx sensor to read
humidity and temperature in one call.
"""
# Create mask for STATUS (first two bits of 64 bit wide result)
STATUS = 0b11 << 6
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.humidity_chan)
SensorCluster.bus.write_quick(SensorCluster.humidity_addr) # Begin conversion
sleep(.25)
# wait 100ms to make sure the conversion takes place.
data = SensorCluster.bus.read_i2c_block_data(SensorCluster.humidity_addr, 0, 4)
status = (data[0] & STATUS) >> 6
if status == 0 or status == 1: # will always pass for now.
humidity = round((((data[0] & 0x3f) << 8) |
data[1]) * 100.0 / (2**14 - 2), 3)
self.humidity = humidity
self.temp = (round((((data[2] << 6) + ((data[3] & 0xfc) >> 2))
* 165.0 / 16382.0 - 40.0), 3) * 9/5) + 32
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise I2CBusError("Unable to retrieve humidity")
def update_soil_moisture(self):
""" Method will select the ADC module,
turn on the analog sensor, wait for voltage settle,
and then digitize the sensor voltage.
Voltage division/signal loss is accounted for by
scaling up the sensor output.
This may need to be adjusted if a different sensor is used
"""
SensorCluster.analog_sensor_power(SensorCluster.bus, "on") # turn on sensor
sleep(.2)
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.adc_chan)
moisture = get_ADC_value(
SensorCluster.bus, SensorCluster.adc_addr, SensorCluster.moisture_chan)
status = TCA_select(SensorCluster.bus, self.mux_addr, "off") # Turn off mux.
SensorCluster.analog_sensor_power(SensorCluster.bus, "off") # turn off sensor
if (moisture >= 0):
soil_moisture = moisture/2.048 # Scale to a percentage value
self.soil_moisture = round(soil_moisture,3)
else:
raise SensorError(
"The soil moisture meter is not configured correctly.")
return status
def update_instance_sensors(self, opt=None):
""" Method runs through all sensor modules and updates
to the latest sensor values.
After running through each sensor module,
The sensor head (the I2C multiplexer), is disabled
in order to avoid address conflicts.
Usage:
plant_sensor_object.updateAllSensors(bus_object)
"""
self.update_count += 1
self.update_lux()
self.update_humidity_temp()
if opt == "all":
try:
self.update_soil_moisture()
except SensorError:
# This could be handled with a repeat request later.
pass
self.timestamp = time()
# disable sensor module
tca_status = TCA_select(SensorCluster.bus, self.mux_addr, "off")
if tca_status != 0:
raise I2CBusError(
"Bus multiplexer was unable to switch off to prevent conflicts")
def sensor_values(self):
"""
Returns the values of all sensors for this cluster
"""
self.update_instance_sensors(opt="all")
return {
"light": self.lux,
"water": self.soil_moisture,
"humidity": self.humidity,
"temperature": self.temp
}
@classmethod
def update_all_sensors(cls, opt=None):
""" Method iterates over all SensorCluster objects and updates
each sensor value and saves the values to the plant record.
- Note that it must receive an open bus object.
Usage:
Update all sensors exluding analog sensors that need power.
- update_all_sensors()
Update all sensors including soil moisture.
- update_all_sensors("all")
"""
for sensorobj in cls:
sensorobj.update_instance_sensors(opt)
@classmethod
def analog_sensor_power(cls, bus, operation):
""" Method that turns on all of the analog sensor modules
Includes all attached soil moisture sensors
Note that all of the SensorCluster object should be attached
in parallel and only 1 GPIO pin is available
to toggle analog sensor power.
The sensor power should be left on for at least 100ms
in order to allow the sensors to stabilize before reading.
Usage: SensorCluster.analog_sensor_power(bus,"high")
OR SensorCluster.analog_sensor_power(bus,"low")
This method should be removed if an off-board GPIO extender is used.
"""
# Set appropriate analog sensor power bit in GPIO mask
# using the ControlCluster bank_mask to avoid overwriting any data
reg_data = get_IO_reg(bus, 0x20, cls.power_bank)
if operation == "on":
reg_data = reg_data | 1 << cls.analog_power_pin
elif operation == "off":
reg_data = reg_data & (0b11111111 ^ (1 << cls.analog_power_pin))
else:
raise SensorError(
"Invalid command used while enabling analog sensors")
# Send updated IO mask to output
IO_expander_output(bus, 0x20, cls.power_bank, reg_data)
@classmethod
def get_water_level(cls):
""" This method uses the ADC on the control module to measure
the current water tank level and returns the water volume
remaining in the tank.
For this method, it is assumed that a simple voltage divider
is used to interface the sensor to the ADC module.
Testing shows that the sensor response is not completely linear,
though it is quite close. To make the results more accurate,
a mapping method approximated by a linear fit to data is used.
"""
# ----------
# These values should be updated based on the real system parameters
vref = 4.95
tank_height = 17.5 # in centimeters (height of container)
rref = 2668 # Reference resistor
# ----------
val = 0
for i in range(5):
# Take five readings and do an average
# Fetch value from ADC (0x69 - ch1)
val = get_ADC_value(cls.bus, 0x6c, 1) + val
avg = val / 5
water_sensor_res = rref * avg/(vref - avg)
depth_cm = water_sensor_res * \
(-.0163) + 28.127 # measured transfer adjusted offset
if depth_cm < 1.0: # Below 1cm, the values should not be trusted.
depth_cm = 0
cls.water_remaining = depth_cm / tank_height
# Return the current depth in case the user is interested in
# that parameter alone. (IE for automatic shut-off)
return depth_cm/tank_height
|
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/sense.py | SensorCluster.sensor_values | python | def sensor_values(self):
"""
Returns the values of all sensors for this cluster
"""
self.update_instance_sensors(opt="all")
return {
"light": self.lux,
"water": self.soil_moisture,
"humidity": self.humidity,
"temperature": self.temp
} | Returns the values of all sensors for this cluster | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L191-L201 | [
"def update_instance_sensors(self, opt=None):\n\n \"\"\" Method runs through all sensor modules and updates \n to the latest sensor values.\n After running through each sensor module,\n The sensor head (the I2C multiplexer), is disabled\n in order to avoid address conflicts.\n Usage:\n plant_sensor_object.updateAllSensors(bus_object)\n \"\"\"\n self.update_count += 1\n self.update_lux()\n self.update_humidity_temp()\n if opt == \"all\":\n try:\n self.update_soil_moisture()\n except SensorError:\n # This could be handled with a repeat request later.\n pass\n self.timestamp = time()\n # disable sensor module\n\n tca_status = TCA_select(SensorCluster.bus, self.mux_addr, \"off\")\n if tca_status != 0:\n raise I2CBusError(\n \"Bus multiplexer was unable to switch off to prevent conflicts\")\n"
] | class SensorCluster(object):
'Base class for each individual plant containing sensor info'
__metaclass__ = IterList
_list = []
analog_power_pin = 0
power_bank = 0 # bank and pin used to toggle analog sensor power
temp_addr = 0x48
temp_chan = 3
humidity_addr = 0x27
humidity_chan = 1
lux_addr = 0x39
lux_chan = 0
adc_addr = 0x68
adc_chan = 2
moisture_chan = 1
tank_adc_adr = 0x6c
tank_adc_chan = 0
bus = None
def __init__(self, ID, mux_addr=None):
# Initializes cluster, enumeration, and sets up address info
sensor_addr = import_i2c_addr(SensorCluster.bus)
if (ID < 1 or ID > len(sensor_addr)):
raise I2CBusError("Plant ID out of range.")
self.mux_addr = mux_addr or (sensor_addr[ID-1])
self.ID = ID # Plant number specified by caller
self.temp = 0
self.humidity = 0
self.lux = 0
self.light_ratio = 0
self.soil_moisture = 0
self.acidity = 0
self.timestamp = time() # record time at instantiation
self._list.append(self)
self.update_count = 0
def update_lux(self, extend=0):
""" Communicates with the TSL2550D light sensor and returns a
lux value.
Note that this method contains approximately 1 second of total delay.
This delay is necessary in order to obtain full resolution
compensated lux values.
Alternatively, the device could be put in extended mode,
which drops some resolution in favor of shorter delays.
"""
DEVICE_REG_OUT = 0x1d
LUX_PWR_ON = 0x03
if extend == 1:
LUX_MODE = 0x1d
delay = .08
scale = 5
else:
LUX_MODE = 0x18
delay = .4
scale = 1
LUX_READ_CH0 = 0x43
LUX_READ_CH1 = 0x83
# Select correct I2C mux channel on TCA module
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan)
# Make sure lux sensor is powered up.
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON)
lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON)
# Check for successful powerup
if (lux_on == LUX_PWR_ON):
# Send command to initiate ADC on each channel
# Read each channel after the new data is ready
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE)
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0)
sleep(delay)
adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count0 = get_lux_count(adc_ch0) * scale # 5x for extended mode
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1)
sleep(delay)
adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count1 = get_lux_count(adc_ch1) * scale # 5x for extended mode
ratio = count1 / (count0 - count1)
lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2))
self.light_ratio = float(count1)/float(count0)
print("Light ratio Ch1/Ch0: ", self.light_ratio)
self.lux = round(lux, 3)
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise SensorError("The lux sensor is powered down.")
def update_humidity_temp(self):
""" This method utilizes the HIH7xxx sensor to read
humidity and temperature in one call.
"""
# Create mask for STATUS (first two bits of 64 bit wide result)
STATUS = 0b11 << 6
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.humidity_chan)
SensorCluster.bus.write_quick(SensorCluster.humidity_addr) # Begin conversion
sleep(.25)
# wait 100ms to make sure the conversion takes place.
data = SensorCluster.bus.read_i2c_block_data(SensorCluster.humidity_addr, 0, 4)
status = (data[0] & STATUS) >> 6
if status == 0 or status == 1: # will always pass for now.
humidity = round((((data[0] & 0x3f) << 8) |
data[1]) * 100.0 / (2**14 - 2), 3)
self.humidity = humidity
self.temp = (round((((data[2] << 6) + ((data[3] & 0xfc) >> 2))
* 165.0 / 16382.0 - 40.0), 3) * 9/5) + 32
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise I2CBusError("Unable to retrieve humidity")
def update_soil_moisture(self):
""" Method will select the ADC module,
turn on the analog sensor, wait for voltage settle,
and then digitize the sensor voltage.
Voltage division/signal loss is accounted for by
scaling up the sensor output.
This may need to be adjusted if a different sensor is used
"""
SensorCluster.analog_sensor_power(SensorCluster.bus, "on") # turn on sensor
sleep(.2)
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.adc_chan)
moisture = get_ADC_value(
SensorCluster.bus, SensorCluster.adc_addr, SensorCluster.moisture_chan)
status = TCA_select(SensorCluster.bus, self.mux_addr, "off") # Turn off mux.
SensorCluster.analog_sensor_power(SensorCluster.bus, "off") # turn off sensor
if (moisture >= 0):
soil_moisture = moisture/2.048 # Scale to a percentage value
self.soil_moisture = round(soil_moisture,3)
else:
raise SensorError(
"The soil moisture meter is not configured correctly.")
return status
def update_instance_sensors(self, opt=None):
""" Method runs through all sensor modules and updates
to the latest sensor values.
After running through each sensor module,
The sensor head (the I2C multiplexer), is disabled
in order to avoid address conflicts.
Usage:
plant_sensor_object.updateAllSensors(bus_object)
"""
self.update_count += 1
self.update_lux()
self.update_humidity_temp()
if opt == "all":
try:
self.update_soil_moisture()
except SensorError:
# This could be handled with a repeat request later.
pass
self.timestamp = time()
# disable sensor module
tca_status = TCA_select(SensorCluster.bus, self.mux_addr, "off")
if tca_status != 0:
raise I2CBusError(
"Bus multiplexer was unable to switch off to prevent conflicts")
def sensor_values(self):
"""
Returns the values of all sensors for this cluster
"""
self.update_instance_sensors(opt="all")
return {
"light": self.lux,
"water": self.soil_moisture,
"humidity": self.humidity,
"temperature": self.temp
}
@classmethod
def update_all_sensors(cls, opt=None):
""" Method iterates over all SensorCluster objects and updates
each sensor value and saves the values to the plant record.
- Note that it must receive an open bus object.
Usage:
Update all sensors exluding analog sensors that need power.
- update_all_sensors()
Update all sensors including soil moisture.
- update_all_sensors("all")
"""
for sensorobj in cls:
sensorobj.update_instance_sensors(opt)
@classmethod
def analog_sensor_power(cls, bus, operation):
""" Method that turns on all of the analog sensor modules
Includes all attached soil moisture sensors
Note that all of the SensorCluster object should be attached
in parallel and only 1 GPIO pin is available
to toggle analog sensor power.
The sensor power should be left on for at least 100ms
in order to allow the sensors to stabilize before reading.
Usage: SensorCluster.analog_sensor_power(bus,"high")
OR SensorCluster.analog_sensor_power(bus,"low")
This method should be removed if an off-board GPIO extender is used.
"""
# Set appropriate analog sensor power bit in GPIO mask
# using the ControlCluster bank_mask to avoid overwriting any data
reg_data = get_IO_reg(bus, 0x20, cls.power_bank)
if operation == "on":
reg_data = reg_data | 1 << cls.analog_power_pin
elif operation == "off":
reg_data = reg_data & (0b11111111 ^ (1 << cls.analog_power_pin))
else:
raise SensorError(
"Invalid command used while enabling analog sensors")
# Send updated IO mask to output
IO_expander_output(bus, 0x20, cls.power_bank, reg_data)
@classmethod
def get_water_level(cls):
""" This method uses the ADC on the control module to measure
the current water tank level and returns the water volume
remaining in the tank.
For this method, it is assumed that a simple voltage divider
is used to interface the sensor to the ADC module.
Testing shows that the sensor response is not completely linear,
though it is quite close. To make the results more accurate,
a mapping method approximated by a linear fit to data is used.
"""
# ----------
# These values should be updated based on the real system parameters
vref = 4.95
tank_height = 17.5 # in centimeters (height of container)
rref = 2668 # Reference resistor
# ----------
val = 0
for i in range(5):
# Take five readings and do an average
# Fetch value from ADC (0x69 - ch1)
val = get_ADC_value(cls.bus, 0x6c, 1) + val
avg = val / 5
water_sensor_res = rref * avg/(vref - avg)
depth_cm = water_sensor_res * \
(-.0163) + 28.127 # measured transfer adjusted offset
if depth_cm < 1.0: # Below 1cm, the values should not be trusted.
depth_cm = 0
cls.water_remaining = depth_cm / tank_height
# Return the current depth in case the user is interested in
# that parameter alone. (IE for automatic shut-off)
return depth_cm/tank_height
|
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/sense.py | SensorCluster.analog_sensor_power | python | def analog_sensor_power(cls, bus, operation):
""" Method that turns on all of the analog sensor modules
Includes all attached soil moisture sensors
Note that all of the SensorCluster object should be attached
in parallel and only 1 GPIO pin is available
to toggle analog sensor power.
The sensor power should be left on for at least 100ms
in order to allow the sensors to stabilize before reading.
Usage: SensorCluster.analog_sensor_power(bus,"high")
OR SensorCluster.analog_sensor_power(bus,"low")
This method should be removed if an off-board GPIO extender is used.
"""
# Set appropriate analog sensor power bit in GPIO mask
# using the ControlCluster bank_mask to avoid overwriting any data
reg_data = get_IO_reg(bus, 0x20, cls.power_bank)
if operation == "on":
reg_data = reg_data | 1 << cls.analog_power_pin
elif operation == "off":
reg_data = reg_data & (0b11111111 ^ (1 << cls.analog_power_pin))
else:
raise SensorError(
"Invalid command used while enabling analog sensors")
# Send updated IO mask to output
IO_expander_output(bus, 0x20, cls.power_bank, reg_data) | Method that turns on all of the analog sensor modules
Includes all attached soil moisture sensors
Note that all of the SensorCluster object should be attached
in parallel and only 1 GPIO pin is available
to toggle analog sensor power.
The sensor power should be left on for at least 100ms
in order to allow the sensors to stabilize before reading.
Usage: SensorCluster.analog_sensor_power(bus,"high")
OR SensorCluster.analog_sensor_power(bus,"low")
This method should be removed if an off-board GPIO extender is used. | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L220-L244 | [
"def IO_expander_output(bus, addr, bank, mask):\n \"\"\"\n Method for controlling the GPIO expander via I2C\n which accepts a bank - A(0) or B(1) and a mask\n to push to the pins of the expander.\n\n The method also assumes the the expander is operating\n in sequential mode. If this mode is not used,\n the register addresses will need to be changed.\n\n Usage:\n GPIO_out(bus, GPIO_addr, 0, 0b00011111)\n This call would turn on A0 through A4. \n\n \"\"\"\n IODIR_map = [0x00, 0x01]\n output_map = [0x14, 0x15]\n\n if (bank != 0) and (bank != 1):\n print()\n raise InvalidIOUsage(\"An invalid IO bank has been selected\")\n\n\n IO_direction = IODIR_map[bank]\n output_reg = output_map[bank]\n\n current_status = bus.read_byte_data(addr, output_reg)\n if current_status == mask:\n # This means nothing needs to happen\n print(\"Current control status matches requested controls. \" +\n \"No action is required.\")\n return True\n\n bus.write_byte_data(addr, IO_direction, 0)\n bus.write_byte_data(addr, output_reg, mask)\n",
"def get_IO_reg(bus, addr, bank):\n \"\"\"\n Method retrieves the register corresponding to respective bank (0 or 1)\n \"\"\"\n output_map = [0x14, 0x15]\n if (bank != 0) and (bank != 1):\n print()\n raise InvalidIOUsage(\"An invalid IO bank has been selected\")\n\n output_reg = output_map[bank]\n current_status = bus.read_byte_data(addr, output_reg)\n return current_status\n"
] | class SensorCluster(object):
'Base class for each individual plant containing sensor info'
__metaclass__ = IterList
_list = []
analog_power_pin = 0
power_bank = 0 # bank and pin used to toggle analog sensor power
temp_addr = 0x48
temp_chan = 3
humidity_addr = 0x27
humidity_chan = 1
lux_addr = 0x39
lux_chan = 0
adc_addr = 0x68
adc_chan = 2
moisture_chan = 1
tank_adc_adr = 0x6c
tank_adc_chan = 0
bus = None
def __init__(self, ID, mux_addr=None):
# Initializes cluster, enumeration, and sets up address info
sensor_addr = import_i2c_addr(SensorCluster.bus)
if (ID < 1 or ID > len(sensor_addr)):
raise I2CBusError("Plant ID out of range.")
self.mux_addr = mux_addr or (sensor_addr[ID-1])
self.ID = ID # Plant number specified by caller
self.temp = 0
self.humidity = 0
self.lux = 0
self.light_ratio = 0
self.soil_moisture = 0
self.acidity = 0
self.timestamp = time() # record time at instantiation
self._list.append(self)
self.update_count = 0
def update_lux(self, extend=0):
""" Communicates with the TSL2550D light sensor and returns a
lux value.
Note that this method contains approximately 1 second of total delay.
This delay is necessary in order to obtain full resolution
compensated lux values.
Alternatively, the device could be put in extended mode,
which drops some resolution in favor of shorter delays.
"""
DEVICE_REG_OUT = 0x1d
LUX_PWR_ON = 0x03
if extend == 1:
LUX_MODE = 0x1d
delay = .08
scale = 5
else:
LUX_MODE = 0x18
delay = .4
scale = 1
LUX_READ_CH0 = 0x43
LUX_READ_CH1 = 0x83
# Select correct I2C mux channel on TCA module
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan)
# Make sure lux sensor is powered up.
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON)
lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON)
# Check for successful powerup
if (lux_on == LUX_PWR_ON):
# Send command to initiate ADC on each channel
# Read each channel after the new data is ready
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE)
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0)
sleep(delay)
adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count0 = get_lux_count(adc_ch0) * scale # 5x for extended mode
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1)
sleep(delay)
adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count1 = get_lux_count(adc_ch1) * scale # 5x for extended mode
ratio = count1 / (count0 - count1)
lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2))
self.light_ratio = float(count1)/float(count0)
print("Light ratio Ch1/Ch0: ", self.light_ratio)
self.lux = round(lux, 3)
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise SensorError("The lux sensor is powered down.")
def update_humidity_temp(self):
""" This method utilizes the HIH7xxx sensor to read
humidity and temperature in one call.
"""
# Create mask for STATUS (first two bits of 64 bit wide result)
STATUS = 0b11 << 6
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.humidity_chan)
SensorCluster.bus.write_quick(SensorCluster.humidity_addr) # Begin conversion
sleep(.25)
# wait 100ms to make sure the conversion takes place.
data = SensorCluster.bus.read_i2c_block_data(SensorCluster.humidity_addr, 0, 4)
status = (data[0] & STATUS) >> 6
if status == 0 or status == 1: # will always pass for now.
humidity = round((((data[0] & 0x3f) << 8) |
data[1]) * 100.0 / (2**14 - 2), 3)
self.humidity = humidity
self.temp = (round((((data[2] << 6) + ((data[3] & 0xfc) >> 2))
* 165.0 / 16382.0 - 40.0), 3) * 9/5) + 32
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise I2CBusError("Unable to retrieve humidity")
def update_soil_moisture(self):
""" Method will select the ADC module,
turn on the analog sensor, wait for voltage settle,
and then digitize the sensor voltage.
Voltage division/signal loss is accounted for by
scaling up the sensor output.
This may need to be adjusted if a different sensor is used
"""
SensorCluster.analog_sensor_power(SensorCluster.bus, "on") # turn on sensor
sleep(.2)
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.adc_chan)
moisture = get_ADC_value(
SensorCluster.bus, SensorCluster.adc_addr, SensorCluster.moisture_chan)
status = TCA_select(SensorCluster.bus, self.mux_addr, "off") # Turn off mux.
SensorCluster.analog_sensor_power(SensorCluster.bus, "off") # turn off sensor
if (moisture >= 0):
soil_moisture = moisture/2.048 # Scale to a percentage value
self.soil_moisture = round(soil_moisture,3)
else:
raise SensorError(
"The soil moisture meter is not configured correctly.")
return status
def update_instance_sensors(self, opt=None):
""" Method runs through all sensor modules and updates
to the latest sensor values.
After running through each sensor module,
The sensor head (the I2C multiplexer), is disabled
in order to avoid address conflicts.
Usage:
plant_sensor_object.updateAllSensors(bus_object)
"""
self.update_count += 1
self.update_lux()
self.update_humidity_temp()
if opt == "all":
try:
self.update_soil_moisture()
except SensorError:
# This could be handled with a repeat request later.
pass
self.timestamp = time()
# disable sensor module
tca_status = TCA_select(SensorCluster.bus, self.mux_addr, "off")
if tca_status != 0:
raise I2CBusError(
"Bus multiplexer was unable to switch off to prevent conflicts")
def sensor_values(self):
"""
Returns the values of all sensors for this cluster
"""
self.update_instance_sensors(opt="all")
return {
"light": self.lux,
"water": self.soil_moisture,
"humidity": self.humidity,
"temperature": self.temp
}
@classmethod
def update_all_sensors(cls, opt=None):
""" Method iterates over all SensorCluster objects and updates
each sensor value and saves the values to the plant record.
- Note that it must receive an open bus object.
Usage:
Update all sensors exluding analog sensors that need power.
- update_all_sensors()
Update all sensors including soil moisture.
- update_all_sensors("all")
"""
for sensorobj in cls:
sensorobj.update_instance_sensors(opt)
@classmethod
def analog_sensor_power(cls, bus, operation):
""" Method that turns on all of the analog sensor modules
Includes all attached soil moisture sensors
Note that all of the SensorCluster object should be attached
in parallel and only 1 GPIO pin is available
to toggle analog sensor power.
The sensor power should be left on for at least 100ms
in order to allow the sensors to stabilize before reading.
Usage: SensorCluster.analog_sensor_power(bus,"high")
OR SensorCluster.analog_sensor_power(bus,"low")
This method should be removed if an off-board GPIO extender is used.
"""
# Set appropriate analog sensor power bit in GPIO mask
# using the ControlCluster bank_mask to avoid overwriting any data
reg_data = get_IO_reg(bus, 0x20, cls.power_bank)
if operation == "on":
reg_data = reg_data | 1 << cls.analog_power_pin
elif operation == "off":
reg_data = reg_data & (0b11111111 ^ (1 << cls.analog_power_pin))
else:
raise SensorError(
"Invalid command used while enabling analog sensors")
# Send updated IO mask to output
IO_expander_output(bus, 0x20, cls.power_bank, reg_data)
@classmethod
def get_water_level(cls):
""" This method uses the ADC on the control module to measure
the current water tank level and returns the water volume
remaining in the tank.
For this method, it is assumed that a simple voltage divider
is used to interface the sensor to the ADC module.
Testing shows that the sensor response is not completely linear,
though it is quite close. To make the results more accurate,
a mapping method approximated by a linear fit to data is used.
"""
# ----------
# These values should be updated based on the real system parameters
vref = 4.95
tank_height = 17.5 # in centimeters (height of container)
rref = 2668 # Reference resistor
# ----------
val = 0
for i in range(5):
# Take five readings and do an average
# Fetch value from ADC (0x69 - ch1)
val = get_ADC_value(cls.bus, 0x6c, 1) + val
avg = val / 5
water_sensor_res = rref * avg/(vref - avg)
depth_cm = water_sensor_res * \
(-.0163) + 28.127 # measured transfer adjusted offset
if depth_cm < 1.0: # Below 1cm, the values should not be trusted.
depth_cm = 0
cls.water_remaining = depth_cm / tank_height
# Return the current depth in case the user is interested in
# that parameter alone. (IE for automatic shut-off)
return depth_cm/tank_height
|
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/sense.py | SensorCluster.get_water_level | python | def get_water_level(cls):
""" This method uses the ADC on the control module to measure
the current water tank level and returns the water volume
remaining in the tank.
For this method, it is assumed that a simple voltage divider
is used to interface the sensor to the ADC module.
Testing shows that the sensor response is not completely linear,
though it is quite close. To make the results more accurate,
a mapping method approximated by a linear fit to data is used.
"""
# ----------
# These values should be updated based on the real system parameters
vref = 4.95
tank_height = 17.5 # in centimeters (height of container)
rref = 2668 # Reference resistor
# ----------
val = 0
for i in range(5):
# Take five readings and do an average
# Fetch value from ADC (0x69 - ch1)
val = get_ADC_value(cls.bus, 0x6c, 1) + val
avg = val / 5
water_sensor_res = rref * avg/(vref - avg)
depth_cm = water_sensor_res * \
(-.0163) + 28.127 # measured transfer adjusted offset
if depth_cm < 1.0: # Below 1cm, the values should not be trusted.
depth_cm = 0
cls.water_remaining = depth_cm / tank_height
# Return the current depth in case the user is interested in
# that parameter alone. (IE for automatic shut-off)
return depth_cm/tank_height | This method uses the ADC on the control module to measure
the current water tank level and returns the water volume
remaining in the tank.
For this method, it is assumed that a simple voltage divider
is used to interface the sensor to the ADC module.
Testing shows that the sensor response is not completely linear,
though it is quite close. To make the results more accurate,
a mapping method approximated by a linear fit to data is used. | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/sense.py#L247-L279 | [
"def get_ADC_value(bus, addr, channel):\n \"\"\"\n This method selects a channel and initiates conversion\n The ADC operates at 240 SPS (12 bits) with 1x gain\n One shot conversions are used, meaning a wait period is needed\n in order to acquire new data. This is done via a constant poll\n of the ready bit.\n Upon completion, a voltage value is returned to the caller.\n\n Usage - ADC_start(bus, SensorCluster.ADC_addr, channel_to_read)\n\n IMPORTANT NOTE:\n The ADC uses a 2.048V voltage reference\n\n \"\"\"\n if channel == 1:\n INIT = 0b10000000\n elif channel == 2:\n INIT = 0b10100000\n elif channel == 3:\n INIT = 0b11000000\n elif channel == 4:\n INIT = 0b11100000\n bus.write_byte(addr, INIT)\n data = bus.read_i2c_block_data(addr, 0, 3)\n status = (data[2] & 0b10000000) >> 7\n while(status == 1):\n data = bus.read_i2c_block_data(addr, 0, 3)\n status = (data[2] & 0b10000000) >> 7\n sign = data[0] & 0b00001000\n val = ((data[0] & 0b0000111) << 8) | (data[1])\n if (sign == 1):\n val = (val ^ 0x3ff) + 1 # compute 2s complement for 12 bit val\n # Convert val to a ratiomerical ADC reading\n return float(val) * 2.048 / float(2047)\n"
] | class SensorCluster(object):
'Base class for each individual plant containing sensor info'
__metaclass__ = IterList
_list = []
analog_power_pin = 0
power_bank = 0 # bank and pin used to toggle analog sensor power
temp_addr = 0x48
temp_chan = 3
humidity_addr = 0x27
humidity_chan = 1
lux_addr = 0x39
lux_chan = 0
adc_addr = 0x68
adc_chan = 2
moisture_chan = 1
tank_adc_adr = 0x6c
tank_adc_chan = 0
bus = None
def __init__(self, ID, mux_addr=None):
# Initializes cluster, enumeration, and sets up address info
sensor_addr = import_i2c_addr(SensorCluster.bus)
if (ID < 1 or ID > len(sensor_addr)):
raise I2CBusError("Plant ID out of range.")
self.mux_addr = mux_addr or (sensor_addr[ID-1])
self.ID = ID # Plant number specified by caller
self.temp = 0
self.humidity = 0
self.lux = 0
self.light_ratio = 0
self.soil_moisture = 0
self.acidity = 0
self.timestamp = time() # record time at instantiation
self._list.append(self)
self.update_count = 0
def update_lux(self, extend=0):
""" Communicates with the TSL2550D light sensor and returns a
lux value.
Note that this method contains approximately 1 second of total delay.
This delay is necessary in order to obtain full resolution
compensated lux values.
Alternatively, the device could be put in extended mode,
which drops some resolution in favor of shorter delays.
"""
DEVICE_REG_OUT = 0x1d
LUX_PWR_ON = 0x03
if extend == 1:
LUX_MODE = 0x1d
delay = .08
scale = 5
else:
LUX_MODE = 0x18
delay = .4
scale = 1
LUX_READ_CH0 = 0x43
LUX_READ_CH1 = 0x83
# Select correct I2C mux channel on TCA module
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.lux_chan)
# Make sure lux sensor is powered up.
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_PWR_ON)
lux_on = SensorCluster.bus.read_byte_data(SensorCluster.lux_addr, LUX_PWR_ON)
# Check for successful powerup
if (lux_on == LUX_PWR_ON):
# Send command to initiate ADC on each channel
# Read each channel after the new data is ready
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_MODE)
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH0)
sleep(delay)
adc_ch0 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count0 = get_lux_count(adc_ch0) * scale # 5x for extended mode
SensorCluster.bus.write_byte(SensorCluster.lux_addr, LUX_READ_CH1)
sleep(delay)
adc_ch1 = SensorCluster.bus.read_byte(SensorCluster.lux_addr)
count1 = get_lux_count(adc_ch1) * scale # 5x for extended mode
ratio = count1 / (count0 - count1)
lux = (count0 - count1) * .39 * e**(-.181 * (ratio**2))
self.light_ratio = float(count1)/float(count0)
print("Light ratio Ch1/Ch0: ", self.light_ratio)
self.lux = round(lux, 3)
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise SensorError("The lux sensor is powered down.")
def update_humidity_temp(self):
""" This method utilizes the HIH7xxx sensor to read
humidity and temperature in one call.
"""
# Create mask for STATUS (first two bits of 64 bit wide result)
STATUS = 0b11 << 6
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.humidity_chan)
SensorCluster.bus.write_quick(SensorCluster.humidity_addr) # Begin conversion
sleep(.25)
# wait 100ms to make sure the conversion takes place.
data = SensorCluster.bus.read_i2c_block_data(SensorCluster.humidity_addr, 0, 4)
status = (data[0] & STATUS) >> 6
if status == 0 or status == 1: # will always pass for now.
humidity = round((((data[0] & 0x3f) << 8) |
data[1]) * 100.0 / (2**14 - 2), 3)
self.humidity = humidity
self.temp = (round((((data[2] << 6) + ((data[3] & 0xfc) >> 2))
* 165.0 / 16382.0 - 40.0), 3) * 9/5) + 32
return TCA_select(SensorCluster.bus, self.mux_addr, "off")
else:
raise I2CBusError("Unable to retrieve humidity")
def update_soil_moisture(self):
""" Method will select the ADC module,
turn on the analog sensor, wait for voltage settle,
and then digitize the sensor voltage.
Voltage division/signal loss is accounted for by
scaling up the sensor output.
This may need to be adjusted if a different sensor is used
"""
SensorCluster.analog_sensor_power(SensorCluster.bus, "on") # turn on sensor
sleep(.2)
TCA_select(SensorCluster.bus, self.mux_addr, SensorCluster.adc_chan)
moisture = get_ADC_value(
SensorCluster.bus, SensorCluster.adc_addr, SensorCluster.moisture_chan)
status = TCA_select(SensorCluster.bus, self.mux_addr, "off") # Turn off mux.
SensorCluster.analog_sensor_power(SensorCluster.bus, "off") # turn off sensor
if (moisture >= 0):
soil_moisture = moisture/2.048 # Scale to a percentage value
self.soil_moisture = round(soil_moisture,3)
else:
raise SensorError(
"The soil moisture meter is not configured correctly.")
return status
def update_instance_sensors(self, opt=None):
""" Method runs through all sensor modules and updates
to the latest sensor values.
After running through each sensor module,
The sensor head (the I2C multiplexer), is disabled
in order to avoid address conflicts.
Usage:
plant_sensor_object.updateAllSensors(bus_object)
"""
self.update_count += 1
self.update_lux()
self.update_humidity_temp()
if opt == "all":
try:
self.update_soil_moisture()
except SensorError:
# This could be handled with a repeat request later.
pass
self.timestamp = time()
# disable sensor module
tca_status = TCA_select(SensorCluster.bus, self.mux_addr, "off")
if tca_status != 0:
raise I2CBusError(
"Bus multiplexer was unable to switch off to prevent conflicts")
def sensor_values(self):
"""
Returns the values of all sensors for this cluster
"""
self.update_instance_sensors(opt="all")
return {
"light": self.lux,
"water": self.soil_moisture,
"humidity": self.humidity,
"temperature": self.temp
}
@classmethod
def update_all_sensors(cls, opt=None):
""" Method iterates over all SensorCluster objects and updates
each sensor value and saves the values to the plant record.
- Note that it must receive an open bus object.
Usage:
Update all sensors exluding analog sensors that need power.
- update_all_sensors()
Update all sensors including soil moisture.
- update_all_sensors("all")
"""
for sensorobj in cls:
sensorobj.update_instance_sensors(opt)
@classmethod
def analog_sensor_power(cls, bus, operation):
""" Method that turns on all of the analog sensor modules
Includes all attached soil moisture sensors
Note that all of the SensorCluster object should be attached
in parallel and only 1 GPIO pin is available
to toggle analog sensor power.
The sensor power should be left on for at least 100ms
in order to allow the sensors to stabilize before reading.
Usage: SensorCluster.analog_sensor_power(bus,"high")
OR SensorCluster.analog_sensor_power(bus,"low")
This method should be removed if an off-board GPIO extender is used.
"""
# Set appropriate analog sensor power bit in GPIO mask
# using the ControlCluster bank_mask to avoid overwriting any data
reg_data = get_IO_reg(bus, 0x20, cls.power_bank)
if operation == "on":
reg_data = reg_data | 1 << cls.analog_power_pin
elif operation == "off":
reg_data = reg_data & (0b11111111 ^ (1 << cls.analog_power_pin))
else:
raise SensorError(
"Invalid command used while enabling analog sensors")
# Send updated IO mask to output
IO_expander_output(bus, 0x20, cls.power_bank, reg_data)
@classmethod
def get_water_level(cls):
""" This method uses the ADC on the control module to measure
the current water tank level and returns the water volume
remaining in the tank.
For this method, it is assumed that a simple voltage divider
is used to interface the sensor to the ADC module.
Testing shows that the sensor response is not completely linear,
though it is quite close. To make the results more accurate,
a mapping method approximated by a linear fit to data is used.
"""
# ----------
# These values should be updated based on the real system parameters
vref = 4.95
tank_height = 17.5 # in centimeters (height of container)
rref = 2668 # Reference resistor
# ----------
val = 0
for i in range(5):
# Take five readings and do an average
# Fetch value from ADC (0x69 - ch1)
val = get_ADC_value(cls.bus, 0x6c, 1) + val
avg = val / 5
water_sensor_res = rref * avg/(vref - avg)
depth_cm = water_sensor_res * \
(-.0163) + 28.127 # measured transfer adjusted offset
if depth_cm < 1.0: # Below 1cm, the values should not be trusted.
depth_cm = 0
cls.water_remaining = depth_cm / tank_height
# Return the current depth in case the user is interested in
# that parameter alone. (IE for automatic shut-off)
return depth_cm/tank_height
|
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/control.py | ControlCluster.compile_instance_masks | python | def compile_instance_masks(cls):
# Compute required # of IO expanders needed, clear mask variable.
number_IO_expanders = ((len(cls._list) - 1) / 4) + 1
cls.master_mask = [0, 0] * number_IO_expanders
for ctrlobj in cls:
# Or masks together bank-by-banl
cls.master_mask[ctrlobj.bank] |= ctrlobj.mask
# Handle the pump request seperately
if ctrlobj.pump_request == 1:
cls.master_mask[cls.pump_bank] |= 1 << cls.pump_pin | Compiles instance masks into a master mask that is usable by
the IO expander. Also determines whether or not the pump
should be on.
Method is generalized to support multiple IO expanders
for possible future expansion. | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/control.py#L44-L60 | null | class ControlCluster(object):
""" This class serves as a control module for each plant's
fan, light, and pump valve.
Upon instantiation, the class will use it's ID in order to
generate a GPIO mapping corresponding to the pins on the MCP
IO expander.
Currently, only four plant control sets can be supported. IDs
must be greater than 1 and no higher than 4.
Usage: plant1Control = ControlCluster(1)
This will create the first plant control unit.
Turning on control units individually:
plant1Control.manage("fan", "on")
plant1Control.manage("light", "off")
plant1Control.update()
"""
__metaclass__ = IterList
_list = []
GPIOdict = []
pump_pin = 1 # Pin A1 is assigned to the pump
pump_bank = 0
current_volume = 0
bus = None
@classmethod
def update(self):
""" This method exposes a more simple interface to the IO module
Regardless of what the control instance contains, this method
will transmit the queued IO commands to the IO expander
Usage: plant1Control.update(bus)
"""
ControlCluster.compile_instance_masks()
IO_expander_output(
ControlCluster.bus, self.IOexpander,
self.bank,
ControlCluster.master_mask[self.bank])
if self.bank != ControlCluster.pump_bank:
IO_expander_output(
ControlCluster.bus, self.IOexpander,
ControlCluster.pump_bank,
ControlCluster.master_mask[ControlCluster.pump_bank])
def form_GPIO_map(self):
""" This method creates a dictionary to map plant IDs to
GPIO pins are associated in triples.
Each ID gets a light, a fan, and a mist nozzle.
"""
# Compute bank/pins/IOexpander address based on ID
if self.ID == 1:
self.IOexpander = 0x20
self.bank = 0
self.fan = 2
self.light = 3
self.valve = 4
elif self.ID == 2:
self.IOexpander = 0x20
self.bank = 0
self.fan = 5
self.light = 6
self.valve = 7
elif self.ID == 3:
self.IOexpander = 0x20
self.bank = 1
self.fan = 0
self.light = 1
self.valve = 2
elif self.ID == 4:
self.IOexpander = 0x20
self.bank = 1
self.fan = 3
self.light = 5
self.valve = 6
else:
raise InvalidIOMap(
"Mapping not available for ID: " + str(self.ID))
# Check to make sure reserved pins are not requested
if (self.bank == 0) and (min(self.fan, self.light, self.valve) < 2):
raise InvalidIOMap(
"Pins A0 and A1 are reserved for other functions")
self.GPIO_dict = [{'ID': self.ID, 'bank': self.bank,
'fan': self.fan, 'valve': self.valve, 'light': self.light}]
# Append dictionary to class and resort dictionary by ID # if needed
ControlCluster.GPIOdict.append(self.GPIO_dict)
# ControlCluster.GPIOdict=sorted(
# ControlCluster.GPIOdict, key=itemgetter('ID'))
def manage_light(self, operation):
""" Turns on the lights depending on the operation command
Usage:
manage_lights("on")
"""
return self.manage("light", operation)
def manage_fan(self, operation):
""" Usage:
manageFans("on") # Turn on the fan for plant 1
"""
return self.manage("fan", operation)
def manage_valve(self, operation):
"""Manages turning on the mist pump based on water data from the plant.
We will need to aggregate the total amount of water that the plant
receives so that we can keep track of what it's receiving daily.
This function will need to select a nozzle to open
before turning on the mist pump.
"""
return self.manage("valve", operation)
def manage_pump(self, operation):
"""
Updates control module knowledge of pump requests.
If any sensor module requests water, the pump will turn on.
"""
if operation == "on":
self.controls["pump"] = "on"
elif operation == "off":
self.controls["pump"] = "off"
return True
def manage(self, control, operation):
if control not in {"light", "valve", "fan", "pump"}:
raise IOExpanderFailure(
"Invalid controller")
if operation not in ["on", "off"]:
raise IOExpanderFailure(
"Invalid operation passed to {} controller".format(control))
if control == "pump":
return self.manage_pump(operation)
else:
self.controls[control] = operation
return True
def control(self, on=[], off=[]):
"""
This method serves as the primary interaction point
to the controls interface.
- The 'on' and 'off' arguments can either be a list or a single string.
This allows for both individual device control and batch controls.
Note:
Both the onlist and offlist are optional.
If only one item is being managed, it can be passed as a string.
Usage:
- Turning off all devices:
ctrlobj.control(off="all")
- Turning on all devices:
ctrlobj.control(on="all")
- Turning on the light and fan ONLY (for example)
ctrlobj.control(on=["light", "fan"])
- Turning on the light and turning off the fan (for example)
ctrolobj.control(on="light", off="fan")
"""
controls = {"light", "valve", "fan", "pump"}
def cast_arg(arg):
if type(arg) is str:
if arg == "all":
return controls
else:
return {arg} & controls
else:
return set(arg) & controls
# User has requested individual controls.
for item in cast_arg(on):
self.manage(item, "on")
for item in cast_arg(off):
self.manage(item, "off")
sleep(.01) # Force delay to throttle requests
return self.update()
def restore_state(self):
""" Method should be called on obj. initialization
When called, the method will attempt to restore
IO expander and RPi coherence and restore
local knowledge across a possible power failure
"""
current_mask = get_IO_reg(ControlCluster.bus,
self.IOexpander,
self.bank)
if current_mask & (1 << ControlCluster.pump_pin):
self.manage_pump("on")
if current_mask & (1 << self.fan):
self.manage_fan("on")
if current_mask & (1 << self.light):
self.manage_fan("on")
@property
def mask(self):
def construct_mask(mask, control):
if self.controls[control] == "on":
return mask | 1 << self.GPIO_dict[0][control]
else:
return mask
# probably should hold a constant of these somewhere
controls = ["fan", "light", "valve"]
mask = reduce(construct_mask, controls, 0x0)
# handle pump separately
if self.controls["pump"] == "on":
self.pump_request = 1
else:
self.pump_request = 0
return mask
def __init__(self, ID):
self.ID = ID
self.form_GPIO_map()
self.controls = {"light": "off",
"valve": "off", "fan": "off", "pump": "off"}
self.restore_state()
self._list.append(self)
|
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/control.py | ControlCluster.update | python | def update(self):
ControlCluster.compile_instance_masks()
IO_expander_output(
ControlCluster.bus, self.IOexpander,
self.bank,
ControlCluster.master_mask[self.bank])
if self.bank != ControlCluster.pump_bank:
IO_expander_output(
ControlCluster.bus, self.IOexpander,
ControlCluster.pump_bank,
ControlCluster.master_mask[ControlCluster.pump_bank]) | This method exposes a more simple interface to the IO module
Regardless of what the control instance contains, this method
will transmit the queued IO commands to the IO expander
Usage: plant1Control.update(bus) | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/control.py#L62-L80 | [
"def IO_expander_output(bus, addr, bank, mask):\n \"\"\"\n Method for controlling the GPIO expander via I2C\n which accepts a bank - A(0) or B(1) and a mask\n to push to the pins of the expander.\n\n The method also assumes the the expander is operating\n in sequential mode. If this mode is not used,\n the register addresses will need to be changed.\n\n Usage:\n GPIO_out(bus, GPIO_addr, 0, 0b00011111)\n This call would turn on A0 through A4. \n\n \"\"\"\n IODIR_map = [0x00, 0x01]\n output_map = [0x14, 0x15]\n\n if (bank != 0) and (bank != 1):\n print()\n raise InvalidIOUsage(\"An invalid IO bank has been selected\")\n\n\n IO_direction = IODIR_map[bank]\n output_reg = output_map[bank]\n\n current_status = bus.read_byte_data(addr, output_reg)\n if current_status == mask:\n # This means nothing needs to happen\n print(\"Current control status matches requested controls. \" +\n \"No action is required.\")\n return True\n\n bus.write_byte_data(addr, IO_direction, 0)\n bus.write_byte_data(addr, output_reg, mask)\n",
"def compile_instance_masks(cls):\n \"\"\" Compiles instance masks into a master mask that is usable by\n the IO expander. Also determines whether or not the pump\n should be on. \n Method is generalized to support multiple IO expanders\n for possible future expansion.\n \"\"\"\n # Compute required # of IO expanders needed, clear mask variable.\n number_IO_expanders = ((len(cls._list) - 1) / 4) + 1\n cls.master_mask = [0, 0] * number_IO_expanders\n\n for ctrlobj in cls:\n # Or masks together bank-by-banl\n cls.master_mask[ctrlobj.bank] |= ctrlobj.mask\n # Handle the pump request seperately\n if ctrlobj.pump_request == 1:\n cls.master_mask[cls.pump_bank] |= 1 << cls.pump_pin\n"
] | class ControlCluster(object):
""" This class serves as a control module for each plant's
fan, light, and pump valve.
Upon instantiation, the class will use it's ID in order to
generate a GPIO mapping corresponding to the pins on the MCP
IO expander.
Currently, only four plant control sets can be supported. IDs
must be greater than 1 and no higher than 4.
Usage: plant1Control = ControlCluster(1)
This will create the first plant control unit.
Turning on control units individually:
plant1Control.manage("fan", "on")
plant1Control.manage("light", "off")
plant1Control.update()
"""
__metaclass__ = IterList
_list = []
GPIOdict = []
pump_pin = 1 # Pin A1 is assigned to the pump
pump_bank = 0
current_volume = 0
bus = None
@classmethod
def compile_instance_masks(cls):
""" Compiles instance masks into a master mask that is usable by
the IO expander. Also determines whether or not the pump
should be on.
Method is generalized to support multiple IO expanders
for possible future expansion.
"""
# Compute required # of IO expanders needed, clear mask variable.
number_IO_expanders = ((len(cls._list) - 1) / 4) + 1
cls.master_mask = [0, 0] * number_IO_expanders
for ctrlobj in cls:
# Or masks together bank-by-banl
cls.master_mask[ctrlobj.bank] |= ctrlobj.mask
# Handle the pump request seperately
if ctrlobj.pump_request == 1:
cls.master_mask[cls.pump_bank] |= 1 << cls.pump_pin
def form_GPIO_map(self):
""" This method creates a dictionary to map plant IDs to
GPIO pins are associated in triples.
Each ID gets a light, a fan, and a mist nozzle.
"""
# Compute bank/pins/IOexpander address based on ID
if self.ID == 1:
self.IOexpander = 0x20
self.bank = 0
self.fan = 2
self.light = 3
self.valve = 4
elif self.ID == 2:
self.IOexpander = 0x20
self.bank = 0
self.fan = 5
self.light = 6
self.valve = 7
elif self.ID == 3:
self.IOexpander = 0x20
self.bank = 1
self.fan = 0
self.light = 1
self.valve = 2
elif self.ID == 4:
self.IOexpander = 0x20
self.bank = 1
self.fan = 3
self.light = 5
self.valve = 6
else:
raise InvalidIOMap(
"Mapping not available for ID: " + str(self.ID))
# Check to make sure reserved pins are not requested
if (self.bank == 0) and (min(self.fan, self.light, self.valve) < 2):
raise InvalidIOMap(
"Pins A0 and A1 are reserved for other functions")
self.GPIO_dict = [{'ID': self.ID, 'bank': self.bank,
'fan': self.fan, 'valve': self.valve, 'light': self.light}]
# Append dictionary to class and resort dictionary by ID # if needed
ControlCluster.GPIOdict.append(self.GPIO_dict)
# ControlCluster.GPIOdict=sorted(
# ControlCluster.GPIOdict, key=itemgetter('ID'))
def manage_light(self, operation):
""" Turns on the lights depending on the operation command
Usage:
manage_lights("on")
"""
return self.manage("light", operation)
def manage_fan(self, operation):
""" Usage:
manageFans("on") # Turn on the fan for plant 1
"""
return self.manage("fan", operation)
def manage_valve(self, operation):
"""Manages turning on the mist pump based on water data from the plant.
We will need to aggregate the total amount of water that the plant
receives so that we can keep track of what it's receiving daily.
This function will need to select a nozzle to open
before turning on the mist pump.
"""
return self.manage("valve", operation)
def manage_pump(self, operation):
"""
Updates control module knowledge of pump requests.
If any sensor module requests water, the pump will turn on.
"""
if operation == "on":
self.controls["pump"] = "on"
elif operation == "off":
self.controls["pump"] = "off"
return True
def manage(self, control, operation):
if control not in {"light", "valve", "fan", "pump"}:
raise IOExpanderFailure(
"Invalid controller")
if operation not in ["on", "off"]:
raise IOExpanderFailure(
"Invalid operation passed to {} controller".format(control))
if control == "pump":
return self.manage_pump(operation)
else:
self.controls[control] = operation
return True
def control(self, on=[], off=[]):
"""
This method serves as the primary interaction point
to the controls interface.
- The 'on' and 'off' arguments can either be a list or a single string.
This allows for both individual device control and batch controls.
Note:
Both the onlist and offlist are optional.
If only one item is being managed, it can be passed as a string.
Usage:
- Turning off all devices:
ctrlobj.control(off="all")
- Turning on all devices:
ctrlobj.control(on="all")
- Turning on the light and fan ONLY (for example)
ctrlobj.control(on=["light", "fan"])
- Turning on the light and turning off the fan (for example)
ctrolobj.control(on="light", off="fan")
"""
controls = {"light", "valve", "fan", "pump"}
def cast_arg(arg):
if type(arg) is str:
if arg == "all":
return controls
else:
return {arg} & controls
else:
return set(arg) & controls
# User has requested individual controls.
for item in cast_arg(on):
self.manage(item, "on")
for item in cast_arg(off):
self.manage(item, "off")
sleep(.01) # Force delay to throttle requests
return self.update()
def restore_state(self):
""" Method should be called on obj. initialization
When called, the method will attempt to restore
IO expander and RPi coherence and restore
local knowledge across a possible power failure
"""
current_mask = get_IO_reg(ControlCluster.bus,
self.IOexpander,
self.bank)
if current_mask & (1 << ControlCluster.pump_pin):
self.manage_pump("on")
if current_mask & (1 << self.fan):
self.manage_fan("on")
if current_mask & (1 << self.light):
self.manage_fan("on")
@property
def mask(self):
def construct_mask(mask, control):
if self.controls[control] == "on":
return mask | 1 << self.GPIO_dict[0][control]
else:
return mask
# probably should hold a constant of these somewhere
controls = ["fan", "light", "valve"]
mask = reduce(construct_mask, controls, 0x0)
# handle pump separately
if self.controls["pump"] == "on":
self.pump_request = 1
else:
self.pump_request = 0
return mask
def __init__(self, ID):
self.ID = ID
self.form_GPIO_map()
self.controls = {"light": "off",
"valve": "off", "fan": "off", "pump": "off"}
self.restore_state()
self._list.append(self)
|
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/control.py | ControlCluster.form_GPIO_map | python | def form_GPIO_map(self):
# Compute bank/pins/IOexpander address based on ID
if self.ID == 1:
self.IOexpander = 0x20
self.bank = 0
self.fan = 2
self.light = 3
self.valve = 4
elif self.ID == 2:
self.IOexpander = 0x20
self.bank = 0
self.fan = 5
self.light = 6
self.valve = 7
elif self.ID == 3:
self.IOexpander = 0x20
self.bank = 1
self.fan = 0
self.light = 1
self.valve = 2
elif self.ID == 4:
self.IOexpander = 0x20
self.bank = 1
self.fan = 3
self.light = 5
self.valve = 6
else:
raise InvalidIOMap(
"Mapping not available for ID: " + str(self.ID))
# Check to make sure reserved pins are not requested
if (self.bank == 0) and (min(self.fan, self.light, self.valve) < 2):
raise InvalidIOMap(
"Pins A0 and A1 are reserved for other functions")
self.GPIO_dict = [{'ID': self.ID, 'bank': self.bank,
'fan': self.fan, 'valve': self.valve, 'light': self.light}]
# Append dictionary to class and resort dictionary by ID # if needed
ControlCluster.GPIOdict.append(self.GPIO_dict) | This method creates a dictionary to map plant IDs to
GPIO pins are associated in triples.
Each ID gets a light, a fan, and a mist nozzle. | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/control.py#L82-L125 | null | class ControlCluster(object):
""" This class serves as a control module for each plant's
fan, light, and pump valve.
Upon instantiation, the class will use it's ID in order to
generate a GPIO mapping corresponding to the pins on the MCP
IO expander.
Currently, only four plant control sets can be supported. IDs
must be greater than 1 and no higher than 4.
Usage: plant1Control = ControlCluster(1)
This will create the first plant control unit.
Turning on control units individually:
plant1Control.manage("fan", "on")
plant1Control.manage("light", "off")
plant1Control.update()
"""
__metaclass__ = IterList
_list = []
GPIOdict = []
pump_pin = 1 # Pin A1 is assigned to the pump
pump_bank = 0
current_volume = 0
bus = None
@classmethod
def compile_instance_masks(cls):
""" Compiles instance masks into a master mask that is usable by
the IO expander. Also determines whether or not the pump
should be on.
Method is generalized to support multiple IO expanders
for possible future expansion.
"""
# Compute required # of IO expanders needed, clear mask variable.
number_IO_expanders = ((len(cls._list) - 1) / 4) + 1
cls.master_mask = [0, 0] * number_IO_expanders
for ctrlobj in cls:
# Or masks together bank-by-banl
cls.master_mask[ctrlobj.bank] |= ctrlobj.mask
# Handle the pump request seperately
if ctrlobj.pump_request == 1:
cls.master_mask[cls.pump_bank] |= 1 << cls.pump_pin
def update(self):
""" This method exposes a more simple interface to the IO module
Regardless of what the control instance contains, this method
will transmit the queued IO commands to the IO expander
Usage: plant1Control.update(bus)
"""
ControlCluster.compile_instance_masks()
IO_expander_output(
ControlCluster.bus, self.IOexpander,
self.bank,
ControlCluster.master_mask[self.bank])
if self.bank != ControlCluster.pump_bank:
IO_expander_output(
ControlCluster.bus, self.IOexpander,
ControlCluster.pump_bank,
ControlCluster.master_mask[ControlCluster.pump_bank])
# ControlCluster.GPIOdict=sorted(
# ControlCluster.GPIOdict, key=itemgetter('ID'))
def manage_light(self, operation):
""" Turns on the lights depending on the operation command
Usage:
manage_lights("on")
"""
return self.manage("light", operation)
def manage_fan(self, operation):
""" Usage:
manageFans("on") # Turn on the fan for plant 1
"""
return self.manage("fan", operation)
def manage_valve(self, operation):
"""Manages turning on the mist pump based on water data from the plant.
We will need to aggregate the total amount of water that the plant
receives so that we can keep track of what it's receiving daily.
This function will need to select a nozzle to open
before turning on the mist pump.
"""
return self.manage("valve", operation)
def manage_pump(self, operation):
"""
Updates control module knowledge of pump requests.
If any sensor module requests water, the pump will turn on.
"""
if operation == "on":
self.controls["pump"] = "on"
elif operation == "off":
self.controls["pump"] = "off"
return True
def manage(self, control, operation):
if control not in {"light", "valve", "fan", "pump"}:
raise IOExpanderFailure(
"Invalid controller")
if operation not in ["on", "off"]:
raise IOExpanderFailure(
"Invalid operation passed to {} controller".format(control))
if control == "pump":
return self.manage_pump(operation)
else:
self.controls[control] = operation
return True
def control(self, on=[], off=[]):
"""
This method serves as the primary interaction point
to the controls interface.
- The 'on' and 'off' arguments can either be a list or a single string.
This allows for both individual device control and batch controls.
Note:
Both the onlist and offlist are optional.
If only one item is being managed, it can be passed as a string.
Usage:
- Turning off all devices:
ctrlobj.control(off="all")
- Turning on all devices:
ctrlobj.control(on="all")
- Turning on the light and fan ONLY (for example)
ctrlobj.control(on=["light", "fan"])
- Turning on the light and turning off the fan (for example)
ctrolobj.control(on="light", off="fan")
"""
controls = {"light", "valve", "fan", "pump"}
def cast_arg(arg):
if type(arg) is str:
if arg == "all":
return controls
else:
return {arg} & controls
else:
return set(arg) & controls
# User has requested individual controls.
for item in cast_arg(on):
self.manage(item, "on")
for item in cast_arg(off):
self.manage(item, "off")
sleep(.01) # Force delay to throttle requests
return self.update()
def restore_state(self):
""" Method should be called on obj. initialization
When called, the method will attempt to restore
IO expander and RPi coherence and restore
local knowledge across a possible power failure
"""
current_mask = get_IO_reg(ControlCluster.bus,
self.IOexpander,
self.bank)
if current_mask & (1 << ControlCluster.pump_pin):
self.manage_pump("on")
if current_mask & (1 << self.fan):
self.manage_fan("on")
if current_mask & (1 << self.light):
self.manage_fan("on")
@property
def mask(self):
def construct_mask(mask, control):
if self.controls[control] == "on":
return mask | 1 << self.GPIO_dict[0][control]
else:
return mask
# probably should hold a constant of these somewhere
controls = ["fan", "light", "valve"]
mask = reduce(construct_mask, controls, 0x0)
# handle pump separately
if self.controls["pump"] == "on":
self.pump_request = 1
else:
self.pump_request = 0
return mask
def __init__(self, ID):
self.ID = ID
self.form_GPIO_map()
self.controls = {"light": "off",
"valve": "off", "fan": "off", "pump": "off"}
self.restore_state()
self._list.append(self)
|
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/control.py | ControlCluster.manage_pump | python | def manage_pump(self, operation):
if operation == "on":
self.controls["pump"] = "on"
elif operation == "off":
self.controls["pump"] = "off"
return True | Updates control module knowledge of pump requests.
If any sensor module requests water, the pump will turn on. | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/control.py#L152-L163 | null | class ControlCluster(object):
""" This class serves as a control module for each plant's
fan, light, and pump valve.
Upon instantiation, the class will use it's ID in order to
generate a GPIO mapping corresponding to the pins on the MCP
IO expander.
Currently, only four plant control sets can be supported. IDs
must be greater than 1 and no higher than 4.
Usage: plant1Control = ControlCluster(1)
This will create the first plant control unit.
Turning on control units individually:
plant1Control.manage("fan", "on")
plant1Control.manage("light", "off")
plant1Control.update()
"""
__metaclass__ = IterList
_list = []
GPIOdict = []
pump_pin = 1 # Pin A1 is assigned to the pump
pump_bank = 0
current_volume = 0
bus = None
@classmethod
def compile_instance_masks(cls):
""" Compiles instance masks into a master mask that is usable by
the IO expander. Also determines whether or not the pump
should be on.
Method is generalized to support multiple IO expanders
for possible future expansion.
"""
# Compute required # of IO expanders needed, clear mask variable.
number_IO_expanders = ((len(cls._list) - 1) / 4) + 1
cls.master_mask = [0, 0] * number_IO_expanders
for ctrlobj in cls:
# Or masks together bank-by-banl
cls.master_mask[ctrlobj.bank] |= ctrlobj.mask
# Handle the pump request seperately
if ctrlobj.pump_request == 1:
cls.master_mask[cls.pump_bank] |= 1 << cls.pump_pin
def update(self):
""" This method exposes a more simple interface to the IO module
Regardless of what the control instance contains, this method
will transmit the queued IO commands to the IO expander
Usage: plant1Control.update(bus)
"""
ControlCluster.compile_instance_masks()
IO_expander_output(
ControlCluster.bus, self.IOexpander,
self.bank,
ControlCluster.master_mask[self.bank])
if self.bank != ControlCluster.pump_bank:
IO_expander_output(
ControlCluster.bus, self.IOexpander,
ControlCluster.pump_bank,
ControlCluster.master_mask[ControlCluster.pump_bank])
def form_GPIO_map(self):
""" This method creates a dictionary to map plant IDs to
GPIO pins are associated in triples.
Each ID gets a light, a fan, and a mist nozzle.
"""
# Compute bank/pins/IOexpander address based on ID
if self.ID == 1:
self.IOexpander = 0x20
self.bank = 0
self.fan = 2
self.light = 3
self.valve = 4
elif self.ID == 2:
self.IOexpander = 0x20
self.bank = 0
self.fan = 5
self.light = 6
self.valve = 7
elif self.ID == 3:
self.IOexpander = 0x20
self.bank = 1
self.fan = 0
self.light = 1
self.valve = 2
elif self.ID == 4:
self.IOexpander = 0x20
self.bank = 1
self.fan = 3
self.light = 5
self.valve = 6
else:
raise InvalidIOMap(
"Mapping not available for ID: " + str(self.ID))
# Check to make sure reserved pins are not requested
if (self.bank == 0) and (min(self.fan, self.light, self.valve) < 2):
raise InvalidIOMap(
"Pins A0 and A1 are reserved for other functions")
self.GPIO_dict = [{'ID': self.ID, 'bank': self.bank,
'fan': self.fan, 'valve': self.valve, 'light': self.light}]
# Append dictionary to class and resort dictionary by ID # if needed
ControlCluster.GPIOdict.append(self.GPIO_dict)
# ControlCluster.GPIOdict=sorted(
# ControlCluster.GPIOdict, key=itemgetter('ID'))
def manage_light(self, operation):
""" Turns on the lights depending on the operation command
Usage:
            manage_light("on")
"""
return self.manage("light", operation)
def manage_fan(self, operation):
""" Usage:
manageFans("on") # Turn on the fan for plant 1
"""
return self.manage("fan", operation)
def manage_valve(self, operation):
"""Manages turning on the mist pump based on water data from the plant.
We will need to aggregate the total amount of water that the plant
receives so that we can keep track of what it's receiving daily.
This function will need to select a nozzle to open
before turning on the mist pump.
"""
return self.manage("valve", operation)
def manage(self, control, operation):
if control not in {"light", "valve", "fan", "pump"}:
raise IOExpanderFailure(
"Invalid controller")
if operation not in ["on", "off"]:
raise IOExpanderFailure(
"Invalid operation passed to {} controller".format(control))
if control == "pump":
return self.manage_pump(operation)
else:
self.controls[control] = operation
return True
def control(self, on=[], off=[]):
"""
This method serves as the primary interaction point
to the controls interface.
- The 'on' and 'off' arguments can either be a list or a single string.
This allows for both individual device control and batch controls.
Note:
Both the onlist and offlist are optional.
If only one item is being managed, it can be passed as a string.
Usage:
- Turning off all devices:
ctrlobj.control(off="all")
- Turning on all devices:
ctrlobj.control(on="all")
- Turning on the light and fan ONLY (for example)
ctrlobj.control(on=["light", "fan"])
- Turning on the light and turning off the fan (for example)
            ctrlobj.control(on="light", off="fan")
"""
controls = {"light", "valve", "fan", "pump"}
def cast_arg(arg):
if type(arg) is str:
if arg == "all":
return controls
else:
return {arg} & controls
else:
return set(arg) & controls
# User has requested individual controls.
for item in cast_arg(on):
self.manage(item, "on")
for item in cast_arg(off):
self.manage(item, "off")
sleep(.01) # Force delay to throttle requests
return self.update()
def restore_state(self):
""" Method should be called on obj. initialization
When called, the method will attempt to restore
IO expander and RPi coherence and restore
local knowledge across a possible power failure
"""
current_mask = get_IO_reg(ControlCluster.bus,
self.IOexpander,
self.bank)
if current_mask & (1 << ControlCluster.pump_pin):
self.manage_pump("on")
if current_mask & (1 << self.fan):
self.manage_fan("on")
if current_mask & (1 << self.light):
self.manage_fan("on")
@property
def mask(self):
def construct_mask(mask, control):
if self.controls[control] == "on":
return mask | 1 << self.GPIO_dict[0][control]
else:
return mask
# probably should hold a constant of these somewhere
controls = ["fan", "light", "valve"]
mask = reduce(construct_mask, controls, 0x0)
# handle pump separately
if self.controls["pump"] == "on":
self.pump_request = 1
else:
self.pump_request = 0
return mask
def __init__(self, ID):
self.ID = ID
self.form_GPIO_map()
self.controls = {"light": "off",
"valve": "off", "fan": "off", "pump": "off"}
self.restore_state()
self._list.append(self)
|
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/control.py | ControlCluster.control | python | def control(self, on=[], off=[]):
controls = {"light", "valve", "fan", "pump"}
def cast_arg(arg):
if type(arg) is str:
if arg == "all":
return controls
else:
return {arg} & controls
else:
return set(arg) & controls
# User has requested individual controls.
for item in cast_arg(on):
self.manage(item, "on")
for item in cast_arg(off):
self.manage(item, "off")
sleep(.01) # Force delay to throttle requests
return self.update() | This method serves as the primary interaction point
to the controls interface.
- The 'on' and 'off' arguments can either be a list or a single string.
This allows for both individual device control and batch controls.
Note:
Both the onlist and offlist are optional.
If only one item is being managed, it can be passed as a string.
Usage:
- Turning off all devices:
ctrlobj.control(off="all")
- Turning on all devices:
ctrlobj.control(on="all")
- Turning on the light and fan ONLY (for example)
ctrlobj.control(on=["light", "fan"])
- Turning on the light and turning off the fan (for example)
ctrolobj.control(on="light", off="fan") | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/control.py#L178-L219 | [
"def update(self):\n \"\"\" This method exposes a more simple interface to the IO module\n Regardless of what the control instance contains, this method\n will transmit the queued IO commands to the IO expander\n\n Usage: plant1Control.update(bus)\n \"\"\"\n ControlCluster.compile_instance_masks()\n\n IO_expander_output(\n ControlCluster.bus, self.IOexpander,\n self.bank,\n ControlCluster.master_mask[self.bank])\n\n if self.bank != ControlCluster.pump_bank:\n IO_expander_output(\n ControlCluster.bus, self.IOexpander,\n ControlCluster.pump_bank,\n ControlCluster.master_mask[ControlCluster.pump_bank])\n",
"def manage(self, control, operation):\n if control not in {\"light\", \"valve\", \"fan\", \"pump\"}:\n raise IOExpanderFailure(\n \"Invalid controller\")\n if operation not in [\"on\", \"off\"]:\n raise IOExpanderFailure(\n \"Invalid operation passed to {} controller\".format(control))\n if control == \"pump\":\n return self.manage_pump(operation)\n else:\n self.controls[control] = operation\n return True\n",
"def cast_arg(arg):\n if type(arg) is str:\n if arg == \"all\":\n return controls\n else:\n return {arg} & controls\n else:\n return set(arg) & controls\n"
] | class ControlCluster(object):
""" This class serves as a control module for each plant's
fan, light, and pump valve.
    Upon instantiation, the class will use its ID in order to
generate a GPIO mapping corresponding to the pins on the MCP
IO expander.
Currently, only four plant control sets can be supported. IDs
must be greater than 1 and no higher than 4.
Usage: plant1Control = ControlCluster(1)
This will create the first plant control unit.
Turning on control units individually:
plant1Control.manage("fan", "on")
plant1Control.manage("light", "off")
plant1Control.update()
"""
__metaclass__ = IterList
_list = []
GPIOdict = []
pump_pin = 1 # Pin A1 is assigned to the pump
pump_bank = 0
current_volume = 0
bus = None
@classmethod
def compile_instance_masks(cls):
""" Compiles instance masks into a master mask that is usable by
the IO expander. Also determines whether or not the pump
should be on.
Method is generalized to support multiple IO expanders
for possible future expansion.
"""
# Compute required # of IO expanders needed, clear mask variable.
number_IO_expanders = ((len(cls._list) - 1) / 4) + 1
cls.master_mask = [0, 0] * number_IO_expanders
for ctrlobj in cls:
            # OR masks together bank-by-bank
cls.master_mask[ctrlobj.bank] |= ctrlobj.mask
            # Handle the pump request separately
if ctrlobj.pump_request == 1:
cls.master_mask[cls.pump_bank] |= 1 << cls.pump_pin
def update(self):
""" This method exposes a more simple interface to the IO module
Regardless of what the control instance contains, this method
will transmit the queued IO commands to the IO expander
Usage: plant1Control.update(bus)
"""
ControlCluster.compile_instance_masks()
IO_expander_output(
ControlCluster.bus, self.IOexpander,
self.bank,
ControlCluster.master_mask[self.bank])
if self.bank != ControlCluster.pump_bank:
IO_expander_output(
ControlCluster.bus, self.IOexpander,
ControlCluster.pump_bank,
ControlCluster.master_mask[ControlCluster.pump_bank])
def form_GPIO_map(self):
""" This method creates a dictionary to map plant IDs to
GPIO pins are associated in triples.
Each ID gets a light, a fan, and a mist nozzle.
"""
# Compute bank/pins/IOexpander address based on ID
if self.ID == 1:
self.IOexpander = 0x20
self.bank = 0
self.fan = 2
self.light = 3
self.valve = 4
elif self.ID == 2:
self.IOexpander = 0x20
self.bank = 0
self.fan = 5
self.light = 6
self.valve = 7
elif self.ID == 3:
self.IOexpander = 0x20
self.bank = 1
self.fan = 0
self.light = 1
self.valve = 2
elif self.ID == 4:
self.IOexpander = 0x20
self.bank = 1
self.fan = 3
self.light = 5
self.valve = 6
else:
raise InvalidIOMap(
"Mapping not available for ID: " + str(self.ID))
# Check to make sure reserved pins are not requested
if (self.bank == 0) and (min(self.fan, self.light, self.valve) < 2):
raise InvalidIOMap(
"Pins A0 and A1 are reserved for other functions")
self.GPIO_dict = [{'ID': self.ID, 'bank': self.bank,
'fan': self.fan, 'valve': self.valve, 'light': self.light}]
# Append dictionary to class and resort dictionary by ID # if needed
ControlCluster.GPIOdict.append(self.GPIO_dict)
# ControlCluster.GPIOdict=sorted(
# ControlCluster.GPIOdict, key=itemgetter('ID'))
def manage_light(self, operation):
""" Turns on the lights depending on the operation command
Usage:
            manage_light("on")
"""
return self.manage("light", operation)
def manage_fan(self, operation):
""" Usage:
manageFans("on") # Turn on the fan for plant 1
"""
return self.manage("fan", operation)
def manage_valve(self, operation):
"""Manages turning on the mist pump based on water data from the plant.
We will need to aggregate the total amount of water that the plant
receives so that we can keep track of what it's receiving daily.
This function will need to select a nozzle to open
before turning on the mist pump.
"""
return self.manage("valve", operation)
def manage_pump(self, operation):
"""
Updates control module knowledge of pump requests.
If any sensor module requests water, the pump will turn on.
"""
if operation == "on":
self.controls["pump"] = "on"
elif operation == "off":
self.controls["pump"] = "off"
return True
def manage(self, control, operation):
if control not in {"light", "valve", "fan", "pump"}:
raise IOExpanderFailure(
"Invalid controller")
if operation not in ["on", "off"]:
raise IOExpanderFailure(
"Invalid operation passed to {} controller".format(control))
if control == "pump":
return self.manage_pump(operation)
else:
self.controls[control] = operation
return True
def restore_state(self):
""" Method should be called on obj. initialization
When called, the method will attempt to restore
IO expander and RPi coherence and restore
local knowledge across a possible power failure
"""
current_mask = get_IO_reg(ControlCluster.bus,
self.IOexpander,
self.bank)
if current_mask & (1 << ControlCluster.pump_pin):
self.manage_pump("on")
if current_mask & (1 << self.fan):
self.manage_fan("on")
if current_mask & (1 << self.light):
self.manage_fan("on")
@property
def mask(self):
def construct_mask(mask, control):
if self.controls[control] == "on":
return mask | 1 << self.GPIO_dict[0][control]
else:
return mask
# probably should hold a constant of these somewhere
controls = ["fan", "light", "valve"]
mask = reduce(construct_mask, controls, 0x0)
# handle pump separately
if self.controls["pump"] == "on":
self.pump_request = 1
else:
self.pump_request = 0
return mask
def __init__(self, ID):
self.ID = ID
self.form_GPIO_map()
self.controls = {"light": "off",
"valve": "off", "fan": "off", "pump": "off"}
self.restore_state()
self._list.append(self)
|
ECESeniorDesign/greenhouse_envmgmt | greenhouse_envmgmt/control.py | ControlCluster.restore_state | python | def restore_state(self):
current_mask = get_IO_reg(ControlCluster.bus,
self.IOexpander,
self.bank)
if current_mask & (1 << ControlCluster.pump_pin):
self.manage_pump("on")
if current_mask & (1 << self.fan):
self.manage_fan("on")
if current_mask & (1 << self.light):
self.manage_fan("on") | Method should be called on obj. initialization
When called, the method will attempt to restore
IO expander and RPi coherence and restore
local knowledge across a possible power failure | train | https://github.com/ECESeniorDesign/greenhouse_envmgmt/blob/864e0ce98bfb220f9954026913a5470536de9818/greenhouse_envmgmt/control.py#L221-L235 | [
"def get_IO_reg(bus, addr, bank):\n \"\"\"\n Method retrieves the register corresponding to respective bank (0 or 1)\n \"\"\"\n output_map = [0x14, 0x15]\n if (bank != 0) and (bank != 1):\n print()\n raise InvalidIOUsage(\"An invalid IO bank has been selected\")\n\n output_reg = output_map[bank]\n current_status = bus.read_byte_data(addr, output_reg)\n return current_status\n",
"def manage_fan(self, operation):\n \"\"\" Usage:\n manageFans(\"on\") # Turn on the fan for plant 1\n \"\"\"\n return self.manage(\"fan\", operation)\n",
"def manage_pump(self, operation):\n \"\"\"\n Updates control module knowledge of pump requests.\n If any sensor module requests water, the pump will turn on.\n\n \"\"\"\n if operation == \"on\":\n self.controls[\"pump\"] = \"on\"\n elif operation == \"off\":\n self.controls[\"pump\"] = \"off\"\n\n return True\n"
] | class ControlCluster(object):
""" This class serves as a control module for each plant's
fan, light, and pump valve.
    Upon instantiation, the class will use its ID in order to
generate a GPIO mapping corresponding to the pins on the MCP
IO expander.
Currently, only four plant control sets can be supported. IDs
must be greater than 1 and no higher than 4.
Usage: plant1Control = ControlCluster(1)
This will create the first plant control unit.
Turning on control units individually:
plant1Control.manage("fan", "on")
plant1Control.manage("light", "off")
plant1Control.update()
"""
__metaclass__ = IterList
_list = []
GPIOdict = []
pump_pin = 1 # Pin A1 is assigned to the pump
pump_bank = 0
current_volume = 0
bus = None
@classmethod
def compile_instance_masks(cls):
""" Compiles instance masks into a master mask that is usable by
the IO expander. Also determines whether or not the pump
should be on.
Method is generalized to support multiple IO expanders
for possible future expansion.
"""
# Compute required # of IO expanders needed, clear mask variable.
number_IO_expanders = ((len(cls._list) - 1) / 4) + 1
cls.master_mask = [0, 0] * number_IO_expanders
for ctrlobj in cls:
            # OR masks together bank-by-bank
cls.master_mask[ctrlobj.bank] |= ctrlobj.mask
            # Handle the pump request separately
if ctrlobj.pump_request == 1:
cls.master_mask[cls.pump_bank] |= 1 << cls.pump_pin
def update(self):
""" This method exposes a more simple interface to the IO module
Regardless of what the control instance contains, this method
will transmit the queued IO commands to the IO expander
Usage: plant1Control.update(bus)
"""
ControlCluster.compile_instance_masks()
IO_expander_output(
ControlCluster.bus, self.IOexpander,
self.bank,
ControlCluster.master_mask[self.bank])
if self.bank != ControlCluster.pump_bank:
IO_expander_output(
ControlCluster.bus, self.IOexpander,
ControlCluster.pump_bank,
ControlCluster.master_mask[ControlCluster.pump_bank])
def form_GPIO_map(self):
""" This method creates a dictionary to map plant IDs to
GPIO pins are associated in triples.
Each ID gets a light, a fan, and a mist nozzle.
"""
# Compute bank/pins/IOexpander address based on ID
if self.ID == 1:
self.IOexpander = 0x20
self.bank = 0
self.fan = 2
self.light = 3
self.valve = 4
elif self.ID == 2:
self.IOexpander = 0x20
self.bank = 0
self.fan = 5
self.light = 6
self.valve = 7
elif self.ID == 3:
self.IOexpander = 0x20
self.bank = 1
self.fan = 0
self.light = 1
self.valve = 2
elif self.ID == 4:
self.IOexpander = 0x20
self.bank = 1
self.fan = 3
self.light = 5
self.valve = 6
else:
raise InvalidIOMap(
"Mapping not available for ID: " + str(self.ID))
# Check to make sure reserved pins are not requested
if (self.bank == 0) and (min(self.fan, self.light, self.valve) < 2):
raise InvalidIOMap(
"Pins A0 and A1 are reserved for other functions")
self.GPIO_dict = [{'ID': self.ID, 'bank': self.bank,
'fan': self.fan, 'valve': self.valve, 'light': self.light}]
# Append dictionary to class and resort dictionary by ID # if needed
ControlCluster.GPIOdict.append(self.GPIO_dict)
# ControlCluster.GPIOdict=sorted(
# ControlCluster.GPIOdict, key=itemgetter('ID'))
def manage_light(self, operation):
""" Turns on the lights depending on the operation command
Usage:
            manage_light("on")
"""
return self.manage("light", operation)
def manage_fan(self, operation):
""" Usage:
manageFans("on") # Turn on the fan for plant 1
"""
return self.manage("fan", operation)
def manage_valve(self, operation):
"""Manages turning on the mist pump based on water data from the plant.
We will need to aggregate the total amount of water that the plant
receives so that we can keep track of what it's receiving daily.
This function will need to select a nozzle to open
before turning on the mist pump.
"""
return self.manage("valve", operation)
def manage_pump(self, operation):
"""
Updates control module knowledge of pump requests.
If any sensor module requests water, the pump will turn on.
"""
if operation == "on":
self.controls["pump"] = "on"
elif operation == "off":
self.controls["pump"] = "off"
return True
def manage(self, control, operation):
if control not in {"light", "valve", "fan", "pump"}:
raise IOExpanderFailure(
"Invalid controller")
if operation not in ["on", "off"]:
raise IOExpanderFailure(
"Invalid operation passed to {} controller".format(control))
if control == "pump":
return self.manage_pump(operation)
else:
self.controls[control] = operation
return True
def control(self, on=[], off=[]):
"""
This method serves as the primary interaction point
to the controls interface.
- The 'on' and 'off' arguments can either be a list or a single string.
This allows for both individual device control and batch controls.
Note:
Both the onlist and offlist are optional.
If only one item is being managed, it can be passed as a string.
Usage:
- Turning off all devices:
ctrlobj.control(off="all")
- Turning on all devices:
ctrlobj.control(on="all")
- Turning on the light and fan ONLY (for example)
ctrlobj.control(on=["light", "fan"])
- Turning on the light and turning off the fan (for example)
            ctrlobj.control(on="light", off="fan")
"""
controls = {"light", "valve", "fan", "pump"}
def cast_arg(arg):
if type(arg) is str:
if arg == "all":
return controls
else:
return {arg} & controls
else:
return set(arg) & controls
# User has requested individual controls.
for item in cast_arg(on):
self.manage(item, "on")
for item in cast_arg(off):
self.manage(item, "off")
sleep(.01) # Force delay to throttle requests
return self.update()
@property
def mask(self):
def construct_mask(mask, control):
if self.controls[control] == "on":
return mask | 1 << self.GPIO_dict[0][control]
else:
return mask
# probably should hold a constant of these somewhere
controls = ["fan", "light", "valve"]
mask = reduce(construct_mask, controls, 0x0)
# handle pump separately
if self.controls["pump"] == "on":
self.pump_request = 1
else:
self.pump_request = 0
return mask
def __init__(self, ID):
self.ID = ID
self.form_GPIO_map()
self.controls = {"light": "off",
"valve": "off", "fan": "off", "pump": "off"}
self.restore_state()
self._list.append(self)
|
brbsix/pip-utils | pip_utils/dependants.py | command_dependants | python | def command_dependants(options):
dependants = sorted(
get_dependants(options.package.project_name),
key=lambda n: n.lower()
)
if dependants:
print(*dependants, sep='\n') | Command launched by CLI. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/dependants.py#L18-L26 | [
"def get_dependants(project_name):\n \"\"\"Yield dependants of `project_name`.\"\"\"\n for package in get_installed_distributions(user_only=ENABLE_USER_SITE):\n if is_dependant(package, project_name):\n yield package.project_name\n"
] | # -*- coding: utf-8 -*-
"""List dependants of package"""
# Python 2 forwards-compatibility
from __future__ import absolute_import, print_function
# standard imports
from site import ENABLE_USER_SITE
# external imports
try:
from pip._internal.utils.misc import get_installed_distributions
except ImportError:
# legacy support for pip 8 & 9
from pip import get_installed_distributions
def get_dependants(project_name):
    """Yield the names of installed distributions that require *project_name*."""
    installed = get_installed_distributions(user_only=ENABLE_USER_SITE)
    for dist in installed:
        if is_dependant(dist, project_name):
            yield dist.project_name
def is_dependant(package, project_name):
    """Return True when *package* lists *project_name* among its requirements.

    The project-name comparison is case-insensitive.
    """
    wanted = project_name.lower()
    return any(req.project_name.lower() == wanted
               for req in package.requires())
|
brbsix/pip-utils | pip_utils/dependants.py | get_dependants | python | def get_dependants(project_name):
for package in get_installed_distributions(user_only=ENABLE_USER_SITE):
if is_dependant(package, project_name):
yield package.project_name | Yield dependants of `project_name`. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/dependants.py#L29-L33 | [
"def is_dependant(package, project_name):\n \"\"\"Determine whether `package` is a dependant of `project_name`.\"\"\"\n for requirement in package.requires():\n # perform case-insensitive matching\n if requirement.project_name.lower() == project_name.lower():\n return True\n return False\n"
] | # -*- coding: utf-8 -*-
"""List dependants of package"""
# Python 2 forwards-compatibility
from __future__ import absolute_import, print_function
# standard imports
from site import ENABLE_USER_SITE
# external imports
try:
from pip._internal.utils.misc import get_installed_distributions
except ImportError:
# legacy support for pip 8 & 9
from pip import get_installed_distributions
def command_dependants(options):
"""Command launched by CLI."""
dependants = sorted(
get_dependants(options.package.project_name),
key=lambda n: n.lower()
)
if dependants:
print(*dependants, sep='\n')
def is_dependant(package, project_name):
"""Determine whether `package` is a dependant of `project_name`."""
for requirement in package.requires():
# perform case-insensitive matching
if requirement.project_name.lower() == project_name.lower():
return True
return False
|
brbsix/pip-utils | pip_utils/dependants.py | is_dependant | python | def is_dependant(package, project_name):
for requirement in package.requires():
# perform case-insensitive matching
if requirement.project_name.lower() == project_name.lower():
return True
return False | Determine whether `package` is a dependant of `project_name`. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/dependants.py#L36-L42 | null | # -*- coding: utf-8 -*-
"""List dependants of package"""
# Python 2 forwards-compatibility
from __future__ import absolute_import, print_function
# standard imports
from site import ENABLE_USER_SITE
# external imports
try:
from pip._internal.utils.misc import get_installed_distributions
except ImportError:
# legacy support for pip 8 & 9
from pip import get_installed_distributions
def command_dependants(options):
"""Command launched by CLI."""
dependants = sorted(
get_dependants(options.package.project_name),
key=lambda n: n.lower()
)
if dependants:
print(*dependants, sep='\n')
def get_dependants(project_name):
"""Yield dependants of `project_name`."""
for package in get_installed_distributions(user_only=ENABLE_USER_SITE):
if is_dependant(package, project_name):
yield package.project_name
|
brbsix/pip-utils | pip_utils/locate.py | command_locate | python | def command_locate(options):
matches = find_owners(options.file.name)
if matches:
print(*matches, sep='\n') | Command launched by CLI. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/locate.py#L21-L26 | [
"def find_owners(path):\n \"\"\"Return the package(s) that file belongs to.\"\"\"\n abspath = os.path.abspath(path)\n\n packages = search_packages_info(\n sorted((d.project_name for d in\n get_installed_distributions(user_only=ENABLE_USER_SITE)),\n key=lambda d: d.lower()))\n\n return [p['name'] for p in packages if is_owner(p, abspath)]\n"
] | # -*- coding: utf-8 -*-
"""Identify package that file belongs to"""
# Python 2 forwards-compatibility
from __future__ import absolute_import, print_function
# standard imports
import os
from site import ENABLE_USER_SITE
# external imports
try:
from pip._internal.commands.show import search_packages_info
from pip._internal.utils.misc import get_installed_distributions
except ImportError:
# legacy support for pip 8 & 9
from pip.commands.show import search_packages_info
from pip import get_installed_distributions
def find_owners(path):
"""Return the package(s) that file belongs to."""
abspath = os.path.abspath(path)
packages = search_packages_info(
sorted((d.project_name for d in
get_installed_distributions(user_only=ENABLE_USER_SITE)),
key=lambda d: d.lower()))
return [p['name'] for p in packages if is_owner(p, abspath)]
def is_owner(package, abspath):
    """Return True if *abspath* is one of the files installed by *package*.

    Packages lacking ``files``/``location`` metadata are never owners.
    """
    try:
        location = package['location']
        files = package['files']
    except KeyError:
        return False
    return any(os.path.abspath(os.path.join(location, name)) == abspath
               for name in files)
|
brbsix/pip-utils | pip_utils/locate.py | find_owners | python | def find_owners(path):
abspath = os.path.abspath(path)
packages = search_packages_info(
sorted((d.project_name for d in
get_installed_distributions(user_only=ENABLE_USER_SITE)),
key=lambda d: d.lower()))
return [p['name'] for p in packages if is_owner(p, abspath)] | Return the package(s) that file belongs to. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/locate.py#L29-L38 | null | # -*- coding: utf-8 -*-
"""Identify package that file belongs to"""
# Python 2 forwards-compatibility
from __future__ import absolute_import, print_function
# standard imports
import os
from site import ENABLE_USER_SITE
# external imports
try:
from pip._internal.commands.show import search_packages_info
from pip._internal.utils.misc import get_installed_distributions
except ImportError:
# legacy support for pip 8 & 9
from pip.commands.show import search_packages_info
from pip import get_installed_distributions
def command_locate(options):
"""Command launched by CLI."""
matches = find_owners(options.file.name)
if matches:
print(*matches, sep='\n')
def is_owner(package, abspath):
"""Determine whether `abspath` belongs to `package`."""
try:
files = package['files']
location = package['location']
except KeyError:
return False
paths = (os.path.abspath(os.path.join(location, f))
for f in files)
return abspath in paths
|
brbsix/pip-utils | pip_utils/locate.py | is_owner | python | def is_owner(package, abspath):
try:
files = package['files']
location = package['location']
except KeyError:
return False
paths = (os.path.abspath(os.path.join(location, f))
for f in files)
return abspath in paths | Determine whether `abspath` belongs to `package`. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/locate.py#L41-L52 | null | # -*- coding: utf-8 -*-
"""Identify package that file belongs to"""
# Python 2 forwards-compatibility
from __future__ import absolute_import, print_function
# standard imports
import os
from site import ENABLE_USER_SITE
# external imports
try:
from pip._internal.commands.show import search_packages_info
from pip._internal.utils.misc import get_installed_distributions
except ImportError:
# legacy support for pip 8 & 9
from pip.commands.show import search_packages_info
from pip import get_installed_distributions
def command_locate(options):
"""Command launched by CLI."""
matches = find_owners(options.file.name)
if matches:
print(*matches, sep='\n')
def find_owners(path):
"""Return the package(s) that file belongs to."""
abspath = os.path.abspath(path)
packages = search_packages_info(
sorted((d.project_name for d in
get_installed_distributions(user_only=ENABLE_USER_SITE)),
key=lambda d: d.lower()))
return [p['name'] for p in packages if is_owner(p, abspath)]
|
brbsix/pip-utils | setup.py | long_description | python | def long_description():
# use re.compile() for flags support in Python 2.6
pattern = re.compile(r'\n^\.\. start-badges.*^\.\. end-badges\n',
flags=re.M | re.S)
return pattern.sub('', read('README.rst')) | Return the contents of README.rst (with badging removed). | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/setup.py#L32-L37 | [
"def read(*names, **kwargs):\n \"\"\"Return contents of text file (in the same directory as this file).\"\"\"\n return io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get('encoding', 'utf8')\n ).read()\n"
] | # -*- coding: utf-8 -*-
"""
Application setup script
To build package:
python3 setup.py bdist_wheel sdist
python2 setup.py bdist_wheel clean
To run tests in an virtualenv:
python setup.py test
To run tests directly with verbose output:
python -m pytest -vv
"""
# Python 2 forwards-compatibility
from __future__ import absolute_import
# standard imports
import io
import os
import re
import sys
# external imports
from setuptools import setup
# application imports
from pip_utils import __description__, __program__, __version__
def read(*names, **kwargs):
"""Return contents of text file (in the same directory as this file)."""
return io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
SETUP_REQUIRES = ['pytest-runner'] if \
{'ptr', 'pytest', 'test'}.intersection(sys.argv) else []
INSTALL_REQUIRES = ['pip>=8.0.0']
TESTS_REQUIRE = ['pytest-pylint'] # Python 2.7 & 3.3+ only
setup(
name=__program__,
version=__version__,
description=__description__,
author='Brian Beffa',
author_email='brbsix@gmail.com',
long_description=long_description(),
url='https://github.com/brbsix/pip-utils',
license='GPLv3',
keywords=['package', 'packaging', 'pip', 'PyPi'],
packages=['pip_utils'],
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
tests_require=TESTS_REQUIRE,
extras_require={'testing': TESTS_REQUIRE},
entry_points={
'console_scripts': [
'pip{v}-utils=pip_utils.cli:main'.format(
v=sys.version_info[0]) # not a named tuple in Python 2.6
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: System',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
'Topic :: Utilities'
]
)
|
brbsix/pip-utils | pip_utils/dependents.py | command_dependents | python | def command_dependents(options):
dependents = dependencies(options.package, options.recursive, options.info)
if dependents:
print(*dependents, sep='\n') | Command launched by CLI. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/dependents.py#L11-L16 | [
"def dependencies(dist, recursive=False, info=False):\n \"\"\"Yield distribution's dependencies.\"\"\"\n\n def case_sorted(items):\n \"\"\"Return unique list sorted in case-insensitive order.\"\"\"\n return sorted(set(items), key=lambda i: i.lower())\n\n def requires(distribution):\n \"\"\"Return the requirements for a distribution.\"\"\"\n if recursive:\n req = set(pkg_resources.require(distribution.project_name))\n req.remove(distribution)\n return {r.as_requirement() for r in req}\n return distribution.requires()\n\n def modifier(distribution):\n \"\"\"Return project's name or full requirement string.\"\"\"\n return str(distribution) if info else distribution.project_name\n\n return case_sorted(modifier(r) for r in requires(dist))\n"
] | # -*- coding: utf-8 -*-
"""List dependents of package"""
# Python 2 forwards-compatibility
from __future__ import absolute_import, print_function
# external imports
from pip._vendor import pkg_resources
def dependencies(dist, recursive=False, info=False):
"""Yield distribution's dependencies."""
def case_sorted(items):
"""Return unique list sorted in case-insensitive order."""
return sorted(set(items), key=lambda i: i.lower())
def requires(distribution):
"""Return the requirements for a distribution."""
if recursive:
req = set(pkg_resources.require(distribution.project_name))
req.remove(distribution)
return {r.as_requirement() for r in req}
return distribution.requires()
def modifier(distribution):
"""Return project's name or full requirement string."""
return str(distribution) if info else distribution.project_name
return case_sorted(modifier(r) for r in requires(dist))
|
brbsix/pip-utils | pip_utils/dependents.py | dependencies | python | def dependencies(dist, recursive=False, info=False):
def case_sorted(items):
"""Return unique list sorted in case-insensitive order."""
return sorted(set(items), key=lambda i: i.lower())
def requires(distribution):
"""Return the requirements for a distribution."""
if recursive:
req = set(pkg_resources.require(distribution.project_name))
req.remove(distribution)
return {r.as_requirement() for r in req}
return distribution.requires()
def modifier(distribution):
"""Return project's name or full requirement string."""
return str(distribution) if info else distribution.project_name
return case_sorted(modifier(r) for r in requires(dist)) | Yield distribution's dependencies. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/dependents.py#L19-L38 | [
"def case_sorted(items):\n \"\"\"Return unique list sorted in case-insensitive order.\"\"\"\n return sorted(set(items), key=lambda i: i.lower())\n",
"def requires(distribution):\n \"\"\"Return the requirements for a distribution.\"\"\"\n if recursive:\n req = set(pkg_resources.require(distribution.project_name))\n req.remove(distribution)\n return {r.as_requirement() for r in req}\n return distribution.requires()\n"
] | # -*- coding: utf-8 -*-
"""List dependents of package"""
# Python 2 forwards-compatibility
from __future__ import absolute_import, print_function
# external imports
from pip._vendor import pkg_resources
def command_dependents(options):
"""Command launched by CLI."""
dependents = dependencies(options.package, options.recursive, options.info)
if dependents:
print(*dependents, sep='\n')
|
brbsix/pip-utils | pip_utils/outdated.py | ListCommand._build_package_finder | python | def _build_package_finder(options, index_urls, session):
return PackageFinder(
find_links=options.get('find_links'),
index_urls=index_urls,
allow_all_prereleases=options.get('pre'),
trusted_hosts=options.get('trusted_hosts'),
session=session,
) | Create a package finder appropriate to this list command. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/outdated.py#L76-L86 | null | class ListCommand(object):
"""
Modified version of pip's list command.
Sourced from: pip.commands.list.ListCommand
"""
installed_distributions = []
options = {
'help': None,
'local': True,
'no_index': False,
'allow_all_insecure': False,
'proxy': '',
'require_venv': False,
'timeout': 15,
'exists_action': [],
'no_input': False,
'isolated_mode': False,
'allow_external': [],
'quiet': 1,
'editable': False,
'client_cert': None,
'allow_unverified': [],
'disable_pip_version_check': False,
'default_vcs': '',
'skip_requirements_regex': '',
'trusted_hosts': [],
'version': None,
'log': None,
'index_url': 'https://pypi.python.org/simple',
'cache_dir': os.path.join(os.environ['HOME'], '.cache/pip'),
'outdated': True,
'retries': 5,
'allow_all_external': False,
'pre': False,
'find_links': [],
'cert': None,
'uptodate': False,
'extra_index_urls': [],
'user': ENABLE_USER_SITE,
'verbose': 0
}
@staticmethod
@staticmethod
def _build_session(options, retries=None, timeout=None):
session = PipSession(
cache=(
normalize_path(os.path.join(options.get('cache_dir'), 'http'))
if options.get('cache_dir') else None
),
retries=retries if retries is not None else options.get('retries'),
insecure_hosts=options.get('trusted_hosts'),
)
# Handle custom ca-bundles from the user
if options.get('cert'):
session.verify = options.get('cert')
# Handle SSL client certificate
if options.get('client_cert'):
session.cert = options.get('client_cert')
# Handle timeouts
if options.get('timeout') or timeout:
session.timeout = (
timeout if timeout is not None else options.get('timeout')
)
# Handle configured proxies
if options.get('proxy'):
session.proxies = {
'http': options.get('proxy'),
'https': options.get('proxy'),
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.get('no_input')
return session
@classmethod
def can_be_updated(cls, dist, latest_version):
"""Determine whether package can be updated or not."""
scheme = get_scheme('default')
name = dist.project_name
dependants = cls.get_dependants(name)
for dependant in dependants:
requires = dependant.requires()
for requirement in cls.get_requirement(name, requires):
req = parse_requirement(requirement)
# Ignore error if version in requirement spec can't be parsed
try:
matcher = scheme.matcher(req.requirement)
except UnsupportedVersionError:
continue
if not matcher.match(str(latest_version)):
return False
return True
@classmethod
def find_packages_latest_versions(cls, options):
"""Yield latest versions."""
index_urls = [] if options.get('no_index') else \
[options.get('index_url')] + options.get('extra_index_urls')
with cls._build_session(options) as session:
finder = cls._build_package_finder(options, index_urls, session)
cls.installed_distributions = get_installed_distributions(
local_only=options.get('local'),
user_only=options.get('user'),
editables_only=options.get('editable'),
)
for dist in cls.installed_distributions:
all_candidates = finder.find_all_candidates(dist.key)
if not options.get('pre'):
# Remove prereleases
all_candidates = [c for c in all_candidates if not
c.version.is_prerelease]
if not all_candidates:
continue
# pylint: disable=protected-access
best_candidate = max(all_candidates,
key=finder._candidate_sort_key)
remote_version = best_candidate.version
typ = 'wheel' if best_candidate.location.is_wheel else 'sdist'
yield dist, remote_version, typ
@classmethod
def get_dependants(cls, dist):
"""Yield dependant user packages for a given package name."""
for package in cls.installed_distributions:
for requirement_package in package.requires():
requirement_name = requirement_package.project_name
# perform case-insensitive matching
if requirement_name.lower() == dist.lower():
yield package
@staticmethod
def get_requirement(name, requires):
"""
Yield matching requirement strings.
The strings are presented in the format demanded by
pip._vendor.distlib.util.parse_requirement. Hopefully
I'll be able to figure out a better way to handle this
in the future. Perhaps figure out how pip does it's
version satisfaction tests and see if it is offloadable?
FYI there should only really be ONE matching requirement
string, but I want to be able to process additional ones
in case a certain package does something funky and splits
up the requirements over multiple entries.
"""
for require in requires:
if name.lower() == require.project_name.lower() and require.specs:
safe_name = require.project_name.replace('-', '_')
yield '%s (%s)' % (safe_name, require.specifier)
@staticmethod
def output_package(dist):
"""Return string displaying package information."""
if dist_is_editable(dist):
return '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
return '%s (%s)' % (dist.project_name, dist.version)
@classmethod
def run_outdated(cls, options):
"""Print outdated user packages."""
latest_versions = sorted(
cls.find_packages_latest_versions(cls.options),
key=lambda p: p[0].project_name.lower())
for dist, latest_version, typ in latest_versions:
if latest_version > dist.parsed_version:
if options.all:
pass
elif options.pinned:
if cls.can_be_updated(dist, latest_version):
continue
elif not options.pinned:
if not cls.can_be_updated(dist, latest_version):
continue
elif options.update:
print(dist.project_name if options.brief else
'Updating %s to Latest: %s [%s]' %
(cls.output_package(dist), latest_version, typ))
main(['install', '--upgrade'] + ([
'--user'
] if ENABLE_USER_SITE else []) + [dist.key])
continue
print(dist.project_name if options.brief else
'%s - Latest: %s [%s]' %
(cls.output_package(dist), latest_version, typ))
|
brbsix/pip-utils | pip_utils/outdated.py | ListCommand.can_be_updated | python | def can_be_updated(cls, dist, latest_version):
scheme = get_scheme('default')
name = dist.project_name
dependants = cls.get_dependants(name)
for dependant in dependants:
requires = dependant.requires()
for requirement in cls.get_requirement(name, requires):
req = parse_requirement(requirement)
# Ignore error if version in requirement spec can't be parsed
try:
matcher = scheme.matcher(req.requirement)
except UnsupportedVersionError:
continue
if not matcher.match(str(latest_version)):
return False
return True | Determine whether package can be updated or not. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/outdated.py#L126-L143 | null | class ListCommand(object):
"""
Modified version of pip's list command.
Sourced from: pip.commands.list.ListCommand
"""
installed_distributions = []
options = {
'help': None,
'local': True,
'no_index': False,
'allow_all_insecure': False,
'proxy': '',
'require_venv': False,
'timeout': 15,
'exists_action': [],
'no_input': False,
'isolated_mode': False,
'allow_external': [],
'quiet': 1,
'editable': False,
'client_cert': None,
'allow_unverified': [],
'disable_pip_version_check': False,
'default_vcs': '',
'skip_requirements_regex': '',
'trusted_hosts': [],
'version': None,
'log': None,
'index_url': 'https://pypi.python.org/simple',
'cache_dir': os.path.join(os.environ['HOME'], '.cache/pip'),
'outdated': True,
'retries': 5,
'allow_all_external': False,
'pre': False,
'find_links': [],
'cert': None,
'uptodate': False,
'extra_index_urls': [],
'user': ENABLE_USER_SITE,
'verbose': 0
}
@staticmethod
def _build_package_finder(options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.get('find_links'),
index_urls=index_urls,
allow_all_prereleases=options.get('pre'),
trusted_hosts=options.get('trusted_hosts'),
session=session,
)
@staticmethod
def _build_session(options, retries=None, timeout=None):
session = PipSession(
cache=(
normalize_path(os.path.join(options.get('cache_dir'), 'http'))
if options.get('cache_dir') else None
),
retries=retries if retries is not None else options.get('retries'),
insecure_hosts=options.get('trusted_hosts'),
)
# Handle custom ca-bundles from the user
if options.get('cert'):
session.verify = options.get('cert')
# Handle SSL client certificate
if options.get('client_cert'):
session.cert = options.get('client_cert')
# Handle timeouts
if options.get('timeout') or timeout:
session.timeout = (
timeout if timeout is not None else options.get('timeout')
)
# Handle configured proxies
if options.get('proxy'):
session.proxies = {
'http': options.get('proxy'),
'https': options.get('proxy'),
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.get('no_input')
return session
@classmethod
@classmethod
def find_packages_latest_versions(cls, options):
"""Yield latest versions."""
index_urls = [] if options.get('no_index') else \
[options.get('index_url')] + options.get('extra_index_urls')
with cls._build_session(options) as session:
finder = cls._build_package_finder(options, index_urls, session)
cls.installed_distributions = get_installed_distributions(
local_only=options.get('local'),
user_only=options.get('user'),
editables_only=options.get('editable'),
)
for dist in cls.installed_distributions:
all_candidates = finder.find_all_candidates(dist.key)
if not options.get('pre'):
# Remove prereleases
all_candidates = [c for c in all_candidates if not
c.version.is_prerelease]
if not all_candidates:
continue
# pylint: disable=protected-access
best_candidate = max(all_candidates,
key=finder._candidate_sort_key)
remote_version = best_candidate.version
typ = 'wheel' if best_candidate.location.is_wheel else 'sdist'
yield dist, remote_version, typ
@classmethod
def get_dependants(cls, dist):
"""Yield dependant user packages for a given package name."""
for package in cls.installed_distributions:
for requirement_package in package.requires():
requirement_name = requirement_package.project_name
# perform case-insensitive matching
if requirement_name.lower() == dist.lower():
yield package
@staticmethod
def get_requirement(name, requires):
"""
Yield matching requirement strings.
The strings are presented in the format demanded by
pip._vendor.distlib.util.parse_requirement. Hopefully
I'll be able to figure out a better way to handle this
in the future. Perhaps figure out how pip does it's
version satisfaction tests and see if it is offloadable?
FYI there should only really be ONE matching requirement
string, but I want to be able to process additional ones
in case a certain package does something funky and splits
up the requirements over multiple entries.
"""
for require in requires:
if name.lower() == require.project_name.lower() and require.specs:
safe_name = require.project_name.replace('-', '_')
yield '%s (%s)' % (safe_name, require.specifier)
@staticmethod
def output_package(dist):
"""Return string displaying package information."""
if dist_is_editable(dist):
return '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
return '%s (%s)' % (dist.project_name, dist.version)
@classmethod
def run_outdated(cls, options):
"""Print outdated user packages."""
latest_versions = sorted(
cls.find_packages_latest_versions(cls.options),
key=lambda p: p[0].project_name.lower())
for dist, latest_version, typ in latest_versions:
if latest_version > dist.parsed_version:
if options.all:
pass
elif options.pinned:
if cls.can_be_updated(dist, latest_version):
continue
elif not options.pinned:
if not cls.can_be_updated(dist, latest_version):
continue
elif options.update:
print(dist.project_name if options.brief else
'Updating %s to Latest: %s [%s]' %
(cls.output_package(dist), latest_version, typ))
main(['install', '--upgrade'] + ([
'--user'
] if ENABLE_USER_SITE else []) + [dist.key])
continue
print(dist.project_name if options.brief else
'%s - Latest: %s [%s]' %
(cls.output_package(dist), latest_version, typ))
|
brbsix/pip-utils | pip_utils/outdated.py | ListCommand.get_dependants | python | def get_dependants(cls, dist):
for package in cls.installed_distributions:
for requirement_package in package.requires():
requirement_name = requirement_package.project_name
# perform case-insensitive matching
if requirement_name.lower() == dist.lower():
yield package | Yield dependant user packages for a given package name. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/outdated.py#L176-L183 | null | class ListCommand(object):
"""
Modified version of pip's list command.
Sourced from: pip.commands.list.ListCommand
"""
installed_distributions = []
options = {
'help': None,
'local': True,
'no_index': False,
'allow_all_insecure': False,
'proxy': '',
'require_venv': False,
'timeout': 15,
'exists_action': [],
'no_input': False,
'isolated_mode': False,
'allow_external': [],
'quiet': 1,
'editable': False,
'client_cert': None,
'allow_unverified': [],
'disable_pip_version_check': False,
'default_vcs': '',
'skip_requirements_regex': '',
'trusted_hosts': [],
'version': None,
'log': None,
'index_url': 'https://pypi.python.org/simple',
'cache_dir': os.path.join(os.environ['HOME'], '.cache/pip'),
'outdated': True,
'retries': 5,
'allow_all_external': False,
'pre': False,
'find_links': [],
'cert': None,
'uptodate': False,
'extra_index_urls': [],
'user': ENABLE_USER_SITE,
'verbose': 0
}
@staticmethod
def _build_package_finder(options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.get('find_links'),
index_urls=index_urls,
allow_all_prereleases=options.get('pre'),
trusted_hosts=options.get('trusted_hosts'),
session=session,
)
@staticmethod
def _build_session(options, retries=None, timeout=None):
session = PipSession(
cache=(
normalize_path(os.path.join(options.get('cache_dir'), 'http'))
if options.get('cache_dir') else None
),
retries=retries if retries is not None else options.get('retries'),
insecure_hosts=options.get('trusted_hosts'),
)
# Handle custom ca-bundles from the user
if options.get('cert'):
session.verify = options.get('cert')
# Handle SSL client certificate
if options.get('client_cert'):
session.cert = options.get('client_cert')
# Handle timeouts
if options.get('timeout') or timeout:
session.timeout = (
timeout if timeout is not None else options.get('timeout')
)
# Handle configured proxies
if options.get('proxy'):
session.proxies = {
'http': options.get('proxy'),
'https': options.get('proxy'),
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.get('no_input')
return session
@classmethod
def can_be_updated(cls, dist, latest_version):
"""Determine whether package can be updated or not."""
scheme = get_scheme('default')
name = dist.project_name
dependants = cls.get_dependants(name)
for dependant in dependants:
requires = dependant.requires()
for requirement in cls.get_requirement(name, requires):
req = parse_requirement(requirement)
# Ignore error if version in requirement spec can't be parsed
try:
matcher = scheme.matcher(req.requirement)
except UnsupportedVersionError:
continue
if not matcher.match(str(latest_version)):
return False
return True
@classmethod
def find_packages_latest_versions(cls, options):
"""Yield latest versions."""
index_urls = [] if options.get('no_index') else \
[options.get('index_url')] + options.get('extra_index_urls')
with cls._build_session(options) as session:
finder = cls._build_package_finder(options, index_urls, session)
cls.installed_distributions = get_installed_distributions(
local_only=options.get('local'),
user_only=options.get('user'),
editables_only=options.get('editable'),
)
for dist in cls.installed_distributions:
all_candidates = finder.find_all_candidates(dist.key)
if not options.get('pre'):
# Remove prereleases
all_candidates = [c for c in all_candidates if not
c.version.is_prerelease]
if not all_candidates:
continue
# pylint: disable=protected-access
best_candidate = max(all_candidates,
key=finder._candidate_sort_key)
remote_version = best_candidate.version
typ = 'wheel' if best_candidate.location.is_wheel else 'sdist'
yield dist, remote_version, typ
@classmethod
@staticmethod
def get_requirement(name, requires):
"""
Yield matching requirement strings.
The strings are presented in the format demanded by
pip._vendor.distlib.util.parse_requirement. Hopefully
I'll be able to figure out a better way to handle this
in the future. Perhaps figure out how pip does it's
version satisfaction tests and see if it is offloadable?
FYI there should only really be ONE matching requirement
string, but I want to be able to process additional ones
in case a certain package does something funky and splits
up the requirements over multiple entries.
"""
for require in requires:
if name.lower() == require.project_name.lower() and require.specs:
safe_name = require.project_name.replace('-', '_')
yield '%s (%s)' % (safe_name, require.specifier)
@staticmethod
def output_package(dist):
"""Return string displaying package information."""
if dist_is_editable(dist):
return '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
return '%s (%s)' % (dist.project_name, dist.version)
@classmethod
def run_outdated(cls, options):
"""Print outdated user packages."""
latest_versions = sorted(
cls.find_packages_latest_versions(cls.options),
key=lambda p: p[0].project_name.lower())
for dist, latest_version, typ in latest_versions:
if latest_version > dist.parsed_version:
if options.all:
pass
elif options.pinned:
if cls.can_be_updated(dist, latest_version):
continue
elif not options.pinned:
if not cls.can_be_updated(dist, latest_version):
continue
elif options.update:
print(dist.project_name if options.brief else
'Updating %s to Latest: %s [%s]' %
(cls.output_package(dist), latest_version, typ))
main(['install', '--upgrade'] + ([
'--user'
] if ENABLE_USER_SITE else []) + [dist.key])
continue
print(dist.project_name if options.brief else
'%s - Latest: %s [%s]' %
(cls.output_package(dist), latest_version, typ))
|
brbsix/pip-utils | pip_utils/outdated.py | ListCommand.get_requirement | python | def get_requirement(name, requires):
for require in requires:
if name.lower() == require.project_name.lower() and require.specs:
safe_name = require.project_name.replace('-', '_')
yield '%s (%s)' % (safe_name, require.specifier) | Yield matching requirement strings.
The strings are presented in the format demanded by
pip._vendor.distlib.util.parse_requirement. Hopefully
I'll be able to figure out a better way to handle this
in the future. Perhaps figure out how pip does it's
version satisfaction tests and see if it is offloadable?
FYI there should only really be ONE matching requirement
string, but I want to be able to process additional ones
in case a certain package does something funky and splits
up the requirements over multiple entries. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/outdated.py#L186-L204 | null | class ListCommand(object):
"""
Modified version of pip's list command.
Sourced from: pip.commands.list.ListCommand
"""
installed_distributions = []
options = {
'help': None,
'local': True,
'no_index': False,
'allow_all_insecure': False,
'proxy': '',
'require_venv': False,
'timeout': 15,
'exists_action': [],
'no_input': False,
'isolated_mode': False,
'allow_external': [],
'quiet': 1,
'editable': False,
'client_cert': None,
'allow_unverified': [],
'disable_pip_version_check': False,
'default_vcs': '',
'skip_requirements_regex': '',
'trusted_hosts': [],
'version': None,
'log': None,
'index_url': 'https://pypi.python.org/simple',
'cache_dir': os.path.join(os.environ['HOME'], '.cache/pip'),
'outdated': True,
'retries': 5,
'allow_all_external': False,
'pre': False,
'find_links': [],
'cert': None,
'uptodate': False,
'extra_index_urls': [],
'user': ENABLE_USER_SITE,
'verbose': 0
}
@staticmethod
def _build_package_finder(options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.get('find_links'),
index_urls=index_urls,
allow_all_prereleases=options.get('pre'),
trusted_hosts=options.get('trusted_hosts'),
session=session,
)
@staticmethod
def _build_session(options, retries=None, timeout=None):
session = PipSession(
cache=(
normalize_path(os.path.join(options.get('cache_dir'), 'http'))
if options.get('cache_dir') else None
),
retries=retries if retries is not None else options.get('retries'),
insecure_hosts=options.get('trusted_hosts'),
)
# Handle custom ca-bundles from the user
if options.get('cert'):
session.verify = options.get('cert')
# Handle SSL client certificate
if options.get('client_cert'):
session.cert = options.get('client_cert')
# Handle timeouts
if options.get('timeout') or timeout:
session.timeout = (
timeout if timeout is not None else options.get('timeout')
)
# Handle configured proxies
if options.get('proxy'):
session.proxies = {
'http': options.get('proxy'),
'https': options.get('proxy'),
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.get('no_input')
return session
@classmethod
def can_be_updated(cls, dist, latest_version):
"""Determine whether package can be updated or not."""
scheme = get_scheme('default')
name = dist.project_name
dependants = cls.get_dependants(name)
for dependant in dependants:
requires = dependant.requires()
for requirement in cls.get_requirement(name, requires):
req = parse_requirement(requirement)
# Ignore error if version in requirement spec can't be parsed
try:
matcher = scheme.matcher(req.requirement)
except UnsupportedVersionError:
continue
if not matcher.match(str(latest_version)):
return False
return True
@classmethod
def find_packages_latest_versions(cls, options):
"""Yield latest versions."""
index_urls = [] if options.get('no_index') else \
[options.get('index_url')] + options.get('extra_index_urls')
with cls._build_session(options) as session:
finder = cls._build_package_finder(options, index_urls, session)
cls.installed_distributions = get_installed_distributions(
local_only=options.get('local'),
user_only=options.get('user'),
editables_only=options.get('editable'),
)
for dist in cls.installed_distributions:
all_candidates = finder.find_all_candidates(dist.key)
if not options.get('pre'):
# Remove prereleases
all_candidates = [c for c in all_candidates if not
c.version.is_prerelease]
if not all_candidates:
continue
# pylint: disable=protected-access
best_candidate = max(all_candidates,
key=finder._candidate_sort_key)
remote_version = best_candidate.version
typ = 'wheel' if best_candidate.location.is_wheel else 'sdist'
yield dist, remote_version, typ
@classmethod
def get_dependants(cls, dist):
"""Yield dependant user packages for a given package name."""
for package in cls.installed_distributions:
for requirement_package in package.requires():
requirement_name = requirement_package.project_name
# perform case-insensitive matching
if requirement_name.lower() == dist.lower():
yield package
@staticmethod
@staticmethod
def output_package(dist):
"""Return string displaying package information."""
if dist_is_editable(dist):
return '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
return '%s (%s)' % (dist.project_name, dist.version)
@classmethod
def run_outdated(cls, options):
"""Print outdated user packages."""
latest_versions = sorted(
cls.find_packages_latest_versions(cls.options),
key=lambda p: p[0].project_name.lower())
for dist, latest_version, typ in latest_versions:
if latest_version > dist.parsed_version:
if options.all:
pass
elif options.pinned:
if cls.can_be_updated(dist, latest_version):
continue
elif not options.pinned:
if not cls.can_be_updated(dist, latest_version):
continue
elif options.update:
print(dist.project_name if options.brief else
'Updating %s to Latest: %s [%s]' %
(cls.output_package(dist), latest_version, typ))
main(['install', '--upgrade'] + ([
'--user'
] if ENABLE_USER_SITE else []) + [dist.key])
continue
print(dist.project_name if options.brief else
'%s - Latest: %s [%s]' %
(cls.output_package(dist), latest_version, typ))
|
brbsix/pip-utils | pip_utils/outdated.py | ListCommand.output_package | python | def output_package(dist):
if dist_is_editable(dist):
return '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
return '%s (%s)' % (dist.project_name, dist.version) | Return string displaying package information. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/outdated.py#L207-L215 | null | class ListCommand(object):
"""
Modified version of pip's list command.
Sourced from: pip.commands.list.ListCommand
"""
installed_distributions = []
options = {
'help': None,
'local': True,
'no_index': False,
'allow_all_insecure': False,
'proxy': '',
'require_venv': False,
'timeout': 15,
'exists_action': [],
'no_input': False,
'isolated_mode': False,
'allow_external': [],
'quiet': 1,
'editable': False,
'client_cert': None,
'allow_unverified': [],
'disable_pip_version_check': False,
'default_vcs': '',
'skip_requirements_regex': '',
'trusted_hosts': [],
'version': None,
'log': None,
'index_url': 'https://pypi.python.org/simple',
'cache_dir': os.path.join(os.environ['HOME'], '.cache/pip'),
'outdated': True,
'retries': 5,
'allow_all_external': False,
'pre': False,
'find_links': [],
'cert': None,
'uptodate': False,
'extra_index_urls': [],
'user': ENABLE_USER_SITE,
'verbose': 0
}
@staticmethod
def _build_package_finder(options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.get('find_links'),
index_urls=index_urls,
allow_all_prereleases=options.get('pre'),
trusted_hosts=options.get('trusted_hosts'),
session=session,
)
@staticmethod
def _build_session(options, retries=None, timeout=None):
session = PipSession(
cache=(
normalize_path(os.path.join(options.get('cache_dir'), 'http'))
if options.get('cache_dir') else None
),
retries=retries if retries is not None else options.get('retries'),
insecure_hosts=options.get('trusted_hosts'),
)
# Handle custom ca-bundles from the user
if options.get('cert'):
session.verify = options.get('cert')
# Handle SSL client certificate
if options.get('client_cert'):
session.cert = options.get('client_cert')
# Handle timeouts
if options.get('timeout') or timeout:
session.timeout = (
timeout if timeout is not None else options.get('timeout')
)
# Handle configured proxies
if options.get('proxy'):
session.proxies = {
'http': options.get('proxy'),
'https': options.get('proxy'),
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.get('no_input')
return session
@classmethod
def can_be_updated(cls, dist, latest_version):
"""Determine whether package can be updated or not."""
scheme = get_scheme('default')
name = dist.project_name
dependants = cls.get_dependants(name)
for dependant in dependants:
requires = dependant.requires()
for requirement in cls.get_requirement(name, requires):
req = parse_requirement(requirement)
# Ignore error if version in requirement spec can't be parsed
try:
matcher = scheme.matcher(req.requirement)
except UnsupportedVersionError:
continue
if not matcher.match(str(latest_version)):
return False
return True
@classmethod
def find_packages_latest_versions(cls, options):
"""Yield latest versions."""
index_urls = [] if options.get('no_index') else \
[options.get('index_url')] + options.get('extra_index_urls')
with cls._build_session(options) as session:
finder = cls._build_package_finder(options, index_urls, session)
cls.installed_distributions = get_installed_distributions(
local_only=options.get('local'),
user_only=options.get('user'),
editables_only=options.get('editable'),
)
for dist in cls.installed_distributions:
all_candidates = finder.find_all_candidates(dist.key)
if not options.get('pre'):
# Remove prereleases
all_candidates = [c for c in all_candidates if not
c.version.is_prerelease]
if not all_candidates:
continue
# pylint: disable=protected-access
best_candidate = max(all_candidates,
key=finder._candidate_sort_key)
remote_version = best_candidate.version
typ = 'wheel' if best_candidate.location.is_wheel else 'sdist'
yield dist, remote_version, typ
@classmethod
def get_dependants(cls, dist):
"""Yield dependant user packages for a given package name."""
for package in cls.installed_distributions:
for requirement_package in package.requires():
requirement_name = requirement_package.project_name
# perform case-insensitive matching
if requirement_name.lower() == dist.lower():
yield package
@staticmethod
def get_requirement(name, requires):
"""
Yield matching requirement strings.
The strings are presented in the format demanded by
pip._vendor.distlib.util.parse_requirement. Hopefully
I'll be able to figure out a better way to handle this
in the future. Perhaps figure out how pip does it's
version satisfaction tests and see if it is offloadable?
FYI there should only really be ONE matching requirement
string, but I want to be able to process additional ones
in case a certain package does something funky and splits
up the requirements over multiple entries.
"""
for require in requires:
if name.lower() == require.project_name.lower() and require.specs:
safe_name = require.project_name.replace('-', '_')
yield '%s (%s)' % (safe_name, require.specifier)
@classmethod
def run_outdated(cls, options):
    """Print outdated user packages, optionally upgrading them.

    ``options`` carries the CLI flags (``all``/``pinned``/``brief``/``update``)
    while the pip-level settings come from the class-wide ``cls.options`` dict.

    Fixes two defects in the previous version:
    - a stray ``@staticmethod`` stacked on top of ``@classmethod`` made the
      method uncallable as a bound classmethod;
    - the upgrade branch was ``elif options.update:`` placed after the
      ``options.pinned`` / ``not options.pinned`` branches, which together
      exhaust every remaining case, so ``--upgrade`` could never run.
      It is now a separate ``if``.
    """
    latest_versions = sorted(
        cls.find_packages_latest_versions(cls.options),
        key=lambda p: p[0].project_name.lower())

    for dist, latest_version, typ in latest_versions:
        if latest_version > dist.parsed_version:
            if options.all:
                pass
            elif options.pinned:
                # --pinned: only report packages that CANNOT be updated
                if cls.can_be_updated(dist, latest_version):
                    continue
            elif not options.pinned:
                # default: only report packages that CAN be updated
                if not cls.can_be_updated(dist, latest_version):
                    continue
            if options.update:
                print(dist.project_name if options.brief else
                      'Updating %s to Latest: %s [%s]' %
                      (cls.output_package(dist), latest_version, typ))
                main(['install', '--upgrade'] + ([
                    '--user'
                ] if ENABLE_USER_SITE else []) + [dist.key])
                continue
            print(dist.project_name if options.brief else
                  '%s - Latest: %s [%s]' %
                  (cls.output_package(dist), latest_version, typ))
|
brbsix/pip-utils | pip_utils/outdated.py | ListCommand.run_outdated | python | def run_outdated(cls, options):
latest_versions = sorted(
cls.find_packages_latest_versions(cls.options),
key=lambda p: p[0].project_name.lower())
for dist, latest_version, typ in latest_versions:
if latest_version > dist.parsed_version:
if options.all:
pass
elif options.pinned:
if cls.can_be_updated(dist, latest_version):
continue
elif not options.pinned:
if not cls.can_be_updated(dist, latest_version):
continue
elif options.update:
print(dist.project_name if options.brief else
'Updating %s to Latest: %s [%s]' %
(cls.output_package(dist), latest_version, typ))
main(['install', '--upgrade'] + ([
'--user'
] if ENABLE_USER_SITE else []) + [dist.key])
continue
print(dist.project_name if options.brief else
'%s - Latest: %s [%s]' %
(cls.output_package(dist), latest_version, typ)) | Print outdated user packages. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/outdated.py#L218-L245 | [
"def can_be_updated(cls, dist, latest_version):\n \"\"\"Determine whether package can be updated or not.\"\"\"\n scheme = get_scheme('default')\n name = dist.project_name\n dependants = cls.get_dependants(name)\n for dependant in dependants:\n requires = dependant.requires()\n for requirement in cls.get_requirement(name, requires):\n req = parse_requirement(requirement)\n # Ignore error if version in requirement spec can't be parsed\n try:\n matcher = scheme.matcher(req.requirement)\n except UnsupportedVersionError:\n continue\n if not matcher.match(str(latest_version)):\n return False\n\n return True\n",
"def find_packages_latest_versions(cls, options):\n \"\"\"Yield latest versions.\"\"\"\n index_urls = [] if options.get('no_index') else \\\n [options.get('index_url')] + options.get('extra_index_urls')\n\n with cls._build_session(options) as session:\n finder = cls._build_package_finder(options, index_urls, session)\n\n cls.installed_distributions = get_installed_distributions(\n local_only=options.get('local'),\n user_only=options.get('user'),\n editables_only=options.get('editable'),\n )\n for dist in cls.installed_distributions:\n all_candidates = finder.find_all_candidates(dist.key)\n if not options.get('pre'):\n # Remove prereleases\n all_candidates = [c for c in all_candidates if not\n c.version.is_prerelease]\n\n if not all_candidates:\n continue\n # pylint: disable=protected-access\n best_candidate = max(all_candidates,\n key=finder._candidate_sort_key)\n remote_version = best_candidate.version\n typ = 'wheel' if best_candidate.location.is_wheel else 'sdist'\n yield dist, remote_version, typ\n",
"def output_package(dist):\n \"\"\"Return string displaying package information.\"\"\"\n if dist_is_editable(dist):\n return '%s (%s, %s)' % (\n dist.project_name,\n dist.version,\n dist.location,\n )\n return '%s (%s)' % (dist.project_name, dist.version)\n"
] | class ListCommand(object):
"""
Modified version of pip's list command.
Sourced from: pip.commands.list.ListCommand
"""
installed_distributions = []
options = {
'help': None,
'local': True,
'no_index': False,
'allow_all_insecure': False,
'proxy': '',
'require_venv': False,
'timeout': 15,
'exists_action': [],
'no_input': False,
'isolated_mode': False,
'allow_external': [],
'quiet': 1,
'editable': False,
'client_cert': None,
'allow_unverified': [],
'disable_pip_version_check': False,
'default_vcs': '',
'skip_requirements_regex': '',
'trusted_hosts': [],
'version': None,
'log': None,
'index_url': 'https://pypi.python.org/simple',
'cache_dir': os.path.join(os.environ['HOME'], '.cache/pip'),
'outdated': True,
'retries': 5,
'allow_all_external': False,
'pre': False,
'find_links': [],
'cert': None,
'uptodate': False,
'extra_index_urls': [],
'user': ENABLE_USER_SITE,
'verbose': 0
}
@staticmethod
def _build_package_finder(options, index_urls, session):
    """Return a PackageFinder configured from the options dict."""
    opt = options.get
    return PackageFinder(
        find_links=opt('find_links'),
        index_urls=index_urls,
        allow_all_prereleases=opt('pre'),
        trusted_hosts=opt('trusted_hosts'),
        session=session,
    )
@staticmethod
def _build_session(options, retries=None, timeout=None):
    """Build a PipSession from the options dict.

    ``retries`` and ``timeout`` override the corresponding option values
    when explicitly given.
    """
    cache_dir = options.get('cache_dir')
    http_cache = (normalize_path(os.path.join(cache_dir, 'http'))
                  if cache_dir else None)
    session = PipSession(
        cache=http_cache,
        retries=options.get('retries') if retries is None else retries,
        insecure_hosts=options.get('trusted_hosts'),
    )
    # Custom CA bundle supplied by the user
    if options.get('cert'):
        session.verify = options.get('cert')
    # SSL client certificate
    if options.get('client_cert'):
        session.cert = options.get('client_cert')
    # Timeouts (argument wins over the option value)
    if timeout or options.get('timeout'):
        session.timeout = (options.get('timeout') if timeout is None
                           else timeout)
    # Configured proxies apply to both schemes
    proxy = options.get('proxy')
    if proxy:
        session.proxies = {'http': proxy, 'https': proxy}
    # Whether we may prompt the user for authentication
    session.auth.prompting = not options.get('no_input')
    return session
@classmethod
def can_be_updated(cls, dist, latest_version):
    """Return False if any dependant pins ``dist`` below ``latest_version``."""
    scheme = get_scheme('default')
    name = dist.project_name
    latest = str(latest_version)
    for dependant in cls.get_dependants(name):
        for req_string in cls.get_requirement(name, dependant.requires()):
            req = parse_requirement(req_string)
            try:
                matcher = scheme.matcher(req.requirement)
            except UnsupportedVersionError:
                # Version in the requirement spec could not be parsed; skip it
                continue
            if not matcher.match(latest):
                return False
    return True
@classmethod
def find_packages_latest_versions(cls, options):
    """Yield ``(distribution, latest_version, type)`` for installed packages.

    ``type`` is ``'wheel'`` or ``'sdist'`` depending on the best candidate
    found on the index. Also caches the installed distributions on the class.
    """
    if options.get('no_index'):
        index_urls = []
    else:
        index_urls = [options.get('index_url')] + options.get('extra_index_urls')

    with cls._build_session(options) as session:
        finder = cls._build_package_finder(options, index_urls, session)

        cls.installed_distributions = get_installed_distributions(
            local_only=options.get('local'),
            user_only=options.get('user'),
            editables_only=options.get('editable'),
        )
        for dist in cls.installed_distributions:
            candidates = finder.find_all_candidates(dist.key)
            if not options.get('pre'):
                # Drop prereleases unless explicitly requested
                candidates = [c for c in candidates
                              if not c.version.is_prerelease]
            if not candidates:
                continue
            # pylint: disable=protected-access
            best = max(candidates, key=finder._candidate_sort_key)
            kind = 'wheel' if best.location.is_wheel else 'sdist'
            yield dist, best.version, kind
@classmethod
def get_dependants(cls, dist):
    """Yield installed packages whose requirements include ``dist`` (a name)."""
    needle = dist.lower()
    for pkg in cls.installed_distributions:
        for req in pkg.requires():
            # case-insensitive comparison of project names
            if req.project_name.lower() == needle:
                yield pkg
@staticmethod
def get_requirement(name, requires):
    """Yield ``parse_requirement``-style strings matching ``name``.

    Format: ``safe_name (specifier)`` with dashes replaced by underscores,
    as demanded by ``pip._vendor.distlib.util.parse_requirement``. Entries
    without version specs are ignored; normally at most one entry matches,
    but all matches are yielded in case a package splits its requirements
    over multiple entries.
    """
    lowered = name.lower()
    for entry in requires:
        if entry.project_name.lower() == lowered and entry.specs:
            safe = entry.project_name.replace('-', '_')
            yield '{0} ({1})'.format(safe, entry.specifier)
@staticmethod
def output_package(dist):
    """Return a display string: ``name (version)`` or, for editable
    installs, ``name (version, location)``."""
    if dist_is_editable(dist):
        # Editable installs also show where the source lives
        return '%s (%s, %s)' % (dist.project_name, dist.version,
                                dist.location)
    return '%s (%s)' % (dist.project_name, dist.version)
@classmethod
|
brbsix/pip-utils | pip_utils/parents.py | get_parents | python | def get_parents():
distributions = get_installed_distributions(user_only=ENABLE_USER_SITE)
remaining = {d.project_name.lower() for d in distributions}
requirements = {r.project_name.lower() for d in distributions for
r in d.requires()}
return get_realnames(remaining - requirements) | Return sorted list of names of packages without dependants. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/parents.py#L28-L35 | [
"def get_realnames(packages):\n \"\"\"\n Return list of unique case-correct package names.\n\n Packages are listed in a case-insensitive sorted order.\n \"\"\"\n return sorted({get_distribution(p).project_name for p in packages},\n key=lambda n: n.lower())\n"
] | # -*- coding: utf-8 -*-
"""List packages lacking dependants"""
# Python 2 forwards-compatibility
from __future__ import absolute_import, print_function
# standard imports
from site import ENABLE_USER_SITE
# external imports
try:
from pip._internal.utils.misc import get_installed_distributions
except ImportError:
# legacy support for pip 8 & 9
from pip import get_installed_distributions
from pip._vendor.pkg_resources import get_distribution
# pylint: disable=unused-argument
def command_parents(options):
    """CLI handler: print packages nothing else depends on.

    ``options`` is unused but required by the dispatch signature.
    """
    names = get_parents()
    if names:
        print(*names, sep='\n')
def get_realnames(packages):
    """Return unique, case-correct project names.

    The result is sorted case-insensitively.
    """
    real = {get_distribution(name).project_name for name in packages}
    return sorted(real, key=lambda n: n.lower())
|
brbsix/pip-utils | pip_utils/parents.py | get_realnames | python | def get_realnames(packages):
return sorted({get_distribution(p).project_name for p in packages},
key=lambda n: n.lower()) | Return list of unique case-correct package names.
Packages are listed in a case-insensitive sorted order. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/parents.py#L38-L45 | null | # -*- coding: utf-8 -*-
"""List packages lacking dependants"""
# Python 2 forwards-compatibility
from __future__ import absolute_import, print_function
# standard imports
from site import ENABLE_USER_SITE
# external imports
try:
from pip._internal.utils.misc import get_installed_distributions
except ImportError:
# legacy support for pip 8 & 9
from pip import get_installed_distributions
from pip._vendor.pkg_resources import get_distribution
# pylint: disable=unused-argument
def command_parents(options):
    """Print packages lacking dependants (CLI handler; ``options`` unused)."""
    parent_names = get_parents()
    if parent_names:
        print(*parent_names, sep='\n')
def get_parents():
    """Return sorted names of installed packages with no dependants."""
    dists = get_installed_distributions(user_only=ENABLE_USER_SITE)
    installed = {d.project_name.lower() for d in dists}
    required = {r.project_name.lower() for d in dists
                for r in d.requires()}
    # Packages required by nobody are the "parents"
    return get_realnames(installed - required)
|
brbsix/pip-utils | pip_utils/cli.py | _parser | python | def _parser():
launcher = 'pip%s-utils' % sys.version_info.major
parser = argparse.ArgumentParser(
description='%s.' % __description__,
epilog='See `%s COMMAND --help` for help '
'on a specific subcommand.' % launcher,
prog=launcher)
parser.add_argument(
'--version',
action='version',
version='%(prog)s ' + __version__)
subparsers = parser.add_subparsers()
# dependants
parser_dependants = subparsers.add_parser(
'dependants',
add_help=False,
help='list dependants of package')
parser_dependants.add_argument(
'package',
metavar='PACKAGE',
type=_distribution)
parser_dependants.add_argument(
'-h', '--help',
action='help',
help=argparse.SUPPRESS)
parser_dependants.set_defaults(
func=command_dependants)
# dependents
parser_dependents = subparsers.add_parser(
'dependents',
add_help=False,
help='list dependents of package')
parser_dependents.add_argument(
'package',
metavar='PACKAGE',
type=_distribution)
parser_dependents.add_argument(
'-i', '--info',
action='store_true',
help='show version requirements')
parser_dependents.add_argument(
'-r', '--recursive',
action='store_true',
help='list dependencies recursively')
parser_dependents.add_argument(
'-h', '--help',
action='help',
help=argparse.SUPPRESS)
parser_dependents.set_defaults(
func=command_dependents)
# locate
parser_locate = subparsers.add_parser(
'locate',
add_help=False,
help='identify packages that file belongs to')
parser_locate.add_argument(
'file',
metavar='FILE',
type=argparse.FileType('r'))
parser_locate.add_argument(
'-h', '--help',
action='help',
help=argparse.SUPPRESS)
parser_locate.set_defaults(
func=command_locate)
# outdated
parser_outdated = subparsers.add_parser(
'outdated',
add_help=False,
help='list outdated packages that may be updated')
parser_outdated.add_argument(
'-b', '--brief',
action='store_true',
help='show package name only')
group = parser_outdated.add_mutually_exclusive_group()
group.add_argument(
'-a', '--all',
action='store_true',
help='list all outdated packages')
group.add_argument(
'-p', '--pinned',
action='store_true',
help='list outdated packages unable to be updated')
group.add_argument(
'-U', '--upgrade',
action='store_true',
dest='update',
help='update packages that can be updated'
)
parser_outdated.add_argument(
'-h', '--help',
action='help',
help=argparse.SUPPRESS)
parser_outdated.set_defaults(
func=command_outdated)
# parents
parser_parents = subparsers.add_parser(
'parents',
add_help=False,
help='list packages lacking dependants')
parser_parents.add_argument(
'-h', '--help',
action='help',
help=argparse.SUPPRESS)
parser_parents.set_defaults(
func=command_parents)
return parser | Parse command-line options. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/cli.py#L31-L146 | null | # -*- coding: utf-8 -*-
"""Command-line application"""
# Python 2 forwards-compatibility
from __future__ import absolute_import
# standard imports
import argparse
import sys
# external imports
from pip._vendor.pkg_resources import DistributionNotFound, get_distribution
# application imports
from . import __description__, __version__
from .dependants import command_dependants
from .dependents import command_dependents
from .locate import command_locate
from .outdated import command_outdated
from .parents import command_parents
def _distribution(value):
    """argparse ``type=`` hook: map a package name to its distribution.

    Raises ArgumentTypeError when no such package is installed.
    """
    try:
        return get_distribution(value)
    except DistributionNotFound:
        raise argparse.ArgumentTypeError('invalid package: %r' % value)
def main(args=None):
    """Start the application; return the process exit code."""
    parser = _parser()
    # Python 2's argparse errors with 'too few arguments' when no subcommand
    # is given, while Python 3 proceeds silently. Rather than poke at argparse
    # internals (which vary across versions), force the help text by injecting
    # --help when the process was invoked with no arguments.
    if args is None and len(sys.argv) <= 1:
        sys.argv.append('--help')
    opts = parser.parse_args(args)
    # Dispatch to the subcommand handler registered via set_defaults(func=...)
    opts.func(opts)
    return 0
|
brbsix/pip-utils | pip_utils/cli.py | main | python | def main(args=None):
parser = _parser()
# Python 2 will error 'too few arguments' if no subcommand is supplied.
# No such error occurs in Python 3, which makes it feasible to check
# whether a subcommand was provided (displaying a help message if not).
# argparse internals vary significantly over the major versions, so it's
# much easier to just override the args passed to it. In this case, print
# the usage message if there are no args.
if args is None and len(sys.argv) <= 1:
sys.argv.append('--help')
options = parser.parse_args(args)
# pass options to subcommand
options.func(options)
return 0 | Start application. | train | https://github.com/brbsix/pip-utils/blob/bdd2a0a17cf36a1c88aa9e68002e9ed04a27bad8/pip_utils/cli.py#L149-L167 | [
"def _parser():\n \"\"\"Parse command-line options.\"\"\"\n launcher = 'pip%s-utils' % sys.version_info.major\n\n parser = argparse.ArgumentParser(\n description='%s.' % __description__,\n epilog='See `%s COMMAND --help` for help '\n 'on a specific subcommand.' % launcher,\n prog=launcher)\n parser.add_argument(\n '--version',\n action='version',\n version='%(prog)s ' + __version__)\n\n subparsers = parser.add_subparsers()\n\n # dependants\n parser_dependants = subparsers.add_parser(\n 'dependants',\n add_help=False,\n help='list dependants of package')\n parser_dependants.add_argument(\n 'package',\n metavar='PACKAGE',\n type=_distribution)\n parser_dependants.add_argument(\n '-h', '--help',\n action='help',\n help=argparse.SUPPRESS)\n parser_dependants.set_defaults(\n func=command_dependants)\n\n # dependents\n parser_dependents = subparsers.add_parser(\n 'dependents',\n add_help=False,\n help='list dependents of package')\n parser_dependents.add_argument(\n 'package',\n metavar='PACKAGE',\n type=_distribution)\n parser_dependents.add_argument(\n '-i', '--info',\n action='store_true',\n help='show version requirements')\n parser_dependents.add_argument(\n '-r', '--recursive',\n action='store_true',\n help='list dependencies recursively')\n parser_dependents.add_argument(\n '-h', '--help',\n action='help',\n help=argparse.SUPPRESS)\n parser_dependents.set_defaults(\n func=command_dependents)\n\n # locate\n parser_locate = subparsers.add_parser(\n 'locate',\n add_help=False,\n help='identify packages that file belongs to')\n parser_locate.add_argument(\n 'file',\n metavar='FILE',\n type=argparse.FileType('r'))\n parser_locate.add_argument(\n '-h', '--help',\n action='help',\n help=argparse.SUPPRESS)\n parser_locate.set_defaults(\n func=command_locate)\n\n # outdated\n parser_outdated = subparsers.add_parser(\n 'outdated',\n add_help=False,\n help='list outdated packages that may be updated')\n parser_outdated.add_argument(\n '-b', '--brief',\n 
action='store_true',\n help='show package name only')\n group = parser_outdated.add_mutually_exclusive_group()\n group.add_argument(\n '-a', '--all',\n action='store_true',\n help='list all outdated packages')\n group.add_argument(\n '-p', '--pinned',\n action='store_true',\n help='list outdated packages unable to be updated')\n group.add_argument(\n '-U', '--upgrade',\n action='store_true',\n dest='update',\n help='update packages that can be updated'\n )\n parser_outdated.add_argument(\n '-h', '--help',\n action='help',\n help=argparse.SUPPRESS)\n parser_outdated.set_defaults(\n func=command_outdated)\n\n # parents\n parser_parents = subparsers.add_parser(\n 'parents',\n add_help=False,\n help='list packages lacking dependants')\n parser_parents.add_argument(\n '-h', '--help',\n action='help',\n help=argparse.SUPPRESS)\n parser_parents.set_defaults(\n func=command_parents)\n\n return parser\n"
] | # -*- coding: utf-8 -*-
"""Command-line application"""
# Python 2 forwards-compatibility
from __future__ import absolute_import
# standard imports
import argparse
import sys
# external imports
from pip._vendor.pkg_resources import DistributionNotFound, get_distribution
# application imports
from . import __description__, __version__
from .dependants import command_dependants
from .dependents import command_dependents
from .locate import command_locate
from .outdated import command_outdated
from .parents import command_parents
def _distribution(value):
    """Validate that ``value`` names an installed distribution (argparse type)."""
    try:
        return get_distribution(value)
    except DistributionNotFound:
        raise argparse.ArgumentTypeError('invalid package: %r' % value)
def _parser():
    """Build the command-line argument parser with all subcommands."""
    launcher = 'pip%s-utils' % sys.version_info.major

    parser = argparse.ArgumentParser(
        description='%s.' % __description__,
        epilog='See `%s COMMAND --help` for help '
               'on a specific subcommand.' % launcher,
        prog=launcher)
    parser.add_argument(
        '--version',
        action='version',
        version='%(prog)s ' + __version__)

    subparsers = parser.add_subparsers()

    # dependants: list dependants of a package
    dependants = subparsers.add_parser(
        'dependants', add_help=False,
        help='list dependants of package')
    dependants.add_argument('package', metavar='PACKAGE', type=_distribution)
    dependants.add_argument('-h', '--help', action='help',
                            help=argparse.SUPPRESS)
    dependants.set_defaults(func=command_dependants)

    # dependents: list dependents of a package
    dependents = subparsers.add_parser(
        'dependents', add_help=False,
        help='list dependents of package')
    dependents.add_argument('package', metavar='PACKAGE', type=_distribution)
    dependents.add_argument('-i', '--info', action='store_true',
                            help='show version requirements')
    dependents.add_argument('-r', '--recursive', action='store_true',
                            help='list dependencies recursively')
    dependents.add_argument('-h', '--help', action='help',
                            help=argparse.SUPPRESS)
    dependents.set_defaults(func=command_dependents)

    # locate: identify packages owning a file
    locate = subparsers.add_parser(
        'locate', add_help=False,
        help='identify packages that file belongs to')
    locate.add_argument('file', metavar='FILE',
                        type=argparse.FileType('r'))
    locate.add_argument('-h', '--help', action='help',
                        help=argparse.SUPPRESS)
    locate.set_defaults(func=command_locate)

    # outdated: list (and optionally upgrade) outdated packages
    outdated = subparsers.add_parser(
        'outdated', add_help=False,
        help='list outdated packages that may be updated')
    outdated.add_argument('-b', '--brief', action='store_true',
                          help='show package name only')
    # -a / -p / -U are mutually exclusive modes
    group = outdated.add_mutually_exclusive_group()
    group.add_argument('-a', '--all', action='store_true',
                       help='list all outdated packages')
    group.add_argument('-p', '--pinned', action='store_true',
                       help='list outdated packages unable to be updated')
    group.add_argument('-U', '--upgrade', action='store_true', dest='update',
                       help='update packages that can be updated')
    outdated.add_argument('-h', '--help', action='help',
                          help=argparse.SUPPRESS)
    outdated.set_defaults(func=command_outdated)

    # parents: list packages lacking dependants
    parents = subparsers.add_parser(
        'parents', add_help=False,
        help='list packages lacking dependants')
    parents.add_argument('-h', '--help', action='help',
                         help=argparse.SUPPRESS)
    parents.set_defaults(func=command_parents)

    return parser
|
etalab/cada | setup.py | pip | python | def pip(filename):
'''Parse pip requirement file and transform it to setuptools requirements'''
requirements = []
for line in open(os.path.join('requirements', filename)).readlines():
match = RE_REQUIREMENT.match(line)
if match:
requirements.extend(pip(match.group('filename')))
else:
requirements.append(line)
return requirements | Parse pip requirement file and transform it to setuptools requirements | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/setup.py#L38-L47 | [
"def pip(filename):\n '''Parse pip requirement file and transform it to setuptools requirements'''\n requirements = []\n for line in open(os.path.join('requirements', filename)).readlines():\n match = RE_REQUIREMENT.match(line)\n if match:\n requirements.extend(pip(match.group('filename')))\n else:\n requirements.append(line)\n return requirements\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import re
from setuptools import setup, find_packages
RE_REQUIREMENT = re.compile(r'^\s*-r\s*(?P<filename>.*)$')
RE_BADGE = re.compile(r'^\[\!\[(?P<text>[^\]]+)\]\[(?P<badge>[^\]]+)\]\]\[(?P<target>[^\]]+)\]$', re.M)
BADGES_TO_KEEP = ['gitter-badge', 'readthedocs-badge']
def md(filename):
    '''
    Load a markdown file and sanitize it for PyPI.

    Strips unsupported GitHub badge links (everything whose badge id is
    not listed in BADGES_TO_KEEP, e.g. Travis CI build badges).
    '''
    text = io.open(filename).read()
    for match in RE_BADGE.finditer(text):
        if match.group('badge') not in BADGES_TO_KEEP:
            text = text.replace(match.group(0), '')
    return text
long_description = '\n'.join((
md('README.md'),
md('CHANGELOG.md'),
''
))
install_requires = pip('install.pip')
setup(
name='cada',
version='0.2.1.dev',
description='Search and consult CADA advices',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/etalab/cada',
author='Axel Haustant',
author_email='axel@data.gouv.fr',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
setup_requires=['setuptools>=38.6.0'],
python_requires='==2.7.*',
extras_require={
'sentry': pip('sentry.pip'),
'test': pip('test.pip'),
'report': pip('report.pip'),
},
entry_points={
'console_scripts': [
'cada = cada.commands:cli',
]
},
license='AGPLv3+',
keywords='cada',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python',
'Environment :: Web Environment',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Topic :: System :: Software Distribution',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
],
)
|
etalab/cada | cada/commands.py | echo | python | def echo(msg, *args, **kwargs):
'''Wraps click.echo, handles formatting and check encoding'''
file = kwargs.pop('file', None)
nl = kwargs.pop('nl', True)
err = kwargs.pop('err', False)
color = kwargs.pop('color', None)
msg = safe_unicode(msg).format(*args, **kwargs)
click.echo(msg, file=file, nl=nl, err=err, color=color) | Wraps click.echo, handles formatting and check encoding | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/commands.py#L60-L67 | [
"def safe_unicode(string):\n '''Safely transform any object into utf8 encoded bytes'''\n if not isinstance(string, basestring):\n string = unicode(string)\n if isinstance(string, unicode):\n string = string.encode('utf8')\n return string\n"
] | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import click
import logging
import pkg_resources
import shutil
import sys
from glob import iglob
from os.path import exists
from webassets.script import CommandLineEnvironment
from flask.cli import FlaskGroup, shell_command, run_command, routes_command
from cada import create_app, csv
from cada.assets import assets
from cada.models import Advice
from cada.search import es, index
log = logging.getLogger(__name__)
OK = '✔'.encode('utf8')
KO = '✘'.encode('utf8')
INFO = '➢'.encode('utf8')
WARNING = '⚠'.encode('utf8')
HEADER = '✯'.encode('utf8')
NO_CAST = (int, float, bool)
CONTEXT_SETTINGS = {
'auto_envvar_prefix': 'cada',
'help_option_names': ['-?', '-h', '--help'],
}
click.disable_unicode_literals_warning = True
def safe_unicode(string):
    '''Safely coerce any object to utf8-encoded bytes (Python 2 only:
    relies on the ``basestring``/``unicode`` builtins).'''
    text = string if isinstance(string, basestring) else unicode(string)
    if isinstance(text, unicode):
        text = text.encode('utf8')
    return text
def color(name, **kwargs):
    '''Return a function that styles text in the given terminal color
    (extra keyword args such as ``bold`` are forwarded to click.style).'''
    def _style(text):
        return click.style(safe_unicode(text), fg=name, **kwargs).decode('utf8')
    return _style
green = color('green', bold=True)
yellow = color('yellow', bold=True)
red = color('red', bold=True)
cyan = color('cyan', bold=True)
magenta = color('magenta', bold=True)
white = color('white', bold=True)
def header(msg, *args, **kwargs):
    '''Display a message framed by yellow header markers.'''
    decorated = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))
    echo(decorated, *args, **kwargs)
def success(msg, *args, **kwargs):
    '''Display a success message prefixed with a green check mark.'''
    prefixed = '{0} {1}'.format(green(OK), white(msg))
    echo(prefixed, *args, **kwargs)
def warning(msg, *args, **kwargs):
    '''Display a warning message prefixed with a yellow warning sign.'''
    echo('{0} {1}'.format(yellow(WARNING), msg), *args, **kwargs)
def error(msg, details=None, *args, **kwargs):
    '''Display an error message, with optional multi-line details.'''
    text = '{0} {1}'.format(red(KO), white(msg))
    if details:
        # Details go on continuation lines below the main message
        text = '\n'.join((text, safe_unicode(details)))
    echo(format_multiline(text), *args, **kwargs)
def exit_with_error(msg='Aborted', details=None, code=-1, *args, **kwargs):
    '''Display an error message, then exit the process with ``code``.'''
    error(msg, details=details, *args, **kwargs)
    sys.exit(code)
def format_multiline(string):
    '''Prefix continuation lines with box-drawing bars, using a corner
    character on the last one.'''
    piped = string.replace('\n', '\n│ ')
    return replace_last(piped, '│', '└')
def replace_last(string, char, replacement):
    '''Replace the last occurrence of ``char`` in ``string`` with
    ``replacement`` (no-op when ``char`` is absent).'''
    head, sep, tail = string.rpartition(char)
    if not sep:
        return string
    return head + replacement + tail
LEVEL_COLORS = {
logging.DEBUG: cyan,
logging.WARNING: yellow,
logging.ERROR: red,
logging.CRITICAL: color('white', bg='red', bold=True),
}
LEVELS_PREFIX = {
logging.INFO: cyan(INFO),
logging.WARNING: yellow(WARNING),
}
def print_version(ctx, param, value):
    '''Click eager callback: print the installed cada version and exit.

    Does nothing when the flag is absent or click is doing resilient
    parsing (e.g. shell completion).
    '''
    if value and not ctx.resilient_parsing:
        version = pkg_resources.get_distribution('cada').version
        click.echo(version)
        ctx.exit()
@click.group(cls=FlaskGroup, create_app=create_app, context_settings=CONTEXT_SETTINGS,
             add_version_option=False, add_default_commands=False)
@click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True)
def cli():
    '''CADA Management client'''


# Prevent Flask extensions from registering their own commands
cli._loaded_plugin_commands = True
cli.add_command(shell_command)
cli.add_command(run_command, name='runserver')
cli.add_command(routes_command)
@cli.command()
@click.argument('patterns', nargs=-1)
@click.option('-r', '--reindex', 'full_reindex', is_flag=True,
              help='Trigger a full reindexation instead of indexing new advices')
def load(patterns, full_reindex):
    '''
    Load one or more CADA CSV files matching patterns
    '''
    header('Loading CSV files')
    for pattern in patterns:
        for filename in iglob(pattern):
            echo('Loading {}'.format(white(filename)))
            with open(filename) as f:
                reader = csv.reader(f)
                reader.next()  # skip the CSV header row (Python 2 iterator)
                for idx, row in enumerate(reader, 1):
                    try:
                        advice = csv.from_row(row)
                        skipped = False
                        if not full_reindex:
                            index(advice)
                        # progress: a dot per row, the count every 50 rows
                        echo('.' if idx % 50 else white(idx), nl=False)
                    except Exception:
                        echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False)
                        skipped = True
                # finish the progress line, flagging a trailing skip
                if skipped:
                    echo(white('{}(s)'.format(idx)) if idx % 50 else '')
                else:
                    echo(white(idx) if idx % 50 else '')
                success('Processed {0} rows'.format(idx))
    if full_reindex:
        reindex()
@cli.command()
def reindex():
    '''Reindex all advices'''
    header('Reindexing all advices')
    echo('Deleting index {0}', white(es.index_name))
    # Drop and recreate the index before re-feeding every advice
    if es.indices.exists(es.index_name):
        es.indices.delete(index=es.index_name)
    es.initialize()
    count = 0
    for count, advice in enumerate(Advice.objects, 1):
        index(advice)
        echo('.' if count % 50 else white(count), nl=False)
    echo(white(count) if count % 50 else '')
    es.indices.refresh(index=es.index_name)
    success('Indexed {0} advices', count)
@cli.command()
@click.argument('path', default='static')
@click.option('-ni', '--no-input', is_flag=True, help="Disable input prompts")
def static(path, no_input):
    '''Compile and collect static files into path'''
    # Give webassets a verbose logger for the build step
    weblog = logging.getLogger('webassets')
    weblog.addHandler(logging.StreamHandler())
    weblog.setLevel(logging.DEBUG)
    CommandLineEnvironment(assets, weblog).build()
    if exists(path):
        warning('{0} directory already exists and will be {1}', white(path), white('erased'))
        if not no_input and not click.confirm('Are you sure'):
            exit_with_error()
        shutil.rmtree(path)
    echo('Copying assets into {0}', white(path))
    shutil.copytree(assets.directory, path)
    success('Done')
@cli.command()
def anon():
'''Check for candidates to anonymization'''
header(anon.__doc__)
filename = 'urls_to_check.csv'
candidates = Advice.objects(__raw__={
'$or': [
{'subject': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}},
{'content': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}}
]
})
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
# Generate header
writer.writerow(csv.ANON_HEADER)
for idx, advice in enumerate(candidates, 1):
writer.writerow(csv.to_anon_row(advice))
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
success('Total: {0} candidates', len(candidates))
@cli.command()
@click.argument('csvfile', default='fix.csv', type=click.File('r'))
def fix(csvfile):
'''Apply a fix (ie. remove plain names)'''
header('Apply fixes from {}', csvfile.name)
bads = []
reader = csv.reader(csvfile)
reader.next() # Skip header
for id, _, sources, dests in reader:
advice = Advice.objects.get(id=id)
sources = [s.strip() for s in sources.split(',') if s.strip()]
dests = [d.strip() for d in dests.split(',') if d.strip()]
if not len(sources) == len(dests):
bads.append(id)
continue
for source, dest in zip(sources, dests):
echo('{0}: Replace {1} with {2}', white(id), white(source), white(dest))
advice.subject = advice.subject.replace(source, dest)
advice.content = advice.content.replace(source, dest)
advice.save()
index(advice)
for id in bads:
echo('{0}: Replacements length not matching', white(id))
success('Done')
|
etalab/cada | cada/commands.py | header | python | def header(msg, *args, **kwargs):
'''Display an header'''
msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))
echo(msg, *args, **kwargs) | Display an header | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/commands.py#L70-L73 | [
"def echo(msg, *args, **kwargs):\n '''Wraps click.echo, handles formatting and check encoding'''\n file = kwargs.pop('file', None)\n nl = kwargs.pop('nl', True)\n err = kwargs.pop('err', False)\n color = kwargs.pop('color', None)\n msg = safe_unicode(msg).format(*args, **kwargs)\n click.echo(msg, file=file, nl=nl, err=err, color=color)\n",
"return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')\n"
] | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import click
import logging
import pkg_resources
import shutil
import sys
from glob import iglob
from os.path import exists
from webassets.script import CommandLineEnvironment
from flask.cli import FlaskGroup, shell_command, run_command, routes_command
from cada import create_app, csv
from cada.assets import assets
from cada.models import Advice
from cada.search import es, index
log = logging.getLogger(__name__)
OK = '✔'.encode('utf8')
KO = '✘'.encode('utf8')
INFO = '➢'.encode('utf8')
WARNING = '⚠'.encode('utf8')
HEADER = '✯'.encode('utf8')
NO_CAST = (int, float, bool)
CONTEXT_SETTINGS = {
'auto_envvar_prefix': 'cada',
'help_option_names': ['-?', '-h', '--help'],
}
click.disable_unicode_literals_warning = True
def safe_unicode(string):
'''Safely transform any object into utf8 encoded bytes'''
if not isinstance(string, basestring):
string = unicode(string)
if isinstance(string, unicode):
string = string.encode('utf8')
return string
def color(name, **kwargs):
return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')
green = color('green', bold=True)
yellow = color('yellow', bold=True)
red = color('red', bold=True)
cyan = color('cyan', bold=True)
magenta = color('magenta', bold=True)
white = color('white', bold=True)
def echo(msg, *args, **kwargs):
'''Wraps click.echo, handles formatting and check encoding'''
file = kwargs.pop('file', None)
nl = kwargs.pop('nl', True)
err = kwargs.pop('err', False)
color = kwargs.pop('color', None)
msg = safe_unicode(msg).format(*args, **kwargs)
click.echo(msg, file=file, nl=nl, err=err, color=color)
def success(msg, *args, **kwargs):
'''Display a success message'''
echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs)
def warning(msg, *args, **kwargs):
'''Display a warning message'''
msg = '{0} {1}'.format(yellow(WARNING), msg)
echo(msg, *args, **kwargs)
def error(msg, details=None, *args, **kwargs):
'''Display an error message with optionnal details'''
msg = '{0} {1}'.format(red(KO), white(msg))
if details:
msg = '\n'.join((msg, safe_unicode(details)))
echo(format_multiline(msg), *args, **kwargs)
def exit_with_error(msg='Aborted', details=None, code=-1, *args, **kwargs):
'''Exit with error'''
error(msg, details=details, *args, **kwargs)
sys.exit(code)
def format_multiline(string):
string = string.replace('\n', '\n│ ')
return replace_last(string, '│', '└')
def replace_last(string, char, replacement):
return replacement.join(string.rsplit(char, 1))
LEVEL_COLORS = {
logging.DEBUG: cyan,
logging.WARNING: yellow,
logging.ERROR: red,
logging.CRITICAL: color('white', bg='red', bold=True),
}
LEVELS_PREFIX = {
logging.INFO: cyan(INFO),
logging.WARNING: yellow(WARNING),
}
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(pkg_resources.get_distribution('cada').version)
ctx.exit()
@click.group(cls=FlaskGroup, create_app=create_app, context_settings=CONTEXT_SETTINGS,
add_version_option=False, add_default_commands=False)
@click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True)
def cli():
'''CADA Management client'''
cli._loaded_plugin_commands = True # Prevent extensions to register their commands
cli.add_command(shell_command)
cli.add_command(run_command, name='runserver')
cli.add_command(routes_command)
@cli.command()
@click.argument('patterns', nargs=-1)
@click.option('-r', '--reindex', 'full_reindex', is_flag=True,
help='Trigger a full reindexation instead of indexing new advices')
def load(patterns, full_reindex):
'''
Load one or more CADA CSV files matching patterns
'''
header('Loading CSV files')
for pattern in patterns:
for filename in iglob(pattern):
echo('Loading {}'.format(white(filename)))
with open(filename) as f:
reader = csv.reader(f)
# Skip header
reader.next()
for idx, row in enumerate(reader, 1):
try:
advice = csv.from_row(row)
skipped = False
if not full_reindex:
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
except Exception:
echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False)
skipped = True
if skipped:
echo(white('{}(s)'.format(idx)) if idx % 50 else '')
else:
echo(white(idx) if idx % 50 else '')
success('Processed {0} rows'.format(idx))
if full_reindex:
reindex()
@cli.command()
def reindex():
'''Reindex all advices'''
header('Reindexing all advices')
echo('Deleting index {0}', white(es.index_name))
if es.indices.exists(es.index_name):
es.indices.delete(index=es.index_name)
es.initialize()
idx = 0
for idx, advice in enumerate(Advice.objects, 1):
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
es.indices.refresh(index=es.index_name)
success('Indexed {0} advices', idx)
@cli.command()
@click.argument('path', default='static')
@click.option('-ni', '--no-input', is_flag=True, help="Disable input prompts")
def static(path, no_input):
'''Compile and collect static files into path'''
log = logging.getLogger('webassets')
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
cmdenv = CommandLineEnvironment(assets, log)
cmdenv.build()
if exists(path):
warning('{0} directory already exists and will be {1}', white(path), white('erased'))
if not no_input and not click.confirm('Are you sure'):
exit_with_error()
shutil.rmtree(path)
echo('Copying assets into {0}', white(path))
shutil.copytree(assets.directory, path)
success('Done')
@cli.command()
def anon():
'''Check for candidates to anonymization'''
header(anon.__doc__)
filename = 'urls_to_check.csv'
candidates = Advice.objects(__raw__={
'$or': [
{'subject': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}},
{'content': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}}
]
})
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
# Generate header
writer.writerow(csv.ANON_HEADER)
for idx, advice in enumerate(candidates, 1):
writer.writerow(csv.to_anon_row(advice))
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
success('Total: {0} candidates', len(candidates))
@cli.command()
@click.argument('csvfile', default='fix.csv', type=click.File('r'))
def fix(csvfile):
'''Apply a fix (ie. remove plain names)'''
header('Apply fixes from {}', csvfile.name)
bads = []
reader = csv.reader(csvfile)
reader.next() # Skip header
for id, _, sources, dests in reader:
advice = Advice.objects.get(id=id)
sources = [s.strip() for s in sources.split(',') if s.strip()]
dests = [d.strip() for d in dests.split(',') if d.strip()]
if not len(sources) == len(dests):
bads.append(id)
continue
for source, dest in zip(sources, dests):
echo('{0}: Replace {1} with {2}', white(id), white(source), white(dest))
advice.subject = advice.subject.replace(source, dest)
advice.content = advice.content.replace(source, dest)
advice.save()
index(advice)
for id in bads:
echo('{0}: Replacements length not matching', white(id))
success('Done')
|
etalab/cada | cada/commands.py | success | python | def success(msg, *args, **kwargs):
'''Display a success message'''
echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs) | Display a success message | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/commands.py#L76-L78 | [
"def echo(msg, *args, **kwargs):\n '''Wraps click.echo, handles formatting and check encoding'''\n file = kwargs.pop('file', None)\n nl = kwargs.pop('nl', True)\n err = kwargs.pop('err', False)\n color = kwargs.pop('color', None)\n msg = safe_unicode(msg).format(*args, **kwargs)\n click.echo(msg, file=file, nl=nl, err=err, color=color)\n",
"return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')\n"
] | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import click
import logging
import pkg_resources
import shutil
import sys
from glob import iglob
from os.path import exists
from webassets.script import CommandLineEnvironment
from flask.cli import FlaskGroup, shell_command, run_command, routes_command
from cada import create_app, csv
from cada.assets import assets
from cada.models import Advice
from cada.search import es, index
log = logging.getLogger(__name__)
OK = '✔'.encode('utf8')
KO = '✘'.encode('utf8')
INFO = '➢'.encode('utf8')
WARNING = '⚠'.encode('utf8')
HEADER = '✯'.encode('utf8')
NO_CAST = (int, float, bool)
CONTEXT_SETTINGS = {
'auto_envvar_prefix': 'cada',
'help_option_names': ['-?', '-h', '--help'],
}
click.disable_unicode_literals_warning = True
def safe_unicode(string):
'''Safely transform any object into utf8 encoded bytes'''
if not isinstance(string, basestring):
string = unicode(string)
if isinstance(string, unicode):
string = string.encode('utf8')
return string
def color(name, **kwargs):
return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')
green = color('green', bold=True)
yellow = color('yellow', bold=True)
red = color('red', bold=True)
cyan = color('cyan', bold=True)
magenta = color('magenta', bold=True)
white = color('white', bold=True)
def echo(msg, *args, **kwargs):
'''Wraps click.echo, handles formatting and check encoding'''
file = kwargs.pop('file', None)
nl = kwargs.pop('nl', True)
err = kwargs.pop('err', False)
color = kwargs.pop('color', None)
msg = safe_unicode(msg).format(*args, **kwargs)
click.echo(msg, file=file, nl=nl, err=err, color=color)
def header(msg, *args, **kwargs):
'''Display an header'''
msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))
echo(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
'''Display a warning message'''
msg = '{0} {1}'.format(yellow(WARNING), msg)
echo(msg, *args, **kwargs)
def error(msg, details=None, *args, **kwargs):
'''Display an error message with optionnal details'''
msg = '{0} {1}'.format(red(KO), white(msg))
if details:
msg = '\n'.join((msg, safe_unicode(details)))
echo(format_multiline(msg), *args, **kwargs)
def exit_with_error(msg='Aborted', details=None, code=-1, *args, **kwargs):
'''Exit with error'''
error(msg, details=details, *args, **kwargs)
sys.exit(code)
def format_multiline(string):
string = string.replace('\n', '\n│ ')
return replace_last(string, '│', '└')
def replace_last(string, char, replacement):
return replacement.join(string.rsplit(char, 1))
LEVEL_COLORS = {
logging.DEBUG: cyan,
logging.WARNING: yellow,
logging.ERROR: red,
logging.CRITICAL: color('white', bg='red', bold=True),
}
LEVELS_PREFIX = {
logging.INFO: cyan(INFO),
logging.WARNING: yellow(WARNING),
}
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(pkg_resources.get_distribution('cada').version)
ctx.exit()
@click.group(cls=FlaskGroup, create_app=create_app, context_settings=CONTEXT_SETTINGS,
add_version_option=False, add_default_commands=False)
@click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True)
def cli():
'''CADA Management client'''
cli._loaded_plugin_commands = True # Prevent extensions to register their commands
cli.add_command(shell_command)
cli.add_command(run_command, name='runserver')
cli.add_command(routes_command)
@cli.command()
@click.argument('patterns', nargs=-1)
@click.option('-r', '--reindex', 'full_reindex', is_flag=True,
help='Trigger a full reindexation instead of indexing new advices')
def load(patterns, full_reindex):
'''
Load one or more CADA CSV files matching patterns
'''
header('Loading CSV files')
for pattern in patterns:
for filename in iglob(pattern):
echo('Loading {}'.format(white(filename)))
with open(filename) as f:
reader = csv.reader(f)
# Skip header
reader.next()
for idx, row in enumerate(reader, 1):
try:
advice = csv.from_row(row)
skipped = False
if not full_reindex:
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
except Exception:
echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False)
skipped = True
if skipped:
echo(white('{}(s)'.format(idx)) if idx % 50 else '')
else:
echo(white(idx) if idx % 50 else '')
success('Processed {0} rows'.format(idx))
if full_reindex:
reindex()
@cli.command()
def reindex():
'''Reindex all advices'''
header('Reindexing all advices')
echo('Deleting index {0}', white(es.index_name))
if es.indices.exists(es.index_name):
es.indices.delete(index=es.index_name)
es.initialize()
idx = 0
for idx, advice in enumerate(Advice.objects, 1):
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
es.indices.refresh(index=es.index_name)
success('Indexed {0} advices', idx)
@cli.command()
@click.argument('path', default='static')
@click.option('-ni', '--no-input', is_flag=True, help="Disable input prompts")
def static(path, no_input):
'''Compile and collect static files into path'''
log = logging.getLogger('webassets')
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
cmdenv = CommandLineEnvironment(assets, log)
cmdenv.build()
if exists(path):
warning('{0} directory already exists and will be {1}', white(path), white('erased'))
if not no_input and not click.confirm('Are you sure'):
exit_with_error()
shutil.rmtree(path)
echo('Copying assets into {0}', white(path))
shutil.copytree(assets.directory, path)
success('Done')
@cli.command()
def anon():
'''Check for candidates to anonymization'''
header(anon.__doc__)
filename = 'urls_to_check.csv'
candidates = Advice.objects(__raw__={
'$or': [
{'subject': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}},
{'content': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}}
]
})
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
# Generate header
writer.writerow(csv.ANON_HEADER)
for idx, advice in enumerate(candidates, 1):
writer.writerow(csv.to_anon_row(advice))
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
success('Total: {0} candidates', len(candidates))
@cli.command()
@click.argument('csvfile', default='fix.csv', type=click.File('r'))
def fix(csvfile):
'''Apply a fix (ie. remove plain names)'''
header('Apply fixes from {}', csvfile.name)
bads = []
reader = csv.reader(csvfile)
reader.next() # Skip header
for id, _, sources, dests in reader:
advice = Advice.objects.get(id=id)
sources = [s.strip() for s in sources.split(',') if s.strip()]
dests = [d.strip() for d in dests.split(',') if d.strip()]
if not len(sources) == len(dests):
bads.append(id)
continue
for source, dest in zip(sources, dests):
echo('{0}: Replace {1} with {2}', white(id), white(source), white(dest))
advice.subject = advice.subject.replace(source, dest)
advice.content = advice.content.replace(source, dest)
advice.save()
index(advice)
for id in bads:
echo('{0}: Replacements length not matching', white(id))
success('Done')
|
etalab/cada | cada/commands.py | warning | python | def warning(msg, *args, **kwargs):
'''Display a warning message'''
msg = '{0} {1}'.format(yellow(WARNING), msg)
echo(msg, *args, **kwargs) | Display a warning message | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/commands.py#L81-L84 | [
"def echo(msg, *args, **kwargs):\n '''Wraps click.echo, handles formatting and check encoding'''\n file = kwargs.pop('file', None)\n nl = kwargs.pop('nl', True)\n err = kwargs.pop('err', False)\n color = kwargs.pop('color', None)\n msg = safe_unicode(msg).format(*args, **kwargs)\n click.echo(msg, file=file, nl=nl, err=err, color=color)\n",
"return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')\n"
] | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import click
import logging
import pkg_resources
import shutil
import sys
from glob import iglob
from os.path import exists
from webassets.script import CommandLineEnvironment
from flask.cli import FlaskGroup, shell_command, run_command, routes_command
from cada import create_app, csv
from cada.assets import assets
from cada.models import Advice
from cada.search import es, index
log = logging.getLogger(__name__)
OK = '✔'.encode('utf8')
KO = '✘'.encode('utf8')
INFO = '➢'.encode('utf8')
WARNING = '⚠'.encode('utf8')
HEADER = '✯'.encode('utf8')
NO_CAST = (int, float, bool)
CONTEXT_SETTINGS = {
'auto_envvar_prefix': 'cada',
'help_option_names': ['-?', '-h', '--help'],
}
click.disable_unicode_literals_warning = True
def safe_unicode(string):
'''Safely transform any object into utf8 encoded bytes'''
if not isinstance(string, basestring):
string = unicode(string)
if isinstance(string, unicode):
string = string.encode('utf8')
return string
def color(name, **kwargs):
return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')
green = color('green', bold=True)
yellow = color('yellow', bold=True)
red = color('red', bold=True)
cyan = color('cyan', bold=True)
magenta = color('magenta', bold=True)
white = color('white', bold=True)
def echo(msg, *args, **kwargs):
'''Wraps click.echo, handles formatting and check encoding'''
file = kwargs.pop('file', None)
nl = kwargs.pop('nl', True)
err = kwargs.pop('err', False)
color = kwargs.pop('color', None)
msg = safe_unicode(msg).format(*args, **kwargs)
click.echo(msg, file=file, nl=nl, err=err, color=color)
def header(msg, *args, **kwargs):
'''Display an header'''
msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))
echo(msg, *args, **kwargs)
def success(msg, *args, **kwargs):
'''Display a success message'''
echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs)
def error(msg, details=None, *args, **kwargs):
'''Display an error message with optionnal details'''
msg = '{0} {1}'.format(red(KO), white(msg))
if details:
msg = '\n'.join((msg, safe_unicode(details)))
echo(format_multiline(msg), *args, **kwargs)
def exit_with_error(msg='Aborted', details=None, code=-1, *args, **kwargs):
'''Exit with error'''
error(msg, details=details, *args, **kwargs)
sys.exit(code)
def format_multiline(string):
string = string.replace('\n', '\n│ ')
return replace_last(string, '│', '└')
def replace_last(string, char, replacement):
return replacement.join(string.rsplit(char, 1))
LEVEL_COLORS = {
logging.DEBUG: cyan,
logging.WARNING: yellow,
logging.ERROR: red,
logging.CRITICAL: color('white', bg='red', bold=True),
}
LEVELS_PREFIX = {
logging.INFO: cyan(INFO),
logging.WARNING: yellow(WARNING),
}
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(pkg_resources.get_distribution('cada').version)
ctx.exit()
@click.group(cls=FlaskGroup, create_app=create_app, context_settings=CONTEXT_SETTINGS,
add_version_option=False, add_default_commands=False)
@click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True)
def cli():
'''CADA Management client'''
cli._loaded_plugin_commands = True # Prevent extensions to register their commands
cli.add_command(shell_command)
cli.add_command(run_command, name='runserver')
cli.add_command(routes_command)
@cli.command()
@click.argument('patterns', nargs=-1)
@click.option('-r', '--reindex', 'full_reindex', is_flag=True,
help='Trigger a full reindexation instead of indexing new advices')
def load(patterns, full_reindex):
'''
Load one or more CADA CSV files matching patterns
'''
header('Loading CSV files')
for pattern in patterns:
for filename in iglob(pattern):
echo('Loading {}'.format(white(filename)))
with open(filename) as f:
reader = csv.reader(f)
# Skip header
reader.next()
for idx, row in enumerate(reader, 1):
try:
advice = csv.from_row(row)
skipped = False
if not full_reindex:
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
except Exception:
echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False)
skipped = True
if skipped:
echo(white('{}(s)'.format(idx)) if idx % 50 else '')
else:
echo(white(idx) if idx % 50 else '')
success('Processed {0} rows'.format(idx))
if full_reindex:
reindex()
@cli.command()
def reindex():
'''Reindex all advices'''
header('Reindexing all advices')
echo('Deleting index {0}', white(es.index_name))
if es.indices.exists(es.index_name):
es.indices.delete(index=es.index_name)
es.initialize()
idx = 0
for idx, advice in enumerate(Advice.objects, 1):
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
es.indices.refresh(index=es.index_name)
success('Indexed {0} advices', idx)
@cli.command()
@click.argument('path', default='static')
@click.option('-ni', '--no-input', is_flag=True, help="Disable input prompts")
def static(path, no_input):
'''Compile and collect static files into path'''
log = logging.getLogger('webassets')
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
cmdenv = CommandLineEnvironment(assets, log)
cmdenv.build()
if exists(path):
warning('{0} directory already exists and will be {1}', white(path), white('erased'))
if not no_input and not click.confirm('Are you sure'):
exit_with_error()
shutil.rmtree(path)
echo('Copying assets into {0}', white(path))
shutil.copytree(assets.directory, path)
success('Done')
@cli.command()
def anon():
'''Check for candidates to anonymization'''
header(anon.__doc__)
filename = 'urls_to_check.csv'
candidates = Advice.objects(__raw__={
'$or': [
{'subject': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}},
{'content': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}}
]
})
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
# Generate header
writer.writerow(csv.ANON_HEADER)
for idx, advice in enumerate(candidates, 1):
writer.writerow(csv.to_anon_row(advice))
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
success('Total: {0} candidates', len(candidates))
@cli.command()
@click.argument('csvfile', default='fix.csv', type=click.File('r'))
def fix(csvfile):
'''Apply a fix (ie. remove plain names)'''
header('Apply fixes from {}', csvfile.name)
bads = []
reader = csv.reader(csvfile)
reader.next() # Skip header
for id, _, sources, dests in reader:
advice = Advice.objects.get(id=id)
sources = [s.strip() for s in sources.split(',') if s.strip()]
dests = [d.strip() for d in dests.split(',') if d.strip()]
if not len(sources) == len(dests):
bads.append(id)
continue
for source, dest in zip(sources, dests):
echo('{0}: Replace {1} with {2}', white(id), white(source), white(dest))
advice.subject = advice.subject.replace(source, dest)
advice.content = advice.content.replace(source, dest)
advice.save()
index(advice)
for id in bads:
echo('{0}: Replacements length not matching', white(id))
success('Done')
|
etalab/cada | cada/commands.py | error | python | def error(msg, details=None, *args, **kwargs):
'''Display an error message with optionnal details'''
msg = '{0} {1}'.format(red(KO), white(msg))
if details:
msg = '\n'.join((msg, safe_unicode(details)))
echo(format_multiline(msg), *args, **kwargs) | Display an error message with optionnal details | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/commands.py#L87-L92 | [
"def echo(msg, *args, **kwargs):\n '''Wraps click.echo, handles formatting and check encoding'''\n file = kwargs.pop('file', None)\n nl = kwargs.pop('nl', True)\n err = kwargs.pop('err', False)\n color = kwargs.pop('color', None)\n msg = safe_unicode(msg).format(*args, **kwargs)\n click.echo(msg, file=file, nl=nl, err=err, color=color)\n",
"def safe_unicode(string):\n '''Safely transform any object into utf8 encoded bytes'''\n if not isinstance(string, basestring):\n string = unicode(string)\n if isinstance(string, unicode):\n string = string.encode('utf8')\n return string\n",
"def format_multiline(string):\n string = string.replace('\\n', '\\n│ ')\n return replace_last(string, '│', '└')\n",
"return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')\n"
] | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import click
import logging
import pkg_resources
import shutil
import sys
from glob import iglob
from os.path import exists
from webassets.script import CommandLineEnvironment
from flask.cli import FlaskGroup, shell_command, run_command, routes_command
from cada import create_app, csv
from cada.assets import assets
from cada.models import Advice
from cada.search import es, index
log = logging.getLogger(__name__)
OK = '✔'.encode('utf8')
KO = '✘'.encode('utf8')
INFO = '➢'.encode('utf8')
WARNING = '⚠'.encode('utf8')
HEADER = '✯'.encode('utf8')
NO_CAST = (int, float, bool)
CONTEXT_SETTINGS = {
'auto_envvar_prefix': 'cada',
'help_option_names': ['-?', '-h', '--help'],
}
click.disable_unicode_literals_warning = True
def safe_unicode(string):
'''Safely transform any object into utf8 encoded bytes'''
if not isinstance(string, basestring):
string = unicode(string)
if isinstance(string, unicode):
string = string.encode('utf8')
return string
def color(name, **kwargs):
return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')
green = color('green', bold=True)
yellow = color('yellow', bold=True)
red = color('red', bold=True)
cyan = color('cyan', bold=True)
magenta = color('magenta', bold=True)
white = color('white', bold=True)
def echo(msg, *args, **kwargs):
'''Wraps click.echo, handles formatting and check encoding'''
file = kwargs.pop('file', None)
nl = kwargs.pop('nl', True)
err = kwargs.pop('err', False)
color = kwargs.pop('color', None)
msg = safe_unicode(msg).format(*args, **kwargs)
click.echo(msg, file=file, nl=nl, err=err, color=color)
def header(msg, *args, **kwargs):
'''Display an header'''
msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))
echo(msg, *args, **kwargs)
def success(msg, *args, **kwargs):
'''Display a success message'''
echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs)
def warning(msg, *args, **kwargs):
'''Display a warning message'''
msg = '{0} {1}'.format(yellow(WARNING), msg)
echo(msg, *args, **kwargs)
def exit_with_error(msg='Aborted', details=None, code=-1, *args, **kwargs):
'''Exit with error'''
error(msg, details=details, *args, **kwargs)
sys.exit(code)
def format_multiline(string):
string = string.replace('\n', '\n│ ')
return replace_last(string, '│', '└')
def replace_last(string, char, replacement):
return replacement.join(string.rsplit(char, 1))
LEVEL_COLORS = {
logging.DEBUG: cyan,
logging.WARNING: yellow,
logging.ERROR: red,
logging.CRITICAL: color('white', bg='red', bold=True),
}
LEVELS_PREFIX = {
logging.INFO: cyan(INFO),
logging.WARNING: yellow(WARNING),
}
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(pkg_resources.get_distribution('cada').version)
ctx.exit()
@click.group(cls=FlaskGroup, create_app=create_app, context_settings=CONTEXT_SETTINGS,
add_version_option=False, add_default_commands=False)
@click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True)
def cli():
'''CADA Management client'''
cli._loaded_plugin_commands = True # Prevent extensions to register their commands
cli.add_command(shell_command)
cli.add_command(run_command, name='runserver')
cli.add_command(routes_command)
@cli.command()
@click.argument('patterns', nargs=-1)
@click.option('-r', '--reindex', 'full_reindex', is_flag=True,
help='Trigger a full reindexation instead of indexing new advices')
def load(patterns, full_reindex):
'''
Load one or more CADA CSV files matching patterns
'''
header('Loading CSV files')
for pattern in patterns:
for filename in iglob(pattern):
echo('Loading {}'.format(white(filename)))
with open(filename) as f:
reader = csv.reader(f)
# Skip header
reader.next()
for idx, row in enumerate(reader, 1):
try:
advice = csv.from_row(row)
skipped = False
if not full_reindex:
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
except Exception:
echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False)
skipped = True
if skipped:
echo(white('{}(s)'.format(idx)) if idx % 50 else '')
else:
echo(white(idx) if idx % 50 else '')
success('Processed {0} rows'.format(idx))
if full_reindex:
reindex()
@cli.command()
def reindex():
'''Reindex all advices'''
header('Reindexing all advices')
echo('Deleting index {0}', white(es.index_name))
if es.indices.exists(es.index_name):
es.indices.delete(index=es.index_name)
es.initialize()
idx = 0
for idx, advice in enumerate(Advice.objects, 1):
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
es.indices.refresh(index=es.index_name)
success('Indexed {0} advices', idx)
@cli.command()
@click.argument('path', default='static')
@click.option('-ni', '--no-input', is_flag=True, help="Disable input prompts")
def static(path, no_input):
'''Compile and collect static files into path'''
log = logging.getLogger('webassets')
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
cmdenv = CommandLineEnvironment(assets, log)
cmdenv.build()
if exists(path):
warning('{0} directory already exists and will be {1}', white(path), white('erased'))
if not no_input and not click.confirm('Are you sure'):
exit_with_error()
shutil.rmtree(path)
echo('Copying assets into {0}', white(path))
shutil.copytree(assets.directory, path)
success('Done')
@cli.command()
def anon():
'''Check for candidates to anonymization'''
header(anon.__doc__)
filename = 'urls_to_check.csv'
candidates = Advice.objects(__raw__={
'$or': [
{'subject': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}},
{'content': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}}
]
})
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
# Generate header
writer.writerow(csv.ANON_HEADER)
for idx, advice in enumerate(candidates, 1):
writer.writerow(csv.to_anon_row(advice))
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
success('Total: {0} candidates', len(candidates))
@cli.command()
@click.argument('csvfile', default='fix.csv', type=click.File('r'))
def fix(csvfile):
'''Apply a fix (ie. remove plain names)'''
header('Apply fixes from {}', csvfile.name)
bads = []
reader = csv.reader(csvfile)
reader.next() # Skip header
for id, _, sources, dests in reader:
advice = Advice.objects.get(id=id)
sources = [s.strip() for s in sources.split(',') if s.strip()]
dests = [d.strip() for d in dests.split(',') if d.strip()]
if not len(sources) == len(dests):
bads.append(id)
continue
for source, dest in zip(sources, dests):
echo('{0}: Replace {1} with {2}', white(id), white(source), white(dest))
advice.subject = advice.subject.replace(source, dest)
advice.content = advice.content.replace(source, dest)
advice.save()
index(advice)
for id in bads:
echo('{0}: Replacements length not matching', white(id))
success('Done')
|
etalab/cada | cada/commands.py | exit_with_error | python | def exit_with_error(msg='Aborted', details=None, code=-1, *args, **kwargs):
'''Exit with error'''
error(msg, details=details, *args, **kwargs)
sys.exit(code) | Exit with error | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/commands.py#L95-L98 | [
"def error(msg, details=None, *args, **kwargs):\n '''Display an error message with optionnal details'''\n msg = '{0} {1}'.format(red(KO), white(msg))\n if details:\n msg = '\\n'.join((msg, safe_unicode(details)))\n echo(format_multiline(msg), *args, **kwargs)\n"
] | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import click
import logging
import pkg_resources
import shutil
import sys
from glob import iglob
from os.path import exists
from webassets.script import CommandLineEnvironment
from flask.cli import FlaskGroup, shell_command, run_command, routes_command
from cada import create_app, csv
from cada.assets import assets
from cada.models import Advice
from cada.search import es, index
log = logging.getLogger(__name__)
OK = '✔'.encode('utf8')
KO = '✘'.encode('utf8')
INFO = '➢'.encode('utf8')
WARNING = '⚠'.encode('utf8')
HEADER = '✯'.encode('utf8')
NO_CAST = (int, float, bool)
CONTEXT_SETTINGS = {
'auto_envvar_prefix': 'cada',
'help_option_names': ['-?', '-h', '--help'],
}
click.disable_unicode_literals_warning = True
def safe_unicode(string):
'''Safely transform any object into utf8 encoded bytes'''
if not isinstance(string, basestring):
string = unicode(string)
if isinstance(string, unicode):
string = string.encode('utf8')
return string
def color(name, **kwargs):
return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')
green = color('green', bold=True)
yellow = color('yellow', bold=True)
red = color('red', bold=True)
cyan = color('cyan', bold=True)
magenta = color('magenta', bold=True)
white = color('white', bold=True)
def echo(msg, *args, **kwargs):
'''Wraps click.echo, handles formatting and check encoding'''
file = kwargs.pop('file', None)
nl = kwargs.pop('nl', True)
err = kwargs.pop('err', False)
color = kwargs.pop('color', None)
msg = safe_unicode(msg).format(*args, **kwargs)
click.echo(msg, file=file, nl=nl, err=err, color=color)
def header(msg, *args, **kwargs):
'''Display an header'''
msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))
echo(msg, *args, **kwargs)
def success(msg, *args, **kwargs):
'''Display a success message'''
echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs)
def warning(msg, *args, **kwargs):
'''Display a warning message'''
msg = '{0} {1}'.format(yellow(WARNING), msg)
echo(msg, *args, **kwargs)
def error(msg, details=None, *args, **kwargs):
'''Display an error message with optionnal details'''
msg = '{0} {1}'.format(red(KO), white(msg))
if details:
msg = '\n'.join((msg, safe_unicode(details)))
echo(format_multiline(msg), *args, **kwargs)
def format_multiline(string):
string = string.replace('\n', '\n│ ')
return replace_last(string, '│', '└')
def replace_last(string, char, replacement):
return replacement.join(string.rsplit(char, 1))
LEVEL_COLORS = {
logging.DEBUG: cyan,
logging.WARNING: yellow,
logging.ERROR: red,
logging.CRITICAL: color('white', bg='red', bold=True),
}
LEVELS_PREFIX = {
logging.INFO: cyan(INFO),
logging.WARNING: yellow(WARNING),
}
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(pkg_resources.get_distribution('cada').version)
ctx.exit()
@click.group(cls=FlaskGroup, create_app=create_app, context_settings=CONTEXT_SETTINGS,
add_version_option=False, add_default_commands=False)
@click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True)
def cli():
'''CADA Management client'''
cli._loaded_plugin_commands = True # Prevent extensions to register their commands
cli.add_command(shell_command)
cli.add_command(run_command, name='runserver')
cli.add_command(routes_command)
@cli.command()
@click.argument('patterns', nargs=-1)
@click.option('-r', '--reindex', 'full_reindex', is_flag=True,
help='Trigger a full reindexation instead of indexing new advices')
def load(patterns, full_reindex):
'''
Load one or more CADA CSV files matching patterns
'''
header('Loading CSV files')
for pattern in patterns:
for filename in iglob(pattern):
echo('Loading {}'.format(white(filename)))
with open(filename) as f:
reader = csv.reader(f)
# Skip header
reader.next()
for idx, row in enumerate(reader, 1):
try:
advice = csv.from_row(row)
skipped = False
if not full_reindex:
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
except Exception:
echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False)
skipped = True
if skipped:
echo(white('{}(s)'.format(idx)) if idx % 50 else '')
else:
echo(white(idx) if idx % 50 else '')
success('Processed {0} rows'.format(idx))
if full_reindex:
reindex()
@cli.command()
def reindex():
'''Reindex all advices'''
header('Reindexing all advices')
echo('Deleting index {0}', white(es.index_name))
if es.indices.exists(es.index_name):
es.indices.delete(index=es.index_name)
es.initialize()
idx = 0
for idx, advice in enumerate(Advice.objects, 1):
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
es.indices.refresh(index=es.index_name)
success('Indexed {0} advices', idx)
@cli.command()
@click.argument('path', default='static')
@click.option('-ni', '--no-input', is_flag=True, help="Disable input prompts")
def static(path, no_input):
'''Compile and collect static files into path'''
log = logging.getLogger('webassets')
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
cmdenv = CommandLineEnvironment(assets, log)
cmdenv.build()
if exists(path):
warning('{0} directory already exists and will be {1}', white(path), white('erased'))
if not no_input and not click.confirm('Are you sure'):
exit_with_error()
shutil.rmtree(path)
echo('Copying assets into {0}', white(path))
shutil.copytree(assets.directory, path)
success('Done')
@cli.command()
def anon():
'''Check for candidates to anonymization'''
header(anon.__doc__)
filename = 'urls_to_check.csv'
candidates = Advice.objects(__raw__={
'$or': [
{'subject': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}},
{'content': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}}
]
})
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
# Generate header
writer.writerow(csv.ANON_HEADER)
for idx, advice in enumerate(candidates, 1):
writer.writerow(csv.to_anon_row(advice))
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
success('Total: {0} candidates', len(candidates))
@cli.command()
@click.argument('csvfile', default='fix.csv', type=click.File('r'))
def fix(csvfile):
'''Apply a fix (ie. remove plain names)'''
header('Apply fixes from {}', csvfile.name)
bads = []
reader = csv.reader(csvfile)
reader.next() # Skip header
for id, _, sources, dests in reader:
advice = Advice.objects.get(id=id)
sources = [s.strip() for s in sources.split(',') if s.strip()]
dests = [d.strip() for d in dests.split(',') if d.strip()]
if not len(sources) == len(dests):
bads.append(id)
continue
for source, dest in zip(sources, dests):
echo('{0}: Replace {1} with {2}', white(id), white(source), white(dest))
advice.subject = advice.subject.replace(source, dest)
advice.content = advice.content.replace(source, dest)
advice.save()
index(advice)
for id in bads:
echo('{0}: Replacements length not matching', white(id))
success('Done')
|
etalab/cada | cada/commands.py | load | python | def load(patterns, full_reindex):
'''
Load one or more CADA CSV files matching patterns
'''
header('Loading CSV files')
for pattern in patterns:
for filename in iglob(pattern):
echo('Loading {}'.format(white(filename)))
with open(filename) as f:
reader = csv.reader(f)
# Skip header
reader.next()
for idx, row in enumerate(reader, 1):
try:
advice = csv.from_row(row)
skipped = False
if not full_reindex:
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
except Exception:
echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False)
skipped = True
if skipped:
echo(white('{}(s)'.format(idx)) if idx % 50 else '')
else:
echo(white(idx) if idx % 50 else '')
success('Processed {0} rows'.format(idx))
if full_reindex:
reindex() | Load one or more CADA CSV files matching patterns | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/commands.py#L147-L175 | [
"def index(advice):\n '''Index/Reindex a CADA advice'''\n topics = []\n for topic in advice.topics:\n topics.append(topic)\n parts = topic.split('/')\n if len(parts) > 1:\n topics.append(parts[0])\n\n try:\n es.index(index=es.index_name, doc_type=DOCTYPE, id=advice.id, body={\n 'id': advice.id,\n 'administration': advice.administration,\n 'type': advice.type,\n 'session': advice.session.strftime('%Y-%m-%d'),\n 'subject': advice.subject,\n 'topics': topics,\n 'tags': advice.tags,\n 'meanings': advice.meanings,\n 'part': advice.part,\n 'content': advice.content,\n })\n except Exception:\n log.exception('Unable to index advice %s', advice.id)\n",
"def header(msg, *args, **kwargs):\n '''Display an header'''\n msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))\n echo(msg, *args, **kwargs)\n",
"def reader(f):\n '''CSV Reader factory for CADA format'''\n return unicodecsv.reader(f, encoding='utf-8', delimiter=b',', quotechar=b'\"')\n",
"def echo(msg, *args, **kwargs):\n '''Wraps click.echo, handles formatting and check encoding'''\n file = kwargs.pop('file', None)\n nl = kwargs.pop('nl', True)\n err = kwargs.pop('err', False)\n color = kwargs.pop('color', None)\n msg = safe_unicode(msg).format(*args, **kwargs)\n click.echo(msg, file=file, nl=nl, err=err, color=color)\n",
"def success(msg, *args, **kwargs):\n '''Display a success message'''\n echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs)\n",
"def from_row(row):\n '''Create an advice from a CSV row'''\n subject = (row[5][0].upper() + row[5][1:]) if row[5] else row[5]\n return Advice.objects.create(\n id=row[0],\n administration=cleanup(row[1]),\n type=row[2],\n session=datetime.strptime(row[4], '%d/%m/%Y'),\n subject=cleanup(subject),\n topics=[t.title() for t in cleanup(row[6]).split(', ')],\n tags=[tag.strip() for tag in row[7].split(',') if tag.strip()],\n meanings=cleanup(row[8]).replace(' / ', '/').split(', '),\n part=_part(row[9]),\n content=cleanup(row[10]),\n )\n",
"return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')\n"
] | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import click
import logging
import pkg_resources
import shutil
import sys
from glob import iglob
from os.path import exists
from webassets.script import CommandLineEnvironment
from flask.cli import FlaskGroup, shell_command, run_command, routes_command
from cada import create_app, csv
from cada.assets import assets
from cada.models import Advice
from cada.search import es, index
log = logging.getLogger(__name__)
OK = '✔'.encode('utf8')
KO = '✘'.encode('utf8')
INFO = '➢'.encode('utf8')
WARNING = '⚠'.encode('utf8')
HEADER = '✯'.encode('utf8')
NO_CAST = (int, float, bool)
CONTEXT_SETTINGS = {
'auto_envvar_prefix': 'cada',
'help_option_names': ['-?', '-h', '--help'],
}
click.disable_unicode_literals_warning = True
def safe_unicode(string):
'''Safely transform any object into utf8 encoded bytes'''
if not isinstance(string, basestring):
string = unicode(string)
if isinstance(string, unicode):
string = string.encode('utf8')
return string
def color(name, **kwargs):
return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')
green = color('green', bold=True)
yellow = color('yellow', bold=True)
red = color('red', bold=True)
cyan = color('cyan', bold=True)
magenta = color('magenta', bold=True)
white = color('white', bold=True)
def echo(msg, *args, **kwargs):
'''Wraps click.echo, handles formatting and check encoding'''
file = kwargs.pop('file', None)
nl = kwargs.pop('nl', True)
err = kwargs.pop('err', False)
color = kwargs.pop('color', None)
msg = safe_unicode(msg).format(*args, **kwargs)
click.echo(msg, file=file, nl=nl, err=err, color=color)
def header(msg, *args, **kwargs):
'''Display an header'''
msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))
echo(msg, *args, **kwargs)
def success(msg, *args, **kwargs):
'''Display a success message'''
echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs)
def warning(msg, *args, **kwargs):
'''Display a warning message'''
msg = '{0} {1}'.format(yellow(WARNING), msg)
echo(msg, *args, **kwargs)
def error(msg, details=None, *args, **kwargs):
'''Display an error message with optionnal details'''
msg = '{0} {1}'.format(red(KO), white(msg))
if details:
msg = '\n'.join((msg, safe_unicode(details)))
echo(format_multiline(msg), *args, **kwargs)
def exit_with_error(msg='Aborted', details=None, code=-1, *args, **kwargs):
'''Exit with error'''
error(msg, details=details, *args, **kwargs)
sys.exit(code)
def format_multiline(string):
string = string.replace('\n', '\n│ ')
return replace_last(string, '│', '└')
def replace_last(string, char, replacement):
return replacement.join(string.rsplit(char, 1))
LEVEL_COLORS = {
logging.DEBUG: cyan,
logging.WARNING: yellow,
logging.ERROR: red,
logging.CRITICAL: color('white', bg='red', bold=True),
}
LEVELS_PREFIX = {
logging.INFO: cyan(INFO),
logging.WARNING: yellow(WARNING),
}
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(pkg_resources.get_distribution('cada').version)
ctx.exit()
@click.group(cls=FlaskGroup, create_app=create_app, context_settings=CONTEXT_SETTINGS,
add_version_option=False, add_default_commands=False)
@click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True)
def cli():
'''CADA Management client'''
cli._loaded_plugin_commands = True # Prevent extensions to register their commands
cli.add_command(shell_command)
cli.add_command(run_command, name='runserver')
cli.add_command(routes_command)
@cli.command()
@click.argument('patterns', nargs=-1)
@click.option('-r', '--reindex', 'full_reindex', is_flag=True,
help='Trigger a full reindexation instead of indexing new advices')
@cli.command()
def reindex():
'''Reindex all advices'''
header('Reindexing all advices')
echo('Deleting index {0}', white(es.index_name))
if es.indices.exists(es.index_name):
es.indices.delete(index=es.index_name)
es.initialize()
idx = 0
for idx, advice in enumerate(Advice.objects, 1):
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
es.indices.refresh(index=es.index_name)
success('Indexed {0} advices', idx)
@cli.command()
@click.argument('path', default='static')
@click.option('-ni', '--no-input', is_flag=True, help="Disable input prompts")
def static(path, no_input):
'''Compile and collect static files into path'''
log = logging.getLogger('webassets')
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
cmdenv = CommandLineEnvironment(assets, log)
cmdenv.build()
if exists(path):
warning('{0} directory already exists and will be {1}', white(path), white('erased'))
if not no_input and not click.confirm('Are you sure'):
exit_with_error()
shutil.rmtree(path)
echo('Copying assets into {0}', white(path))
shutil.copytree(assets.directory, path)
success('Done')
@cli.command()
def anon():
'''Check for candidates to anonymization'''
header(anon.__doc__)
filename = 'urls_to_check.csv'
candidates = Advice.objects(__raw__={
'$or': [
{'subject': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}},
{'content': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}}
]
})
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
# Generate header
writer.writerow(csv.ANON_HEADER)
for idx, advice in enumerate(candidates, 1):
writer.writerow(csv.to_anon_row(advice))
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
success('Total: {0} candidates', len(candidates))
@cli.command()
@click.argument('csvfile', default='fix.csv', type=click.File('r'))
def fix(csvfile):
'''Apply a fix (ie. remove plain names)'''
header('Apply fixes from {}', csvfile.name)
bads = []
reader = csv.reader(csvfile)
reader.next() # Skip header
for id, _, sources, dests in reader:
advice = Advice.objects.get(id=id)
sources = [s.strip() for s in sources.split(',') if s.strip()]
dests = [d.strip() for d in dests.split(',') if d.strip()]
if not len(sources) == len(dests):
bads.append(id)
continue
for source, dest in zip(sources, dests):
echo('{0}: Replace {1} with {2}', white(id), white(source), white(dest))
advice.subject = advice.subject.replace(source, dest)
advice.content = advice.content.replace(source, dest)
advice.save()
index(advice)
for id in bads:
echo('{0}: Replacements length not matching', white(id))
success('Done')
|
etalab/cada | cada/commands.py | reindex | python | def reindex():
'''Reindex all advices'''
header('Reindexing all advices')
echo('Deleting index {0}', white(es.index_name))
if es.indices.exists(es.index_name):
es.indices.delete(index=es.index_name)
es.initialize()
idx = 0
for idx, advice in enumerate(Advice.objects, 1):
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
es.indices.refresh(index=es.index_name)
success('Indexed {0} advices', idx) | Reindex all advices | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/commands.py#L179-L193 | [
"def index(advice):\n '''Index/Reindex a CADA advice'''\n topics = []\n for topic in advice.topics:\n topics.append(topic)\n parts = topic.split('/')\n if len(parts) > 1:\n topics.append(parts[0])\n\n try:\n es.index(index=es.index_name, doc_type=DOCTYPE, id=advice.id, body={\n 'id': advice.id,\n 'administration': advice.administration,\n 'type': advice.type,\n 'session': advice.session.strftime('%Y-%m-%d'),\n 'subject': advice.subject,\n 'topics': topics,\n 'tags': advice.tags,\n 'meanings': advice.meanings,\n 'part': advice.part,\n 'content': advice.content,\n })\n except Exception:\n log.exception('Unable to index advice %s', advice.id)\n",
"def header(msg, *args, **kwargs):\n '''Display an header'''\n msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))\n echo(msg, *args, **kwargs)\n",
"def echo(msg, *args, **kwargs):\n '''Wraps click.echo, handles formatting and check encoding'''\n file = kwargs.pop('file', None)\n nl = kwargs.pop('nl', True)\n err = kwargs.pop('err', False)\n color = kwargs.pop('color', None)\n msg = safe_unicode(msg).format(*args, **kwargs)\n click.echo(msg, file=file, nl=nl, err=err, color=color)\n",
"def success(msg, *args, **kwargs):\n '''Display a success message'''\n echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs)\n",
"return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')\n",
"def initialize(self):\n '''Create or update indices and mappings'''\n if es.indices.exists(self.index_name):\n es.indices.put_mapping(index=self.index_name, doc_type=DOCTYPE, body=MAPPING)\n else:\n es.indices.create(self.index_name, {\n 'mappings': {'advice': MAPPING},\n 'settings': {'analysis': ANALSYS},\n })\n"
] | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import click
import logging
import pkg_resources
import shutil
import sys
from glob import iglob
from os.path import exists
from webassets.script import CommandLineEnvironment
from flask.cli import FlaskGroup, shell_command, run_command, routes_command
from cada import create_app, csv
from cada.assets import assets
from cada.models import Advice
from cada.search import es, index
log = logging.getLogger(__name__)
OK = '✔'.encode('utf8')
KO = '✘'.encode('utf8')
INFO = '➢'.encode('utf8')
WARNING = '⚠'.encode('utf8')
HEADER = '✯'.encode('utf8')
NO_CAST = (int, float, bool)
CONTEXT_SETTINGS = {
'auto_envvar_prefix': 'cada',
'help_option_names': ['-?', '-h', '--help'],
}
click.disable_unicode_literals_warning = True
def safe_unicode(string):
'''Safely transform any object into utf8 encoded bytes'''
if not isinstance(string, basestring):
string = unicode(string)
if isinstance(string, unicode):
string = string.encode('utf8')
return string
def color(name, **kwargs):
return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')
green = color('green', bold=True)
yellow = color('yellow', bold=True)
red = color('red', bold=True)
cyan = color('cyan', bold=True)
magenta = color('magenta', bold=True)
white = color('white', bold=True)
def echo(msg, *args, **kwargs):
'''Wraps click.echo, handles formatting and check encoding'''
file = kwargs.pop('file', None)
nl = kwargs.pop('nl', True)
err = kwargs.pop('err', False)
color = kwargs.pop('color', None)
msg = safe_unicode(msg).format(*args, **kwargs)
click.echo(msg, file=file, nl=nl, err=err, color=color)
def header(msg, *args, **kwargs):
'''Display an header'''
msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))
echo(msg, *args, **kwargs)
def success(msg, *args, **kwargs):
'''Display a success message'''
echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs)
def warning(msg, *args, **kwargs):
'''Display a warning message'''
msg = '{0} {1}'.format(yellow(WARNING), msg)
echo(msg, *args, **kwargs)
def error(msg, details=None, *args, **kwargs):
'''Display an error message with optionnal details'''
msg = '{0} {1}'.format(red(KO), white(msg))
if details:
msg = '\n'.join((msg, safe_unicode(details)))
echo(format_multiline(msg), *args, **kwargs)
def exit_with_error(msg='Aborted', details=None, code=-1, *args, **kwargs):
'''Exit with error'''
error(msg, details=details, *args, **kwargs)
sys.exit(code)
def format_multiline(string):
string = string.replace('\n', '\n│ ')
return replace_last(string, '│', '└')
def replace_last(string, char, replacement):
return replacement.join(string.rsplit(char, 1))
LEVEL_COLORS = {
logging.DEBUG: cyan,
logging.WARNING: yellow,
logging.ERROR: red,
logging.CRITICAL: color('white', bg='red', bold=True),
}
LEVELS_PREFIX = {
logging.INFO: cyan(INFO),
logging.WARNING: yellow(WARNING),
}
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(pkg_resources.get_distribution('cada').version)
ctx.exit()
@click.group(cls=FlaskGroup, create_app=create_app, context_settings=CONTEXT_SETTINGS,
add_version_option=False, add_default_commands=False)
@click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True)
def cli():
'''CADA Management client'''
cli._loaded_plugin_commands = True # Prevent extensions to register their commands
cli.add_command(shell_command)
cli.add_command(run_command, name='runserver')
cli.add_command(routes_command)
@cli.command()
@click.argument('patterns', nargs=-1)
@click.option('-r', '--reindex', 'full_reindex', is_flag=True,
help='Trigger a full reindexation instead of indexing new advices')
def load(patterns, full_reindex):
'''
Load one or more CADA CSV files matching patterns
'''
header('Loading CSV files')
for pattern in patterns:
for filename in iglob(pattern):
echo('Loading {}'.format(white(filename)))
with open(filename) as f:
reader = csv.reader(f)
# Skip header
reader.next()
for idx, row in enumerate(reader, 1):
try:
advice = csv.from_row(row)
skipped = False
if not full_reindex:
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
except Exception:
echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False)
skipped = True
if skipped:
echo(white('{}(s)'.format(idx)) if idx % 50 else '')
else:
echo(white(idx) if idx % 50 else '')
success('Processed {0} rows'.format(idx))
if full_reindex:
reindex()
@cli.command()
@cli.command()
@click.argument('path', default='static')
@click.option('-ni', '--no-input', is_flag=True, help="Disable input prompts")
def static(path, no_input):
'''Compile and collect static files into path'''
log = logging.getLogger('webassets')
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
cmdenv = CommandLineEnvironment(assets, log)
cmdenv.build()
if exists(path):
warning('{0} directory already exists and will be {1}', white(path), white('erased'))
if not no_input and not click.confirm('Are you sure'):
exit_with_error()
shutil.rmtree(path)
echo('Copying assets into {0}', white(path))
shutil.copytree(assets.directory, path)
success('Done')
@cli.command()
def anon():
'''Check for candidates to anonymization'''
header(anon.__doc__)
filename = 'urls_to_check.csv'
candidates = Advice.objects(__raw__={
'$or': [
{'subject': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}},
{'content': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}}
]
})
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
# Generate header
writer.writerow(csv.ANON_HEADER)
for idx, advice in enumerate(candidates, 1):
writer.writerow(csv.to_anon_row(advice))
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
success('Total: {0} candidates', len(candidates))
@cli.command()
@click.argument('csvfile', default='fix.csv', type=click.File('r'))
def fix(csvfile):
'''Apply a fix (ie. remove plain names)'''
header('Apply fixes from {}', csvfile.name)
bads = []
reader = csv.reader(csvfile)
reader.next() # Skip header
for id, _, sources, dests in reader:
advice = Advice.objects.get(id=id)
sources = [s.strip() for s in sources.split(',') if s.strip()]
dests = [d.strip() for d in dests.split(',') if d.strip()]
if not len(sources) == len(dests):
bads.append(id)
continue
for source, dest in zip(sources, dests):
echo('{0}: Replace {1} with {2}', white(id), white(source), white(dest))
advice.subject = advice.subject.replace(source, dest)
advice.content = advice.content.replace(source, dest)
advice.save()
index(advice)
for id in bads:
echo('{0}: Replacements length not matching', white(id))
success('Done')
|
etalab/cada | cada/commands.py | static | python | def static(path, no_input):
'''Compile and collect static files into path'''
log = logging.getLogger('webassets')
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
cmdenv = CommandLineEnvironment(assets, log)
cmdenv.build()
if exists(path):
warning('{0} directory already exists and will be {1}', white(path), white('erased'))
if not no_input and not click.confirm('Are you sure'):
exit_with_error()
shutil.rmtree(path)
echo('Copying assets into {0}', white(path))
shutil.copytree(assets.directory, path)
success('Done') | Compile and collect static files into path | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/commands.py#L199-L217 | [
"def warning(msg, *args, **kwargs):\n '''Display a warning message'''\n msg = '{0} {1}'.format(yellow(WARNING), msg)\n echo(msg, *args, **kwargs)\n",
"def echo(msg, *args, **kwargs):\n '''Wraps click.echo, handles formatting and check encoding'''\n file = kwargs.pop('file', None)\n nl = kwargs.pop('nl', True)\n err = kwargs.pop('err', False)\n color = kwargs.pop('color', None)\n msg = safe_unicode(msg).format(*args, **kwargs)\n click.echo(msg, file=file, nl=nl, err=err, color=color)\n",
"def success(msg, *args, **kwargs):\n '''Display a success message'''\n echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs)\n",
"def exit_with_error(msg='Aborted', details=None, code=-1, *args, **kwargs):\n '''Exit with error'''\n error(msg, details=details, *args, **kwargs)\n sys.exit(code)\n",
"return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')\n"
] | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import click
import logging
import pkg_resources
import shutil
import sys
from glob import iglob
from os.path import exists
from webassets.script import CommandLineEnvironment
from flask.cli import FlaskGroup, shell_command, run_command, routes_command
from cada import create_app, csv
from cada.assets import assets
from cada.models import Advice
from cada.search import es, index
log = logging.getLogger(__name__)
OK = '✔'.encode('utf8')
KO = '✘'.encode('utf8')
INFO = '➢'.encode('utf8')
WARNING = '⚠'.encode('utf8')
HEADER = '✯'.encode('utf8')
NO_CAST = (int, float, bool)
CONTEXT_SETTINGS = {
'auto_envvar_prefix': 'cada',
'help_option_names': ['-?', '-h', '--help'],
}
click.disable_unicode_literals_warning = True
def safe_unicode(string):
'''Safely transform any object into utf8 encoded bytes'''
if not isinstance(string, basestring):
string = unicode(string)
if isinstance(string, unicode):
string = string.encode('utf8')
return string
def color(name, **kwargs):
return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')
green = color('green', bold=True)
yellow = color('yellow', bold=True)
red = color('red', bold=True)
cyan = color('cyan', bold=True)
magenta = color('magenta', bold=True)
white = color('white', bold=True)
def echo(msg, *args, **kwargs):
'''Wraps click.echo, handles formatting and check encoding'''
file = kwargs.pop('file', None)
nl = kwargs.pop('nl', True)
err = kwargs.pop('err', False)
color = kwargs.pop('color', None)
msg = safe_unicode(msg).format(*args, **kwargs)
click.echo(msg, file=file, nl=nl, err=err, color=color)
def header(msg, *args, **kwargs):
'''Display an header'''
msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))
echo(msg, *args, **kwargs)
def success(msg, *args, **kwargs):
'''Display a success message'''
echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs)
def warning(msg, *args, **kwargs):
'''Display a warning message'''
msg = '{0} {1}'.format(yellow(WARNING), msg)
echo(msg, *args, **kwargs)
def error(msg, details=None, *args, **kwargs):
'''Display an error message with optionnal details'''
msg = '{0} {1}'.format(red(KO), white(msg))
if details:
msg = '\n'.join((msg, safe_unicode(details)))
echo(format_multiline(msg), *args, **kwargs)
def exit_with_error(msg='Aborted', details=None, code=-1, *args, **kwargs):
'''Exit with error'''
error(msg, details=details, *args, **kwargs)
sys.exit(code)
def format_multiline(string):
string = string.replace('\n', '\n│ ')
return replace_last(string, '│', '└')
def replace_last(string, char, replacement):
return replacement.join(string.rsplit(char, 1))
LEVEL_COLORS = {
logging.DEBUG: cyan,
logging.WARNING: yellow,
logging.ERROR: red,
logging.CRITICAL: color('white', bg='red', bold=True),
}
LEVELS_PREFIX = {
logging.INFO: cyan(INFO),
logging.WARNING: yellow(WARNING),
}
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(pkg_resources.get_distribution('cada').version)
ctx.exit()
@click.group(cls=FlaskGroup, create_app=create_app, context_settings=CONTEXT_SETTINGS,
add_version_option=False, add_default_commands=False)
@click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True)
def cli():
'''CADA Management client'''
cli._loaded_plugin_commands = True # Prevent extensions to register their commands
cli.add_command(shell_command)
cli.add_command(run_command, name='runserver')
cli.add_command(routes_command)
@cli.command()
@click.argument('patterns', nargs=-1)
@click.option('-r', '--reindex', 'full_reindex', is_flag=True,
help='Trigger a full reindexation instead of indexing new advices')
def load(patterns, full_reindex):
'''
Load one or more CADA CSV files matching patterns
'''
header('Loading CSV files')
for pattern in patterns:
for filename in iglob(pattern):
echo('Loading {}'.format(white(filename)))
with open(filename) as f:
reader = csv.reader(f)
# Skip header
reader.next()
for idx, row in enumerate(reader, 1):
try:
advice = csv.from_row(row)
skipped = False
if not full_reindex:
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
except Exception:
echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False)
skipped = True
if skipped:
echo(white('{}(s)'.format(idx)) if idx % 50 else '')
else:
echo(white(idx) if idx % 50 else '')
success('Processed {0} rows'.format(idx))
if full_reindex:
reindex()
@cli.command()
def reindex():
'''Reindex all advices'''
header('Reindexing all advices')
echo('Deleting index {0}', white(es.index_name))
if es.indices.exists(es.index_name):
es.indices.delete(index=es.index_name)
es.initialize()
idx = 0
for idx, advice in enumerate(Advice.objects, 1):
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
es.indices.refresh(index=es.index_name)
success('Indexed {0} advices', idx)
@cli.command()
@click.argument('path', default='static')
@click.option('-ni', '--no-input', is_flag=True, help="Disable input prompts")
@cli.command()
def anon():
'''Check for candidates to anonymization'''
header(anon.__doc__)
filename = 'urls_to_check.csv'
candidates = Advice.objects(__raw__={
'$or': [
{'subject': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}},
{'content': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}}
]
})
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
# Generate header
writer.writerow(csv.ANON_HEADER)
for idx, advice in enumerate(candidates, 1):
writer.writerow(csv.to_anon_row(advice))
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
success('Total: {0} candidates', len(candidates))
@cli.command()
@click.argument('csvfile', default='fix.csv', type=click.File('r'))
def fix(csvfile):
'''Apply a fix (ie. remove plain names)'''
header('Apply fixes from {}', csvfile.name)
bads = []
reader = csv.reader(csvfile)
reader.next() # Skip header
for id, _, sources, dests in reader:
advice = Advice.objects.get(id=id)
sources = [s.strip() for s in sources.split(',') if s.strip()]
dests = [d.strip() for d in dests.split(',') if d.strip()]
if not len(sources) == len(dests):
bads.append(id)
continue
for source, dest in zip(sources, dests):
echo('{0}: Replace {1} with {2}', white(id), white(source), white(dest))
advice.subject = advice.subject.replace(source, dest)
advice.content = advice.content.replace(source, dest)
advice.save()
index(advice)
for id in bads:
echo('{0}: Replacements length not matching', white(id))
success('Done')
|
etalab/cada | cada/commands.py | anon | python | def anon():
'''Check for candidates to anonymization'''
header(anon.__doc__)
filename = 'urls_to_check.csv'
candidates = Advice.objects(__raw__={
'$or': [
{'subject': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}},
{'content': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}}
]
})
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
# Generate header
writer.writerow(csv.ANON_HEADER)
for idx, advice in enumerate(candidates, 1):
writer.writerow(csv.to_anon_row(advice))
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
success('Total: {0} candidates', len(candidates)) | Check for candidates to anonymization | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/commands.py#L221-L249 | [
"def header(msg, *args, **kwargs):\n '''Display an header'''\n msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))\n echo(msg, *args, **kwargs)\n",
"def echo(msg, *args, **kwargs):\n '''Wraps click.echo, handles formatting and check encoding'''\n file = kwargs.pop('file', None)\n nl = kwargs.pop('nl', True)\n err = kwargs.pop('err', False)\n color = kwargs.pop('color', None)\n msg = safe_unicode(msg).format(*args, **kwargs)\n click.echo(msg, file=file, nl=nl, err=err, color=color)\n",
"def writer(f):\n '''CSV writer factory for CADA format'''\n return unicodecsv.writer(f, encoding='utf-8', delimiter=b',', quotechar=b'\"')\n",
"def success(msg, *args, **kwargs):\n '''Display a success message'''\n echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs)\n",
"def to_anon_row(advice):\n return (advice.id, url_for('site.display', id=advice.id, _external=True), '', '')\n",
"return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')\n"
] | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import click
import logging
import pkg_resources
import shutil
import sys
from glob import iglob
from os.path import exists
from webassets.script import CommandLineEnvironment
from flask.cli import FlaskGroup, shell_command, run_command, routes_command
from cada import create_app, csv
from cada.assets import assets
from cada.models import Advice
from cada.search import es, index
log = logging.getLogger(__name__)
OK = '✔'.encode('utf8')
KO = '✘'.encode('utf8')
INFO = '➢'.encode('utf8')
WARNING = '⚠'.encode('utf8')
HEADER = '✯'.encode('utf8')
NO_CAST = (int, float, bool)
CONTEXT_SETTINGS = {
'auto_envvar_prefix': 'cada',
'help_option_names': ['-?', '-h', '--help'],
}
click.disable_unicode_literals_warning = True
def safe_unicode(string):
'''Safely transform any object into utf8 encoded bytes'''
if not isinstance(string, basestring):
string = unicode(string)
if isinstance(string, unicode):
string = string.encode('utf8')
return string
def color(name, **kwargs):
return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')
green = color('green', bold=True)
yellow = color('yellow', bold=True)
red = color('red', bold=True)
cyan = color('cyan', bold=True)
magenta = color('magenta', bold=True)
white = color('white', bold=True)
def echo(msg, *args, **kwargs):
'''Wraps click.echo, handles formatting and check encoding'''
file = kwargs.pop('file', None)
nl = kwargs.pop('nl', True)
err = kwargs.pop('err', False)
color = kwargs.pop('color', None)
msg = safe_unicode(msg).format(*args, **kwargs)
click.echo(msg, file=file, nl=nl, err=err, color=color)
def header(msg, *args, **kwargs):
'''Display an header'''
msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))
echo(msg, *args, **kwargs)
def success(msg, *args, **kwargs):
'''Display a success message'''
echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs)
def warning(msg, *args, **kwargs):
'''Display a warning message'''
msg = '{0} {1}'.format(yellow(WARNING), msg)
echo(msg, *args, **kwargs)
def error(msg, details=None, *args, **kwargs):
'''Display an error message with optionnal details'''
msg = '{0} {1}'.format(red(KO), white(msg))
if details:
msg = '\n'.join((msg, safe_unicode(details)))
echo(format_multiline(msg), *args, **kwargs)
def exit_with_error(msg='Aborted', details=None, code=-1, *args, **kwargs):
'''Exit with error'''
error(msg, details=details, *args, **kwargs)
sys.exit(code)
def format_multiline(string):
string = string.replace('\n', '\n│ ')
return replace_last(string, '│', '└')
def replace_last(string, char, replacement):
return replacement.join(string.rsplit(char, 1))
LEVEL_COLORS = {
logging.DEBUG: cyan,
logging.WARNING: yellow,
logging.ERROR: red,
logging.CRITICAL: color('white', bg='red', bold=True),
}
LEVELS_PREFIX = {
logging.INFO: cyan(INFO),
logging.WARNING: yellow(WARNING),
}
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(pkg_resources.get_distribution('cada').version)
ctx.exit()
@click.group(cls=FlaskGroup, create_app=create_app, context_settings=CONTEXT_SETTINGS,
add_version_option=False, add_default_commands=False)
@click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True)
def cli():
'''CADA Management client'''
cli._loaded_plugin_commands = True # Prevent extensions to register their commands
cli.add_command(shell_command)
cli.add_command(run_command, name='runserver')
cli.add_command(routes_command)
@cli.command()
@click.argument('patterns', nargs=-1)
@click.option('-r', '--reindex', 'full_reindex', is_flag=True,
help='Trigger a full reindexation instead of indexing new advices')
def load(patterns, full_reindex):
'''
Load one or more CADA CSV files matching patterns
'''
header('Loading CSV files')
for pattern in patterns:
for filename in iglob(pattern):
echo('Loading {}'.format(white(filename)))
with open(filename) as f:
reader = csv.reader(f)
# Skip header
reader.next()
for idx, row in enumerate(reader, 1):
try:
advice = csv.from_row(row)
skipped = False
if not full_reindex:
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
except Exception:
echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False)
skipped = True
if skipped:
echo(white('{}(s)'.format(idx)) if idx % 50 else '')
else:
echo(white(idx) if idx % 50 else '')
success('Processed {0} rows'.format(idx))
if full_reindex:
reindex()
@cli.command()
def reindex():
'''Reindex all advices'''
header('Reindexing all advices')
echo('Deleting index {0}', white(es.index_name))
if es.indices.exists(es.index_name):
es.indices.delete(index=es.index_name)
es.initialize()
idx = 0
for idx, advice in enumerate(Advice.objects, 1):
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
es.indices.refresh(index=es.index_name)
success('Indexed {0} advices', idx)
@cli.command()
@click.argument('path', default='static')
@click.option('-ni', '--no-input', is_flag=True, help="Disable input prompts")
def static(path, no_input):
'''Compile and collect static files into path'''
log = logging.getLogger('webassets')
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
cmdenv = CommandLineEnvironment(assets, log)
cmdenv.build()
if exists(path):
warning('{0} directory already exists and will be {1}', white(path), white('erased'))
if not no_input and not click.confirm('Are you sure'):
exit_with_error()
shutil.rmtree(path)
echo('Copying assets into {0}', white(path))
shutil.copytree(assets.directory, path)
success('Done')
@cli.command()
@cli.command()
@click.argument('csvfile', default='fix.csv', type=click.File('r'))
def fix(csvfile):
'''Apply a fix (ie. remove plain names)'''
header('Apply fixes from {}', csvfile.name)
bads = []
reader = csv.reader(csvfile)
reader.next() # Skip header
for id, _, sources, dests in reader:
advice = Advice.objects.get(id=id)
sources = [s.strip() for s in sources.split(',') if s.strip()]
dests = [d.strip() for d in dests.split(',') if d.strip()]
if not len(sources) == len(dests):
bads.append(id)
continue
for source, dest in zip(sources, dests):
echo('{0}: Replace {1} with {2}', white(id), white(source), white(dest))
advice.subject = advice.subject.replace(source, dest)
advice.content = advice.content.replace(source, dest)
advice.save()
index(advice)
for id in bads:
echo('{0}: Replacements length not matching', white(id))
success('Done')
|
etalab/cada | cada/commands.py | fix | python | def fix(csvfile):
'''Apply a fix (ie. remove plain names)'''
header('Apply fixes from {}', csvfile.name)
bads = []
reader = csv.reader(csvfile)
reader.next() # Skip header
for id, _, sources, dests in reader:
advice = Advice.objects.get(id=id)
sources = [s.strip() for s in sources.split(',') if s.strip()]
dests = [d.strip() for d in dests.split(',') if d.strip()]
if not len(sources) == len(dests):
bads.append(id)
continue
for source, dest in zip(sources, dests):
echo('{0}: Replace {1} with {2}', white(id), white(source), white(dest))
advice.subject = advice.subject.replace(source, dest)
advice.content = advice.content.replace(source, dest)
advice.save()
index(advice)
for id in bads:
echo('{0}: Replacements length not matching', white(id))
success('Done') | Apply a fix (ie. remove plain names) | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/commands.py#L254-L275 | [
"def index(advice):\n '''Index/Reindex a CADA advice'''\n topics = []\n for topic in advice.topics:\n topics.append(topic)\n parts = topic.split('/')\n if len(parts) > 1:\n topics.append(parts[0])\n\n try:\n es.index(index=es.index_name, doc_type=DOCTYPE, id=advice.id, body={\n 'id': advice.id,\n 'administration': advice.administration,\n 'type': advice.type,\n 'session': advice.session.strftime('%Y-%m-%d'),\n 'subject': advice.subject,\n 'topics': topics,\n 'tags': advice.tags,\n 'meanings': advice.meanings,\n 'part': advice.part,\n 'content': advice.content,\n })\n except Exception:\n log.exception('Unable to index advice %s', advice.id)\n",
"def header(msg, *args, **kwargs):\n '''Display an header'''\n msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))\n echo(msg, *args, **kwargs)\n",
"def reader(f):\n '''CSV Reader factory for CADA format'''\n return unicodecsv.reader(f, encoding='utf-8', delimiter=b',', quotechar=b'\"')\n",
"def echo(msg, *args, **kwargs):\n '''Wraps click.echo, handles formatting and check encoding'''\n file = kwargs.pop('file', None)\n nl = kwargs.pop('nl', True)\n err = kwargs.pop('err', False)\n color = kwargs.pop('color', None)\n msg = safe_unicode(msg).format(*args, **kwargs)\n click.echo(msg, file=file, nl=nl, err=err, color=color)\n",
"def success(msg, *args, **kwargs):\n '''Display a success message'''\n echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs)\n",
"return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')\n"
] | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import click
import logging
import pkg_resources
import shutil
import sys
from glob import iglob
from os.path import exists
from webassets.script import CommandLineEnvironment
from flask.cli import FlaskGroup, shell_command, run_command, routes_command
from cada import create_app, csv
from cada.assets import assets
from cada.models import Advice
from cada.search import es, index
log = logging.getLogger(__name__)
OK = '✔'.encode('utf8')
KO = '✘'.encode('utf8')
INFO = '➢'.encode('utf8')
WARNING = '⚠'.encode('utf8')
HEADER = '✯'.encode('utf8')
NO_CAST = (int, float, bool)
CONTEXT_SETTINGS = {
'auto_envvar_prefix': 'cada',
'help_option_names': ['-?', '-h', '--help'],
}
click.disable_unicode_literals_warning = True
def safe_unicode(string):
'''Safely transform any object into utf8 encoded bytes'''
if not isinstance(string, basestring):
string = unicode(string)
if isinstance(string, unicode):
string = string.encode('utf8')
return string
def color(name, **kwargs):
return lambda t: click.style(safe_unicode(t), fg=name, **kwargs).decode('utf8')
green = color('green', bold=True)
yellow = color('yellow', bold=True)
red = color('red', bold=True)
cyan = color('cyan', bold=True)
magenta = color('magenta', bold=True)
white = color('white', bold=True)
def echo(msg, *args, **kwargs):
'''Wraps click.echo, handles formatting and check encoding'''
file = kwargs.pop('file', None)
nl = kwargs.pop('nl', True)
err = kwargs.pop('err', False)
color = kwargs.pop('color', None)
msg = safe_unicode(msg).format(*args, **kwargs)
click.echo(msg, file=file, nl=nl, err=err, color=color)
def header(msg, *args, **kwargs):
'''Display an header'''
msg = ' '.join((yellow(HEADER), white(msg), yellow(HEADER)))
echo(msg, *args, **kwargs)
def success(msg, *args, **kwargs):
'''Display a success message'''
echo('{0} {1}'.format(green(OK), white(msg)), *args, **kwargs)
def warning(msg, *args, **kwargs):
'''Display a warning message'''
msg = '{0} {1}'.format(yellow(WARNING), msg)
echo(msg, *args, **kwargs)
def error(msg, details=None, *args, **kwargs):
'''Display an error message with optionnal details'''
msg = '{0} {1}'.format(red(KO), white(msg))
if details:
msg = '\n'.join((msg, safe_unicode(details)))
echo(format_multiline(msg), *args, **kwargs)
def exit_with_error(msg='Aborted', details=None, code=-1, *args, **kwargs):
'''Exit with error'''
error(msg, details=details, *args, **kwargs)
sys.exit(code)
def format_multiline(string):
string = string.replace('\n', '\n│ ')
return replace_last(string, '│', '└')
def replace_last(string, char, replacement):
return replacement.join(string.rsplit(char, 1))
LEVEL_COLORS = {
logging.DEBUG: cyan,
logging.WARNING: yellow,
logging.ERROR: red,
logging.CRITICAL: color('white', bg='red', bold=True),
}
LEVELS_PREFIX = {
logging.INFO: cyan(INFO),
logging.WARNING: yellow(WARNING),
}
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo(pkg_resources.get_distribution('cada').version)
ctx.exit()
@click.group(cls=FlaskGroup, create_app=create_app, context_settings=CONTEXT_SETTINGS,
add_version_option=False, add_default_commands=False)
@click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True)
def cli():
'''CADA Management client'''
cli._loaded_plugin_commands = True # Prevent extensions to register their commands
cli.add_command(shell_command)
cli.add_command(run_command, name='runserver')
cli.add_command(routes_command)
@cli.command()
@click.argument('patterns', nargs=-1)
@click.option('-r', '--reindex', 'full_reindex', is_flag=True,
help='Trigger a full reindexation instead of indexing new advices')
def load(patterns, full_reindex):
'''
Load one or more CADA CSV files matching patterns
'''
header('Loading CSV files')
for pattern in patterns:
for filename in iglob(pattern):
echo('Loading {}'.format(white(filename)))
with open(filename) as f:
reader = csv.reader(f)
# Skip header
reader.next()
for idx, row in enumerate(reader, 1):
try:
advice = csv.from_row(row)
skipped = False
if not full_reindex:
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
except Exception:
echo(cyan('s') if idx % 50 else white('{0}(s)'.format(idx)), nl=False)
skipped = True
if skipped:
echo(white('{}(s)'.format(idx)) if idx % 50 else '')
else:
echo(white(idx) if idx % 50 else '')
success('Processed {0} rows'.format(idx))
if full_reindex:
reindex()
@cli.command()
def reindex():
'''Reindex all advices'''
header('Reindexing all advices')
echo('Deleting index {0}', white(es.index_name))
if es.indices.exists(es.index_name):
es.indices.delete(index=es.index_name)
es.initialize()
idx = 0
for idx, advice in enumerate(Advice.objects, 1):
index(advice)
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
es.indices.refresh(index=es.index_name)
success('Indexed {0} advices', idx)
@cli.command()
@click.argument('path', default='static')
@click.option('-ni', '--no-input', is_flag=True, help="Disable input prompts")
def static(path, no_input):
'''Compile and collect static files into path'''
log = logging.getLogger('webassets')
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
cmdenv = CommandLineEnvironment(assets, log)
cmdenv.build()
if exists(path):
warning('{0} directory already exists and will be {1}', white(path), white('erased'))
if not no_input and not click.confirm('Are you sure'):
exit_with_error()
shutil.rmtree(path)
echo('Copying assets into {0}', white(path))
shutil.copytree(assets.directory, path)
success('Done')
@cli.command()
def anon():
'''Check for candidates to anonymization'''
header(anon.__doc__)
filename = 'urls_to_check.csv'
candidates = Advice.objects(__raw__={
'$or': [
{'subject': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}},
{'content': {
'$regex': '(Monsieur|Madame|Docteur|Mademoiselle)\s+[^X\s\.]{3}',
'$options': 'imx',
}}
]
})
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
# Generate header
writer.writerow(csv.ANON_HEADER)
for idx, advice in enumerate(candidates, 1):
writer.writerow(csv.to_anon_row(advice))
echo('.' if idx % 50 else white(idx), nl=False)
echo(white(idx) if idx % 50 else '')
success('Total: {0} candidates', len(candidates))
@cli.command()
@click.argument('csvfile', default='fix.csv', type=click.File('r'))
|
etalab/cada | cada/search.py | build_sort | python | def build_sort():
'''Build sort query paramter from kwargs'''
sorts = request.args.getlist('sort')
sorts = [sorts] if isinstance(sorts, basestring) else sorts
sorts = [s.split(' ') for s in sorts]
return [{SORTS[s]: d} for s, d in sorts if s in SORTS] | Build sort query paramter from kwargs | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/search.py#L183-L188 | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from datetime import datetime
from elasticsearch import Elasticsearch
from flask import current_app, request
from cada.models import Advice
log = logging.getLogger(__name__)
MAPPING = {
'properties': {
'id': {'type': 'string', 'index': 'not_analyzed'},
'administration': {
'type': 'string',
'analyzer': 'fr_analyzer',
'fields': {
'raw': {'type': 'string', 'index': 'not_analyzed'}
}
},
'type': {'type': 'string', 'index': 'not_analyzed'},
'session': {
'type': 'date', 'format': 'YYYY-MM-dd',
'fields': {
'raw': {'type': 'string', 'index': 'not_analyzed'}
}
},
'subject': {
'type': 'string',
'analyzer': 'fr_analyzer',
'fields': {
'raw': {'type': 'string', 'index': 'not_analyzed'}
}
},
'topics': {
'type': 'string',
'analyzer': 'fr_analyzer',
'fields': {
'raw': {'type': 'string', 'index': 'not_analyzed'}
}
},
'tags': {'type': 'string', 'index': 'not_analyzed'},
'meanings': {'type': 'string', 'index': 'not_analyzed'},
'part': {'type': 'short'},
'content': {
'type': 'string',
'analyzer': 'fr_analyzer',
'fields': {
'raw': {'type': 'string', 'index': 'not_analyzed'}
}
},
}
}
FIELDS = (
'id^5',
'subject^4',
'content^3',
'administration',
'topics',
'tags',
)
SORTS = {
'topic': 'topics.raw',
'administration': 'administration.raw',
'session': 'session',
}
FACETS = {
'administration': 'administration.raw',
# 'type': 'type',
'tag': 'tags',
'topic': 'topics.raw',
'session': 'session.raw',
'part': 'part',
'meaning': 'meanings',
}
ANALSYS = {
"filter": {
"fr_stop_filter": {
"type": "stop",
"stopwords": ["_french_"]
},
"fr_stem_filter": {
"type": "stemmer",
"name": "minimal_french"
}
},
"analyzer": {
"fr_analyzer": {
"type": "custom",
"tokenizer": "icu_tokenizer",
"filter": ["icu_folding", "icu_normalizer", "fr_stop_filter", "fr_stem_filter"],
"char_filter": ["html_strip"]
}
}
}
DOCTYPE = 'advice'
DEFAULT_PAGE_SIZE = 20
class ElasticSearch(object):
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
app.config.setdefault('ELASTICSEARCH_URL', 'localhost:9200')
app.extensions['elasticsearch'] = Elasticsearch([app.config['ELASTICSEARCH_URL']])
def __getattr__(self, item):
if 'elasticsearch' not in current_app.extensions.keys():
raise Exception('not initialised, did you forget to call init_app?')
return getattr(current_app.extensions['elasticsearch'], item)
@property
def index_name(self):
if current_app.config.get('TESTING'):
return '{0}-test'.format(current_app.name)
return current_app.name
def initialize(self):
'''Create or update indices and mappings'''
if es.indices.exists(self.index_name):
es.indices.put_mapping(index=self.index_name, doc_type=DOCTYPE, body=MAPPING)
else:
es.indices.create(self.index_name, {
'mappings': {'advice': MAPPING},
'settings': {'analysis': ANALSYS},
})
es = ElasticSearch()
def build_text_queries():
if not request.args.get('q'):
return []
query_string = request.args.get('q')
if isinstance(query_string, (list, tuple)):
query_string = ' '.join(query_string)
return [{
'multi_match': {
'query': query_string,
'fields': FIELDS,
'analyzer': 'fr_analyzer',
}
}]
def build_facet_queries():
queries = []
for name, field in FACETS.items():
if name in request.args:
value = request.args[name]
for term in [value] if isinstance(value, basestring) else value:
queries.append({'term': {field: term}})
return queries
def build_query():
must = []
must.extend(build_text_queries())
must.extend(build_facet_queries())
return {'bool': {'must': must}} if must else {'match_all': {}}
def build_aggs():
return dict([
(name, {'terms': {'field': field, 'size': 10}})
for name, field in FACETS.items()
])
def search_advices():
page = max(int(request.args.get('page', 1)), 1)
page_size = int(request.args.get('page_size', DEFAULT_PAGE_SIZE))
start = (page - 1) * page_size
result = es.search(index=es.index_name, doc_type=DOCTYPE, body={
'query': build_query(),
'aggs': build_aggs(),
'from': start,
'size': page_size,
'sort': build_sort(),
'fields': [],
})
ids = [hit['_id'] for hit in result.get('hits', {}).get('hits', [])]
advices = Advice.objects.in_bulk(ids)
advices = [advices[id] for id in ids]
facets = {}
for name, content in result.get('aggregations', {}).items():
actives = request.args.get(name)
actives = [actives] if isinstance(actives, basestring) else actives or []
facets[name] = [
(term['key'], term['doc_count'], term['key'] in actives)
for term in content.get('buckets', [])
]
return {
'advices': advices,
'facets': facets,
'page': page,
'page_size': page_size,
'total': result['hits']['total'],
}
def agg_to_list(result, facet):
return [
(t['key'], t['doc_count'])
for t in
result.get('aggregations', {}).get(facet, {}).get('buckets', [])
]
def ts_to_dt(value):
'''Convert an elasticsearch timestamp into a Python datetime'''
if not value:
return
return datetime.utcfromtimestamp(value * 1E-3)
def home_data():
result = es.search(es.index_name, body={
'query': {'match_all': {}},
'size': 0,
'aggs': {
'tags': {
'terms': {'field': 'tags', 'size': 20}
},
'topics': {
'terms': {
'field': 'topics.raw',
"exclude": "/*", # Exclude subtopics
'size': 20,
}
},
"sessions": {
"stats": {
"field": "session"
}
}
},
})
sessions = result.get('aggregations', {}).get('sessions', {})
return {
'topics': agg_to_list(result, 'topics'),
'tag_cloud': agg_to_list(result, 'tags'),
'total': result['hits']['total'],
'sessions': {
'from': ts_to_dt(sessions.get('min')),
'to': ts_to_dt(sessions.get('max')),
},
}
def index(advice):
'''Index/Reindex a CADA advice'''
topics = []
for topic in advice.topics:
topics.append(topic)
parts = topic.split('/')
if len(parts) > 1:
topics.append(parts[0])
try:
es.index(index=es.index_name, doc_type=DOCTYPE, id=advice.id, body={
'id': advice.id,
'administration': advice.administration,
'type': advice.type,
'session': advice.session.strftime('%Y-%m-%d'),
'subject': advice.subject,
'topics': topics,
'tags': advice.tags,
'meanings': advice.meanings,
'part': advice.part,
'content': advice.content,
})
except Exception:
log.exception('Unable to index advice %s', advice.id)
|
etalab/cada | cada/search.py | index | python | def index(advice):
'''Index/Reindex a CADA advice'''
topics = []
for topic in advice.topics:
topics.append(topic)
parts = topic.split('/')
if len(parts) > 1:
topics.append(parts[0])
try:
es.index(index=es.index_name, doc_type=DOCTYPE, id=advice.id, body={
'id': advice.id,
'administration': advice.administration,
'type': advice.type,
'session': advice.session.strftime('%Y-%m-%d'),
'subject': advice.subject,
'topics': topics,
'tags': advice.tags,
'meanings': advice.meanings,
'part': advice.part,
'content': advice.content,
})
except Exception:
log.exception('Unable to index advice %s', advice.id) | Index/Reindex a CADA advice | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/search.py#L278-L301 | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from datetime import datetime
from elasticsearch import Elasticsearch
from flask import current_app, request
from cada.models import Advice
log = logging.getLogger(__name__)
MAPPING = {
'properties': {
'id': {'type': 'string', 'index': 'not_analyzed'},
'administration': {
'type': 'string',
'analyzer': 'fr_analyzer',
'fields': {
'raw': {'type': 'string', 'index': 'not_analyzed'}
}
},
'type': {'type': 'string', 'index': 'not_analyzed'},
'session': {
'type': 'date', 'format': 'YYYY-MM-dd',
'fields': {
'raw': {'type': 'string', 'index': 'not_analyzed'}
}
},
'subject': {
'type': 'string',
'analyzer': 'fr_analyzer',
'fields': {
'raw': {'type': 'string', 'index': 'not_analyzed'}
}
},
'topics': {
'type': 'string',
'analyzer': 'fr_analyzer',
'fields': {
'raw': {'type': 'string', 'index': 'not_analyzed'}
}
},
'tags': {'type': 'string', 'index': 'not_analyzed'},
'meanings': {'type': 'string', 'index': 'not_analyzed'},
'part': {'type': 'short'},
'content': {
'type': 'string',
'analyzer': 'fr_analyzer',
'fields': {
'raw': {'type': 'string', 'index': 'not_analyzed'}
}
},
}
}
FIELDS = (
'id^5',
'subject^4',
'content^3',
'administration',
'topics',
'tags',
)
SORTS = {
'topic': 'topics.raw',
'administration': 'administration.raw',
'session': 'session',
}
FACETS = {
'administration': 'administration.raw',
# 'type': 'type',
'tag': 'tags',
'topic': 'topics.raw',
'session': 'session.raw',
'part': 'part',
'meaning': 'meanings',
}
ANALSYS = {
"filter": {
"fr_stop_filter": {
"type": "stop",
"stopwords": ["_french_"]
},
"fr_stem_filter": {
"type": "stemmer",
"name": "minimal_french"
}
},
"analyzer": {
"fr_analyzer": {
"type": "custom",
"tokenizer": "icu_tokenizer",
"filter": ["icu_folding", "icu_normalizer", "fr_stop_filter", "fr_stem_filter"],
"char_filter": ["html_strip"]
}
}
}
DOCTYPE = 'advice'
DEFAULT_PAGE_SIZE = 20
class ElasticSearch(object):
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
app.config.setdefault('ELASTICSEARCH_URL', 'localhost:9200')
app.extensions['elasticsearch'] = Elasticsearch([app.config['ELASTICSEARCH_URL']])
def __getattr__(self, item):
if 'elasticsearch' not in current_app.extensions.keys():
raise Exception('not initialised, did you forget to call init_app?')
return getattr(current_app.extensions['elasticsearch'], item)
@property
def index_name(self):
if current_app.config.get('TESTING'):
return '{0}-test'.format(current_app.name)
return current_app.name
def initialize(self):
'''Create or update indices and mappings'''
if es.indices.exists(self.index_name):
es.indices.put_mapping(index=self.index_name, doc_type=DOCTYPE, body=MAPPING)
else:
es.indices.create(self.index_name, {
'mappings': {'advice': MAPPING},
'settings': {'analysis': ANALSYS},
})
es = ElasticSearch()
def build_text_queries():
if not request.args.get('q'):
return []
query_string = request.args.get('q')
if isinstance(query_string, (list, tuple)):
query_string = ' '.join(query_string)
return [{
'multi_match': {
'query': query_string,
'fields': FIELDS,
'analyzer': 'fr_analyzer',
}
}]
def build_facet_queries():
queries = []
for name, field in FACETS.items():
if name in request.args:
value = request.args[name]
for term in [value] if isinstance(value, basestring) else value:
queries.append({'term': {field: term}})
return queries
def build_query():
must = []
must.extend(build_text_queries())
must.extend(build_facet_queries())
return {'bool': {'must': must}} if must else {'match_all': {}}
def build_aggs():
return dict([
(name, {'terms': {'field': field, 'size': 10}})
for name, field in FACETS.items()
])
def build_sort():
'''Build sort query paramter from kwargs'''
sorts = request.args.getlist('sort')
sorts = [sorts] if isinstance(sorts, basestring) else sorts
sorts = [s.split(' ') for s in sorts]
return [{SORTS[s]: d} for s, d in sorts if s in SORTS]
def search_advices():
page = max(int(request.args.get('page', 1)), 1)
page_size = int(request.args.get('page_size', DEFAULT_PAGE_SIZE))
start = (page - 1) * page_size
result = es.search(index=es.index_name, doc_type=DOCTYPE, body={
'query': build_query(),
'aggs': build_aggs(),
'from': start,
'size': page_size,
'sort': build_sort(),
'fields': [],
})
ids = [hit['_id'] for hit in result.get('hits', {}).get('hits', [])]
advices = Advice.objects.in_bulk(ids)
advices = [advices[id] for id in ids]
facets = {}
for name, content in result.get('aggregations', {}).items():
actives = request.args.get(name)
actives = [actives] if isinstance(actives, basestring) else actives or []
facets[name] = [
(term['key'], term['doc_count'], term['key'] in actives)
for term in content.get('buckets', [])
]
return {
'advices': advices,
'facets': facets,
'page': page,
'page_size': page_size,
'total': result['hits']['total'],
}
def agg_to_list(result, facet):
return [
(t['key'], t['doc_count'])
for t in
result.get('aggregations', {}).get(facet, {}).get('buckets', [])
]
def ts_to_dt(value):
'''Convert an elasticsearch timestamp into a Python datetime'''
if not value:
return
return datetime.utcfromtimestamp(value * 1E-3)
def home_data():
result = es.search(es.index_name, body={
'query': {'match_all': {}},
'size': 0,
'aggs': {
'tags': {
'terms': {'field': 'tags', 'size': 20}
},
'topics': {
'terms': {
'field': 'topics.raw',
"exclude": "/*", # Exclude subtopics
'size': 20,
}
},
"sessions": {
"stats": {
"field": "session"
}
}
},
})
sessions = result.get('aggregations', {}).get('sessions', {})
return {
'topics': agg_to_list(result, 'topics'),
'tag_cloud': agg_to_list(result, 'tags'),
'total': result['hits']['total'],
'sessions': {
'from': ts_to_dt(sessions.get('min')),
'to': ts_to_dt(sessions.get('max')),
},
}
|
etalab/cada | cada/search.py | ElasticSearch.initialize | python | def initialize(self):
'''Create or update indices and mappings'''
if es.indices.exists(self.index_name):
es.indices.put_mapping(index=self.index_name, doc_type=DOCTYPE, body=MAPPING)
else:
es.indices.create(self.index_name, {
'mappings': {'advice': MAPPING},
'settings': {'analysis': ANALSYS},
}) | Create or update indices and mappings | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/search.py#L130-L138 | null | class ElasticSearch(object):
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
app.config.setdefault('ELASTICSEARCH_URL', 'localhost:9200')
app.extensions['elasticsearch'] = Elasticsearch([app.config['ELASTICSEARCH_URL']])
def __getattr__(self, item):
if 'elasticsearch' not in current_app.extensions.keys():
raise Exception('not initialised, did you forget to call init_app?')
return getattr(current_app.extensions['elasticsearch'], item)
@property
def index_name(self):
if current_app.config.get('TESTING'):
return '{0}-test'.format(current_app.name)
return current_app.name
|
etalab/cada | cada/csv.py | reader | python | def reader(f):
'''CSV Reader factory for CADA format'''
return unicodecsv.reader(f, encoding='utf-8', delimiter=b',', quotechar=b'"') | CSV Reader factory for CADA format | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/csv.py#L30-L32 | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import unicodecsv
from flask import url_for
from datetime import datetime
from cada.models import Advice
HEADER = [
'Numéro de dossier',
'Administration',
'Type',
'Année',
'Séance',
'Objet',
'Thème et sous thème',
'Mots clés',
'Sens et motivation',
'Partie',
'Avis'
]
ANON_HEADER = ('id', 'url', 'replace', 'with')
def writer(f):
'''CSV writer factory for CADA format'''
return unicodecsv.writer(f, encoding='utf-8', delimiter=b',', quotechar=b'"')
def _part(string):
'''Transform a part string (I, II or III) into an integer'''
if string == 'I':
return 1
elif string == 'II':
return 2
elif string == 'III':
return 3
elif string == 'IV':
return 4
ROMAN_NUMS = {1: 'I', 2: 'II', 3: 'III', 4: 'IV'}
def cleanup(text):
'''Sanitize text field from HTML encoded caracters'''
return text.replace('"', '"').replace('&', '&')
def from_row(row):
'''Create an advice from a CSV row'''
subject = (row[5][0].upper() + row[5][1:]) if row[5] else row[5]
return Advice.objects.create(
id=row[0],
administration=cleanup(row[1]),
type=row[2],
session=datetime.strptime(row[4], '%d/%m/%Y'),
subject=cleanup(subject),
topics=[t.title() for t in cleanup(row[6]).split(', ')],
tags=[tag.strip() for tag in row[7].split(',') if tag.strip()],
meanings=cleanup(row[8]).replace(' / ', '/').split(', '),
part=_part(row[9]),
content=cleanup(row[10]),
)
def to_row(advice):
'''Serialize an advice into a CSV row'''
return [
advice.id,
advice.administration,
advice.type,
advice.session.year,
advice.session.strftime('%d/%m/%Y'),
advice.subject,
', '.join(advice.topics),
', '.join(advice.tags),
', '.join(advice.meanings),
ROMAN_NUMS.get(advice.part, ''),
advice.content,
]
def to_anon_row(advice):
return (advice.id, url_for('site.display', id=advice.id, _external=True), '', '')
|
etalab/cada | cada/csv.py | writer | python | def writer(f):
'''CSV writer factory for CADA format'''
return unicodecsv.writer(f, encoding='utf-8', delimiter=b',', quotechar=b'"') | CSV writer factory for CADA format | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/csv.py#L35-L37 | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import unicodecsv
from flask import url_for
from datetime import datetime
from cada.models import Advice
HEADER = [
'Numéro de dossier',
'Administration',
'Type',
'Année',
'Séance',
'Objet',
'Thème et sous thème',
'Mots clés',
'Sens et motivation',
'Partie',
'Avis'
]
ANON_HEADER = ('id', 'url', 'replace', 'with')
def reader(f):
'''CSV Reader factory for CADA format'''
return unicodecsv.reader(f, encoding='utf-8', delimiter=b',', quotechar=b'"')
def _part(string):
'''Transform a part string (I, II or III) into an integer'''
if string == 'I':
return 1
elif string == 'II':
return 2
elif string == 'III':
return 3
elif string == 'IV':
return 4
ROMAN_NUMS = {1: 'I', 2: 'II', 3: 'III', 4: 'IV'}
def cleanup(text):
'''Sanitize text field from HTML encoded caracters'''
return text.replace('"', '"').replace('&', '&')
def from_row(row):
'''Create an advice from a CSV row'''
subject = (row[5][0].upper() + row[5][1:]) if row[5] else row[5]
return Advice.objects.create(
id=row[0],
administration=cleanup(row[1]),
type=row[2],
session=datetime.strptime(row[4], '%d/%m/%Y'),
subject=cleanup(subject),
topics=[t.title() for t in cleanup(row[6]).split(', ')],
tags=[tag.strip() for tag in row[7].split(',') if tag.strip()],
meanings=cleanup(row[8]).replace(' / ', '/').split(', '),
part=_part(row[9]),
content=cleanup(row[10]),
)
def to_row(advice):
'''Serialize an advice into a CSV row'''
return [
advice.id,
advice.administration,
advice.type,
advice.session.year,
advice.session.strftime('%d/%m/%Y'),
advice.subject,
', '.join(advice.topics),
', '.join(advice.tags),
', '.join(advice.meanings),
ROMAN_NUMS.get(advice.part, ''),
advice.content,
]
def to_anon_row(advice):
return (advice.id, url_for('site.display', id=advice.id, _external=True), '', '')
|
etalab/cada | cada/csv.py | from_row | python | def from_row(row):
'''Create an advice from a CSV row'''
subject = (row[5][0].upper() + row[5][1:]) if row[5] else row[5]
return Advice.objects.create(
id=row[0],
administration=cleanup(row[1]),
type=row[2],
session=datetime.strptime(row[4], '%d/%m/%Y'),
subject=cleanup(subject),
topics=[t.title() for t in cleanup(row[6]).split(', ')],
tags=[tag.strip() for tag in row[7].split(',') if tag.strip()],
meanings=cleanup(row[8]).replace(' / ', '/').split(', '),
part=_part(row[9]),
content=cleanup(row[10]),
) | Create an advice from a CSV row | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/csv.py#L60-L74 | [
"def cleanup(text):\n '''Sanitize text field from HTML encoded caracters'''\n return text.replace('"', '\"').replace('&', '&')\n",
"def _part(string):\n '''Transform a part string (I, II or III) into an integer'''\n if string == 'I':\n return 1\n elif string == 'II':\n return 2\n elif string == 'III':\n return 3\n elif string == 'IV':\n return 4\n"
] | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import unicodecsv
from flask import url_for
from datetime import datetime
from cada.models import Advice
HEADER = [
'Numéro de dossier',
'Administration',
'Type',
'Année',
'Séance',
'Objet',
'Thème et sous thème',
'Mots clés',
'Sens et motivation',
'Partie',
'Avis'
]
ANON_HEADER = ('id', 'url', 'replace', 'with')
def reader(f):
'''CSV Reader factory for CADA format'''
return unicodecsv.reader(f, encoding='utf-8', delimiter=b',', quotechar=b'"')
def writer(f):
'''CSV writer factory for CADA format'''
return unicodecsv.writer(f, encoding='utf-8', delimiter=b',', quotechar=b'"')
def _part(string):
'''Transform a part string (I, II or III) into an integer'''
if string == 'I':
return 1
elif string == 'II':
return 2
elif string == 'III':
return 3
elif string == 'IV':
return 4
ROMAN_NUMS = {1: 'I', 2: 'II', 3: 'III', 4: 'IV'}
def cleanup(text):
'''Sanitize text field from HTML encoded caracters'''
return text.replace('"', '"').replace('&', '&')
def to_row(advice):
'''Serialize an advice into a CSV row'''
return [
advice.id,
advice.administration,
advice.type,
advice.session.year,
advice.session.strftime('%d/%m/%Y'),
advice.subject,
', '.join(advice.topics),
', '.join(advice.tags),
', '.join(advice.meanings),
ROMAN_NUMS.get(advice.part, ''),
advice.content,
]
def to_anon_row(advice):
return (advice.id, url_for('site.display', id=advice.id, _external=True), '', '')
|
etalab/cada | cada/csv.py | to_row | python | def to_row(advice):
'''Serialize an advice into a CSV row'''
return [
advice.id,
advice.administration,
advice.type,
advice.session.year,
advice.session.strftime('%d/%m/%Y'),
advice.subject,
', '.join(advice.topics),
', '.join(advice.tags),
', '.join(advice.meanings),
ROMAN_NUMS.get(advice.part, ''),
advice.content,
] | Serialize an advice into a CSV row | train | https://github.com/etalab/cada/blob/36e8b57514445c01ff7cd59a1c965180baf83d5e/cada/csv.py#L77-L91 | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import unicodecsv
from flask import url_for
from datetime import datetime
from cada.models import Advice
HEADER = [
'Numéro de dossier',
'Administration',
'Type',
'Année',
'Séance',
'Objet',
'Thème et sous thème',
'Mots clés',
'Sens et motivation',
'Partie',
'Avis'
]
ANON_HEADER = ('id', 'url', 'replace', 'with')
def reader(f):
'''CSV Reader factory for CADA format'''
return unicodecsv.reader(f, encoding='utf-8', delimiter=b',', quotechar=b'"')
def writer(f):
'''CSV writer factory for CADA format'''
return unicodecsv.writer(f, encoding='utf-8', delimiter=b',', quotechar=b'"')
def _part(string):
'''Transform a part string (I, II or III) into an integer'''
if string == 'I':
return 1
elif string == 'II':
return 2
elif string == 'III':
return 3
elif string == 'IV':
return 4
ROMAN_NUMS = {1: 'I', 2: 'II', 3: 'III', 4: 'IV'}
def cleanup(text):
'''Sanitize text field from HTML encoded caracters'''
return text.replace('"', '"').replace('&', '&')
def from_row(row):
'''Create an advice from a CSV row'''
subject = (row[5][0].upper() + row[5][1:]) if row[5] else row[5]
return Advice.objects.create(
id=row[0],
administration=cleanup(row[1]),
type=row[2],
session=datetime.strptime(row[4], '%d/%m/%Y'),
subject=cleanup(subject),
topics=[t.title() for t in cleanup(row[6]).split(', ')],
tags=[tag.strip() for tag in row[7].split(',') if tag.strip()],
meanings=cleanup(row[8]).replace(' / ', '/').split(', '),
part=_part(row[9]),
content=cleanup(row[10]),
)
def to_anon_row(advice):
return (advice.id, url_for('site.display', id=advice.id, _external=True), '', '')
|
gr33ndata/dysl | dysl/dyslib/lm.py | LM.display | python | def display(self):
'''
Displays statistics about our LM
'''
voc_list = []
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
ngrams = len(self.term_count_n[doc_id]['ngrams'])
print 'n-Grams (doc %s): %d' % (str(doc_id), ngrams)
ngrams1 = len(self.term_count_n_1[doc_id]['ngrams'])
print '(n-1)-Grams (doc %s): %d' % (str(doc_id), ngrams1)
voc_list.append(ngrams)
print 'Classed Vocabularies:', voc_list
print ''
corpus_ngrams = len(self.corpus_count_n['ngrams'])
print 'n-Grams (collection): %d' % (corpus_ngrams)
corpus_ngrams1 = len(self.corpus_count_n_1['ngrams'])
print '(n-1)-Grams (collection): %d' % (corpus_ngrams1)
self.unseen_counts.display() | Displays statistics about our LM | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/dyslib/lm.py#L133-L152 | null | class LM:
def __init__(self, n=3, lpad='', rpad='',
smoothing='Laplace', laplace_gama=1,
corpus_mix=0, corpus_mode='Miller',
verbose=False):
'''
Initialize our LM
n: Order of ngram LM, e.g. for bigram LM, n=2
lpad, rpad: Left and right padding.
If empty string '', then don't pad, else
For each document read pad terms
with n-1 repitition on lpad and/or rpad
smoothing: 'Laplace' or 'Witten'
laplace_gama: Multiply 1 and V by this factor gamma
corpus_mix: 0 (default) only use document probabilites
: or value between 0 and 1 lambda
: or c (or l) for Log-likelihood Odds model (Cooper LDA)
corpus_mode: 'Hiemstra' or 'Miller'
: This tell us how to calculate pr_corpus(t)
: In fact, 'Hiemstra' makes sense only with Multivaria LM
: Whereas the LM here is Multinomial.
'''
self.n = n
#(self.n, self.m) = n if type(n) == tuple else (n,0)
# Counters for joint probabilities
# Count for w_1, w_2, w_3 ... w_{n-1}, w_n
self.term_count_n = {}
# Count for w_1, w_2, w_3 ... w_{n-1}
self.term_count_n_1 = {}
# To be used in case of mixing doc prob with corpus prob.
self.corpus_count_n = {'ngrams': {}, 'total': 0}
self.corpus_count_n_1 = {'ngrams': {}, 'total': 0}
self.doc_lengths = {}
# The vocabulary of all classes (for Laplace smoothing)
self.vocabulary = set()
self.lpad=lpad
self.rpad=rpad
self.smoothing = smoothing
self.laplace_gama = float(laplace_gama)
if not type(corpus_mix) is str:
self.corpus_mix = min(float(corpus_mix),1)
else:
self.corpus_mix = corpus_mix
self.corpus_mode = corpus_mode
self.joiner = ' '
self.unseen_counts = UnseenTerms()
self.verbose = verbose
# Display overlapping n-grams between classes
#self.overlaps()
def get_ngram_counts(self):
''' Returns a list of n-gram counts
Array of classes counts and last item is for corpus
'''
ngram_counts = {
'classes': [],
'corpus': 0
}
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
print self.term_count_n[doc_id]
class_ngrams = len(self.term_count_n[doc_id]['ngrams'])
ngram_counts['classes'].append(class_ngrams)
corpus_ngrams = len(self.corpus_count_n['ngrams'])
ngram_counts['corpus'] = corpus_ngrams
return ngram_counts
def overlaps(self):
omx = []
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
row = [0] * len(doc_ids)
omx.append(row)
for i in range(len(doc_ids)):
doc_id_i = doc_ids[i]
ngrams = len(self.term_count_n[doc_id_i]['ngrams'])
omx[i][i] = ngrams
for j in range(i):
doc_id_j = doc_ids[j]
ongrams = 0
#for ngram in self.term_count_n[doc_id_j]['ngrams']:
for ngram in self.term_count_n[doc_id_i]['ngrams']:
#if ngram in self.term_count_n[doc_id_i]['ngrams']:
if ngram in self.term_count_n[doc_id_j]['ngrams']:
ongrams += 1
omx[i][j] = 0 #ongrams
omx[j][i] = ongrams
print '\nn-gram overlaps:'
print doc_ids
for i in range(len(omx)):
row = []
for j in range(len(omx[i])):
row.append( round( float(omx[i][j] * 100) / omx[i][i],2 ) )
#row.append(omx[i][j])
print row
def to_ngrams(self, terms):
''' Converts terms to all possibe ngrams
terms: list of terms
'''
if len(terms) <= self.n:
return terms
if self.n == 1:
n_grams = [[term] for term in terms]
else:
n_grams = []
for i in range(0,len(terms)-self.n+1):
n_grams.append(terms[i:i+self.n])
return n_grams
def lr_padding(self, terms):
'''
Pad doc from the left and right before adding,
depending on what's in self.lpad and self.rpad
If any of them is '', then don't pad there.
'''
lpad = rpad = []
if self.lpad:
lpad = [self.lpad] * (self.n - 1)
if self.rpad:
rpad = [self.rpad] * (self.n - 1)
return lpad + terms + rpad
def update_counts(self, doc_id ='', ngrams=[]):
if not doc_id in self.term_count_n:
self.term_count_n[doc_id] = {'ngrams': {}, 'total': 0}
if not doc_id in self.term_count_n_1:
self.term_count_n_1[doc_id] = {'ngrams': {}, 'total': 0}
for ngram in ngrams:
# Generate n-grams and sub-ngrams
# For example (n=2): ['t1','t2','t3','t4']
# ngram_n: ['t1 t2','t2 t3', 't3 t4']
# ngram_n_1: ['t1','t2','t3']
ngram_n = self.joiner.join(ngram)
ngram_n_1 = self.joiner.join(ngram[:-1])
# Update n-gram counts for doc_id
if ngram_n in self.term_count_n[doc_id]['ngrams']:
self.term_count_n[doc_id]['ngrams'][ngram_n] += 1
else:
self.term_count_n[doc_id]['ngrams'][ngram_n] = 1
self.term_count_n[doc_id]['total'] += 1
# Update n-gram counts for corpus
if ngram_n in self.corpus_count_n['ngrams']:
self.corpus_count_n['ngrams'][ngram_n] += 1
else:
self.corpus_count_n['ngrams'][ngram_n] = 1
self.corpus_count_n['total'] += 1
# Update (n-1)-gram counts for doc_id
if ngram_n_1 in self.term_count_n_1[doc_id]['ngrams']:
self.term_count_n_1[doc_id]['ngrams'][ngram_n_1] += 1
else:
self.term_count_n_1[doc_id]['ngrams'][ngram_n_1] = 1
self.term_count_n_1[doc_id]['total'] += 1
# Update (n-1)-gram counts for corpus
if ngram_n_1 in self.corpus_count_n_1['ngrams']:
self.corpus_count_n_1['ngrams'][ngram_n_1] += 1
else:
self.corpus_count_n_1['ngrams'][ngram_n_1] = 1
self.corpus_count_n_1['total'] += 1
def update_lengths(self, doc_id ='', doc_length=0):
if not doc_id in self.doc_lengths:
self.doc_lengths[doc_id] = {'lengths': [], 'mean': -1}
self.doc_lengths[doc_id]['lengths'].append(int(doc_length))
def get_mean_lengths(self, doc_id =''):
my_mean_len = 0
others_mean_len = []
for d_id in self.doc_lengths:
if self.doc_lengths[d_id]['mean'] == -1:
dlen = self.doc_lengths[d_id]['lengths']
doc_mean_len = float(sum(dlen)) / len(dlen)
self.doc_lengths[d_id]['mean'] = doc_mean_len
else:
doc_mean_len = self.doc_lengths[d_id]['mean']
if d_id == doc_id:
my_mean_len = doc_mean_len
else:
others_mean_len.append(doc_mean_len)
oth_mean_len = float(sum(others_mean_len)) / len(others_mean_len)
return (my_mean_len, oth_mean_len)
def add_doc(self, doc_id ='', doc_terms=[], doc_length=-1):
'''
Add new document to our Language Model (training phase)
doc_id is used here, so we build seperate LF for each doc_id
I.e. if you call it more than once with same doc_id,
then all terms given via doc_terms will contribute to same LM
doc_terms: list of words in document to be added
doc_length: the length of the document, you can provide it yourself,
otherwise, we use len(doc_terms) instead.
'''
if doc_length == -1:
self.update_lengths(doc_id=doc_id, doc_length=len(doc_terms))
else:
self.update_lengths(doc_id=doc_id, doc_length=int(doc_length))
for term in doc_terms:
self.vocabulary.add(term)
terms = self.lr_padding(doc_terms)
ngrams = self.to_ngrams(terms)
self.update_counts(doc_id, ngrams)
def laplace(self, x, y, doc_id):
add_numer = 1 * self.laplace_gama
laplace_mode = 'n-1'
if laplace_mode == 'ch':
v = len(self.vocabulary)
elif laplace_mode == 'ch^n-1':
v = math.pow(len(self.vocabulary),self.n-1)
else:
if doc_id:
v = len(self.term_count_n_1[doc_id]['ngrams'])
else:
v = len(self.corpus_count_n_1['ngrams'])
add_denom = v * self.laplace_gama
return float(x + add_numer) / float(y + add_denom)
def witten(self, count, n, t, log, new_doc):
self.return_unseen = True if new_doc else False
#print 'Witten (New Doc? %s)' % new_doc
#print 'W:', count, n, t
if count:
return float(count) / (n+t)
elif self.return_unseen:
return float(t) / (n+t)
elif log:
return 1
else:
return 1
def pr_ngram(self, doc_id, ngram, new_doc=False, log=True, logbase=2, doc_length=-1):
ngram_n = self.joiner.join(ngram)
ngram_n_1 = self.joiner.join(ngram[:-1])
ngram_n_count = self.term_count_n[doc_id]['ngrams'].get(ngram_n,0)
ngram_n_1_count = self.term_count_n_1[doc_id]['ngrams'].get(ngram_n_1,0)
#print ngram, ngram_n_count, ngram_n_1_count #DEBUG
# Apply smoothing
if self.smoothing == 'Laplace':
pr = self.laplace(ngram_n_count, ngram_n_1_count, doc_id)
#print pr #DEBUG
if self.corpus_mix == 'c' or self.corpus_mix == 'l':
corpus_ngram_n_count = self.corpus_count_n['ngrams'].get(ngram_n,0)
corpus_ngram_n_1_count = self.corpus_count_n_1['ngrams'].get(ngram_n_1,0)
pr_dash = self.laplace(corpus_ngram_n_count - ngram_n_count,
corpus_ngram_n_1_count - ngram_n_1_count,
doc_id='')
pr_mix = float(pr) / float(pr_dash)
elif type(float(self.corpus_mix)) is float and self.corpus_mix > 0:
pr_corpus = self.pr_corpus(ngram_n, ngram_n_1)
pr_mix = self.corpus_mix * pr_corpus + (1 - self.corpus_mix) * pr
else:
pr_mix = pr
elif self.smoothing == 'Witten':
wittenn = ngram_n_1_count
wittent = len([key for key in self.term_count_n[doc_id]['ngrams'] if key.startswith(ngram_n_1)])
pr = self.witten(ngram_n_count, wittenn, wittent, log, new_doc)
pr_mix = pr
else:
pr = float(ngram_n_count) / float(ngram_n_1_count)
if type(float(self.corpus_mix)) is float and self.corpus_mix > 0:
pr_corpus = self.pr_corpus(ngram_n, ngram_n_1)
pr_mix = self.corpus_mix * pr_corpus + (1 - self.corpus_mix) * pr
else:
pr_mix = pr
# Update seen/unseen counts
if ngram_n_count:
seen = True
else:
seen = False
if log:
return (-math.log(pr_mix,logbase),seen)
else:
return (pr_mix,seen)
def pr_corpus(self, ngram_n, ngram_n_1):
if self.corpus_mode == 'Hiemstra':
df = 0
df_total = 0
for doc_id in self.term_count_n_1:
if self.term_count_n[doc_id]['ngrams'].get(ngram_n,0):
df += 1
df_total += 1
pr = float(df) / float(df_total)
if not pr:
pr += 0.0001
else:
corpus_ngram_n_count = self.corpus_count_n['ngrams'].get(ngram_n,0)
corpus_ngram_n_1_count = self.corpus_count_n_1['ngrams'].get(ngram_n_1,0)
pr = self.laplace(corpus_ngram_n_count,
corpus_ngram_n_1_count,
doc_id='')
return pr
def pr_doc(self, doc_id, log=True, logbase=2, doc_length=-1):
''' This method may be overridden by implementers
Here we assume uniform Pr(doc) within Bayes rule
i.e. Pr(doc/string) = Pr(string/doc)
rather than Pr(doc/string) = Pr(string/doc) * Pr(doc)
'''
if log:
return 0
else:
return 1
def calculate(self, doc_terms=[], actual_id='', doc_length=-1):
'''
Given a set of terms, doc_terms
We find the doc in training data (calc_id),
whose LM is more likely to produce those terms
Then return the data structure calculated back
doc_length is passed to pr_ngram() and pr_doc()
it is up to them to use it or not.
normally, it should be ignored if doc_length == -1
calculated{
prob: calculated probability Pr(calc_id/doc_terms)
calc_id: Document ID in training data.
actual_id: Just returned back as passed to us.
seen_unseen_count: Counts for terms seen/unseen in training data
}
'''
calculated = {
'prob': -1,
'calc_id': '',
'actual_id': actual_id,
'seen_unseen_count': (0,0),
'all_probs': []
}
terms = self.lr_padding(doc_terms)
ngrams = self.to_ngrams(terms)
for doc_id in self.term_count_n:
#print '\n', doc_id, ':'
doc_pr = 0
new_doc = True
seen_count = 0
unseen_count = 0
for ngram in ngrams:
(ngram_pr, ngram_seen) = self.pr_ngram(doc_id, ngram,
new_doc=new_doc, log=True, logbase=2, doc_length=doc_length)
doc_pr += ngram_pr
new_doc = False
if ngram_seen:
seen_count += 1
else:
unseen_count += 1
doc_pr += self.pr_doc(doc_id, doc_length=doc_length)
if self.verbose:
print doc_id, actual_id, doc_pr
calculated['all_probs'].append(doc_pr)
if calculated['prob'] == -1 or doc_pr < calculated['prob']:
calculated['prob'] = doc_pr
calculated['calc_id'] = doc_id
calculated['seen_unseen_count'] = (seen_count, unseen_count)
self.unseen_counts.per_doc(doc_id=calculated['actual_id'],
seen_unseen=calculated['seen_unseen_count'])
self.unseen_counts.per_cic(calculated_id=calculated['calc_id'],
actual_id=calculated['actual_id'],
seen_unseen=calculated['seen_unseen_count'])
return calculated
|
gr33ndata/dysl | dysl/dyslib/lm.py | LM.get_ngram_counts | python | def get_ngram_counts(self):
''' Returns a list of n-gram counts
Array of classes counts and last item is for corpus
'''
ngram_counts = {
'classes': [],
'corpus': 0
}
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
print self.term_count_n[doc_id]
class_ngrams = len(self.term_count_n[doc_id]['ngrams'])
ngram_counts['classes'].append(class_ngrams)
corpus_ngrams = len(self.corpus_count_n['ngrams'])
ngram_counts['corpus'] = corpus_ngrams
return ngram_counts | Returns a list of n-gram counts
Array of classes counts and last item is for corpus | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/dyslib/lm.py#L156-L172 | null | class LM:
def __init__(self, n=3, lpad='', rpad='',
smoothing='Laplace', laplace_gama=1,
corpus_mix=0, corpus_mode='Miller',
verbose=False):
'''
Initialize our LM
n: Order of ngram LM, e.g. for bigram LM, n=2
lpad, rpad: Left and right padding.
If empty string '', then don't pad, else
For each document read pad terms
with n-1 repitition on lpad and/or rpad
smoothing: 'Laplace' or 'Witten'
laplace_gama: Multiply 1 and V by this factor gamma
corpus_mix: 0 (default) only use document probabilites
: or value between 0 and 1 lambda
: or c (or l) for Log-likelihood Odds model (Cooper LDA)
corpus_mode: 'Hiemstra' or 'Miller'
: This tell us how to calculate pr_corpus(t)
: In fact, 'Hiemstra' makes sense only with Multivaria LM
: Whereas the LM here is Multinomial.
'''
self.n = n
#(self.n, self.m) = n if type(n) == tuple else (n,0)
# Counters for joint probabilities
# Count for w_1, w_2, w_3 ... w_{n-1}, w_n
self.term_count_n = {}
# Count for w_1, w_2, w_3 ... w_{n-1}
self.term_count_n_1 = {}
# To be used in case of mixing doc prob with corpus prob.
self.corpus_count_n = {'ngrams': {}, 'total': 0}
self.corpus_count_n_1 = {'ngrams': {}, 'total': 0}
self.doc_lengths = {}
# The vocabulary of all classes (for Laplace smoothing)
self.vocabulary = set()
self.lpad=lpad
self.rpad=rpad
self.smoothing = smoothing
self.laplace_gama = float(laplace_gama)
if not type(corpus_mix) is str:
self.corpus_mix = min(float(corpus_mix),1)
else:
self.corpus_mix = corpus_mix
self.corpus_mode = corpus_mode
self.joiner = ' '
self.unseen_counts = UnseenTerms()
self.verbose = verbose
def display(self):
'''
Displays statistics about our LM
'''
voc_list = []
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
ngrams = len(self.term_count_n[doc_id]['ngrams'])
print 'n-Grams (doc %s): %d' % (str(doc_id), ngrams)
ngrams1 = len(self.term_count_n_1[doc_id]['ngrams'])
print '(n-1)-Grams (doc %s): %d' % (str(doc_id), ngrams1)
voc_list.append(ngrams)
print 'Classed Vocabularies:', voc_list
print ''
corpus_ngrams = len(self.corpus_count_n['ngrams'])
print 'n-Grams (collection): %d' % (corpus_ngrams)
corpus_ngrams1 = len(self.corpus_count_n_1['ngrams'])
print '(n-1)-Grams (collection): %d' % (corpus_ngrams1)
self.unseen_counts.display()
# Display overlapping n-grams between classes
#self.overlaps()
def overlaps(self):
omx = []
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
row = [0] * len(doc_ids)
omx.append(row)
for i in range(len(doc_ids)):
doc_id_i = doc_ids[i]
ngrams = len(self.term_count_n[doc_id_i]['ngrams'])
omx[i][i] = ngrams
for j in range(i):
doc_id_j = doc_ids[j]
ongrams = 0
#for ngram in self.term_count_n[doc_id_j]['ngrams']:
for ngram in self.term_count_n[doc_id_i]['ngrams']:
#if ngram in self.term_count_n[doc_id_i]['ngrams']:
if ngram in self.term_count_n[doc_id_j]['ngrams']:
ongrams += 1
omx[i][j] = 0 #ongrams
omx[j][i] = ongrams
print '\nn-gram overlaps:'
print doc_ids
for i in range(len(omx)):
row = []
for j in range(len(omx[i])):
row.append( round( float(omx[i][j] * 100) / omx[i][i],2 ) )
#row.append(omx[i][j])
print row
def to_ngrams(self, terms):
''' Converts terms to all possibe ngrams
terms: list of terms
'''
if len(terms) <= self.n:
return terms
if self.n == 1:
n_grams = [[term] for term in terms]
else:
n_grams = []
for i in range(0,len(terms)-self.n+1):
n_grams.append(terms[i:i+self.n])
return n_grams
def lr_padding(self, terms):
'''
Pad doc from the left and right before adding,
depending on what's in self.lpad and self.rpad
If any of them is '', then don't pad there.
'''
lpad = rpad = []
if self.lpad:
lpad = [self.lpad] * (self.n - 1)
if self.rpad:
rpad = [self.rpad] * (self.n - 1)
return lpad + terms + rpad
def update_counts(self, doc_id ='', ngrams=[]):
if not doc_id in self.term_count_n:
self.term_count_n[doc_id] = {'ngrams': {}, 'total': 0}
if not doc_id in self.term_count_n_1:
self.term_count_n_1[doc_id] = {'ngrams': {}, 'total': 0}
for ngram in ngrams:
# Generate n-grams and sub-ngrams
# For example (n=2): ['t1','t2','t3','t4']
# ngram_n: ['t1 t2','t2 t3', 't3 t4']
# ngram_n_1: ['t1','t2','t3']
ngram_n = self.joiner.join(ngram)
ngram_n_1 = self.joiner.join(ngram[:-1])
# Update n-gram counts for doc_id
if ngram_n in self.term_count_n[doc_id]['ngrams']:
self.term_count_n[doc_id]['ngrams'][ngram_n] += 1
else:
self.term_count_n[doc_id]['ngrams'][ngram_n] = 1
self.term_count_n[doc_id]['total'] += 1
# Update n-gram counts for corpus
if ngram_n in self.corpus_count_n['ngrams']:
self.corpus_count_n['ngrams'][ngram_n] += 1
else:
self.corpus_count_n['ngrams'][ngram_n] = 1
self.corpus_count_n['total'] += 1
# Update (n-1)-gram counts for doc_id
if ngram_n_1 in self.term_count_n_1[doc_id]['ngrams']:
self.term_count_n_1[doc_id]['ngrams'][ngram_n_1] += 1
else:
self.term_count_n_1[doc_id]['ngrams'][ngram_n_1] = 1
self.term_count_n_1[doc_id]['total'] += 1
# Update (n-1)-gram counts for corpus
if ngram_n_1 in self.corpus_count_n_1['ngrams']:
self.corpus_count_n_1['ngrams'][ngram_n_1] += 1
else:
self.corpus_count_n_1['ngrams'][ngram_n_1] = 1
self.corpus_count_n_1['total'] += 1
def update_lengths(self, doc_id ='', doc_length=0):
if not doc_id in self.doc_lengths:
self.doc_lengths[doc_id] = {'lengths': [], 'mean': -1}
self.doc_lengths[doc_id]['lengths'].append(int(doc_length))
def get_mean_lengths(self, doc_id =''):
my_mean_len = 0
others_mean_len = []
for d_id in self.doc_lengths:
if self.doc_lengths[d_id]['mean'] == -1:
dlen = self.doc_lengths[d_id]['lengths']
doc_mean_len = float(sum(dlen)) / len(dlen)
self.doc_lengths[d_id]['mean'] = doc_mean_len
else:
doc_mean_len = self.doc_lengths[d_id]['mean']
if d_id == doc_id:
my_mean_len = doc_mean_len
else:
others_mean_len.append(doc_mean_len)
oth_mean_len = float(sum(others_mean_len)) / len(others_mean_len)
return (my_mean_len, oth_mean_len)
def add_doc(self, doc_id ='', doc_terms=[], doc_length=-1):
'''
Add new document to our Language Model (training phase)
doc_id is used here, so we build seperate LF for each doc_id
I.e. if you call it more than once with same doc_id,
then all terms given via doc_terms will contribute to same LM
doc_terms: list of words in document to be added
doc_length: the length of the document, you can provide it yourself,
otherwise, we use len(doc_terms) instead.
'''
if doc_length == -1:
self.update_lengths(doc_id=doc_id, doc_length=len(doc_terms))
else:
self.update_lengths(doc_id=doc_id, doc_length=int(doc_length))
for term in doc_terms:
self.vocabulary.add(term)
terms = self.lr_padding(doc_terms)
ngrams = self.to_ngrams(terms)
self.update_counts(doc_id, ngrams)
def laplace(self, x, y, doc_id):
add_numer = 1 * self.laplace_gama
laplace_mode = 'n-1'
if laplace_mode == 'ch':
v = len(self.vocabulary)
elif laplace_mode == 'ch^n-1':
v = math.pow(len(self.vocabulary),self.n-1)
else:
if doc_id:
v = len(self.term_count_n_1[doc_id]['ngrams'])
else:
v = len(self.corpus_count_n_1['ngrams'])
add_denom = v * self.laplace_gama
return float(x + add_numer) / float(y + add_denom)
def witten(self, count, n, t, log, new_doc):
self.return_unseen = True if new_doc else False
#print 'Witten (New Doc? %s)' % new_doc
#print 'W:', count, n, t
if count:
return float(count) / (n+t)
elif self.return_unseen:
return float(t) / (n+t)
elif log:
return 1
else:
return 1
def pr_ngram(self, doc_id, ngram, new_doc=False, log=True, logbase=2, doc_length=-1):
ngram_n = self.joiner.join(ngram)
ngram_n_1 = self.joiner.join(ngram[:-1])
ngram_n_count = self.term_count_n[doc_id]['ngrams'].get(ngram_n,0)
ngram_n_1_count = self.term_count_n_1[doc_id]['ngrams'].get(ngram_n_1,0)
#print ngram, ngram_n_count, ngram_n_1_count #DEBUG
# Apply smoothing
if self.smoothing == 'Laplace':
pr = self.laplace(ngram_n_count, ngram_n_1_count, doc_id)
#print pr #DEBUG
if self.corpus_mix == 'c' or self.corpus_mix == 'l':
corpus_ngram_n_count = self.corpus_count_n['ngrams'].get(ngram_n,0)
corpus_ngram_n_1_count = self.corpus_count_n_1['ngrams'].get(ngram_n_1,0)
pr_dash = self.laplace(corpus_ngram_n_count - ngram_n_count,
corpus_ngram_n_1_count - ngram_n_1_count,
doc_id='')
pr_mix = float(pr) / float(pr_dash)
elif type(float(self.corpus_mix)) is float and self.corpus_mix > 0:
pr_corpus = self.pr_corpus(ngram_n, ngram_n_1)
pr_mix = self.corpus_mix * pr_corpus + (1 - self.corpus_mix) * pr
else:
pr_mix = pr
elif self.smoothing == 'Witten':
wittenn = ngram_n_1_count
wittent = len([key for key in self.term_count_n[doc_id]['ngrams'] if key.startswith(ngram_n_1)])
pr = self.witten(ngram_n_count, wittenn, wittent, log, new_doc)
pr_mix = pr
else:
pr = float(ngram_n_count) / float(ngram_n_1_count)
if type(float(self.corpus_mix)) is float and self.corpus_mix > 0:
pr_corpus = self.pr_corpus(ngram_n, ngram_n_1)
pr_mix = self.corpus_mix * pr_corpus + (1 - self.corpus_mix) * pr
else:
pr_mix = pr
# Update seen/unseen counts
if ngram_n_count:
seen = True
else:
seen = False
if log:
return (-math.log(pr_mix,logbase),seen)
else:
return (pr_mix,seen)
def pr_corpus(self, ngram_n, ngram_n_1):
if self.corpus_mode == 'Hiemstra':
df = 0
df_total = 0
for doc_id in self.term_count_n_1:
if self.term_count_n[doc_id]['ngrams'].get(ngram_n,0):
df += 1
df_total += 1
pr = float(df) / float(df_total)
if not pr:
pr += 0.0001
else:
corpus_ngram_n_count = self.corpus_count_n['ngrams'].get(ngram_n,0)
corpus_ngram_n_1_count = self.corpus_count_n_1['ngrams'].get(ngram_n_1,0)
pr = self.laplace(corpus_ngram_n_count,
corpus_ngram_n_1_count,
doc_id='')
return pr
def pr_doc(self, doc_id, log=True, logbase=2, doc_length=-1):
''' This method may be overridden by implementers
Here we assume uniform Pr(doc) within Bayes rule
i.e. Pr(doc/string) = Pr(string/doc)
rather than Pr(doc/string) = Pr(string/doc) * Pr(doc)
'''
if log:
return 0
else:
return 1
def calculate(self, doc_terms=[], actual_id='', doc_length=-1):
'''
Given a set of terms, doc_terms
We find the doc in training data (calc_id),
whose LM is more likely to produce those terms
Then return the data structure calculated back
doc_length is passed to pr_ngram() and pr_doc()
it is up to them to use it or not.
normally, it should be ignored if doc_length == -1
calculated{
prob: calculated probability Pr(calc_id/doc_terms)
calc_id: Document ID in training data.
actual_id: Just returned back as passed to us.
seen_unseen_count: Counts for terms seen/unseen in training data
}
'''
calculated = {
'prob': -1,
'calc_id': '',
'actual_id': actual_id,
'seen_unseen_count': (0,0),
'all_probs': []
}
terms = self.lr_padding(doc_terms)
ngrams = self.to_ngrams(terms)
for doc_id in self.term_count_n:
#print '\n', doc_id, ':'
doc_pr = 0
new_doc = True
seen_count = 0
unseen_count = 0
for ngram in ngrams:
(ngram_pr, ngram_seen) = self.pr_ngram(doc_id, ngram,
new_doc=new_doc, log=True, logbase=2, doc_length=doc_length)
doc_pr += ngram_pr
new_doc = False
if ngram_seen:
seen_count += 1
else:
unseen_count += 1
doc_pr += self.pr_doc(doc_id, doc_length=doc_length)
if self.verbose:
print doc_id, actual_id, doc_pr
calculated['all_probs'].append(doc_pr)
if calculated['prob'] == -1 or doc_pr < calculated['prob']:
calculated['prob'] = doc_pr
calculated['calc_id'] = doc_id
calculated['seen_unseen_count'] = (seen_count, unseen_count)
self.unseen_counts.per_doc(doc_id=calculated['actual_id'],
seen_unseen=calculated['seen_unseen_count'])
self.unseen_counts.per_cic(calculated_id=calculated['calc_id'],
actual_id=calculated['actual_id'],
seen_unseen=calculated['seen_unseen_count'])
return calculated
|
gr33ndata/dysl | dysl/dyslib/lm.py | LM.to_ngrams | python | def to_ngrams(self, terms):
''' Converts terms to all possibe ngrams
terms: list of terms
'''
if len(terms) <= self.n:
return terms
if self.n == 1:
n_grams = [[term] for term in terms]
else:
n_grams = []
for i in range(0,len(terms)-self.n+1):
n_grams.append(terms[i:i+self.n])
return n_grams | Converts terms to all possibe ngrams
terms: list of terms | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/dyslib/lm.py#L204-L216 | null | class LM:
def __init__(self, n=3, lpad='', rpad='',
smoothing='Laplace', laplace_gama=1,
corpus_mix=0, corpus_mode='Miller',
verbose=False):
'''
Initialize our LM
n: Order of ngram LM, e.g. for bigram LM, n=2
lpad, rpad: Left and right padding.
If empty string '', then don't pad, else
For each document read pad terms
with n-1 repitition on lpad and/or rpad
smoothing: 'Laplace' or 'Witten'
laplace_gama: Multiply 1 and V by this factor gamma
corpus_mix: 0 (default) only use document probabilites
: or value between 0 and 1 lambda
: or c (or l) for Log-likelihood Odds model (Cooper LDA)
corpus_mode: 'Hiemstra' or 'Miller'
: This tell us how to calculate pr_corpus(t)
: In fact, 'Hiemstra' makes sense only with Multivaria LM
: Whereas the LM here is Multinomial.
'''
self.n = n
#(self.n, self.m) = n if type(n) == tuple else (n,0)
# Counters for joint probabilities
# Count for w_1, w_2, w_3 ... w_{n-1}, w_n
self.term_count_n = {}
# Count for w_1, w_2, w_3 ... w_{n-1}
self.term_count_n_1 = {}
# To be used in case of mixing doc prob with corpus prob.
self.corpus_count_n = {'ngrams': {}, 'total': 0}
self.corpus_count_n_1 = {'ngrams': {}, 'total': 0}
self.doc_lengths = {}
# The vocabulary of all classes (for Laplace smoothing)
self.vocabulary = set()
self.lpad=lpad
self.rpad=rpad
self.smoothing = smoothing
self.laplace_gama = float(laplace_gama)
if not type(corpus_mix) is str:
self.corpus_mix = min(float(corpus_mix),1)
else:
self.corpus_mix = corpus_mix
self.corpus_mode = corpus_mode
self.joiner = ' '
self.unseen_counts = UnseenTerms()
self.verbose = verbose
def display(self):
'''
Displays statistics about our LM
'''
voc_list = []
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
ngrams = len(self.term_count_n[doc_id]['ngrams'])
print 'n-Grams (doc %s): %d' % (str(doc_id), ngrams)
ngrams1 = len(self.term_count_n_1[doc_id]['ngrams'])
print '(n-1)-Grams (doc %s): %d' % (str(doc_id), ngrams1)
voc_list.append(ngrams)
print 'Classed Vocabularies:', voc_list
print ''
corpus_ngrams = len(self.corpus_count_n['ngrams'])
print 'n-Grams (collection): %d' % (corpus_ngrams)
corpus_ngrams1 = len(self.corpus_count_n_1['ngrams'])
print '(n-1)-Grams (collection): %d' % (corpus_ngrams1)
self.unseen_counts.display()
# Display overlapping n-grams between classes
#self.overlaps()
def get_ngram_counts(self):
''' Returns a list of n-gram counts
Array of classes counts and last item is for corpus
'''
ngram_counts = {
'classes': [],
'corpus': 0
}
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
print self.term_count_n[doc_id]
class_ngrams = len(self.term_count_n[doc_id]['ngrams'])
ngram_counts['classes'].append(class_ngrams)
corpus_ngrams = len(self.corpus_count_n['ngrams'])
ngram_counts['corpus'] = corpus_ngrams
return ngram_counts
def overlaps(self):
omx = []
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
row = [0] * len(doc_ids)
omx.append(row)
for i in range(len(doc_ids)):
doc_id_i = doc_ids[i]
ngrams = len(self.term_count_n[doc_id_i]['ngrams'])
omx[i][i] = ngrams
for j in range(i):
doc_id_j = doc_ids[j]
ongrams = 0
#for ngram in self.term_count_n[doc_id_j]['ngrams']:
for ngram in self.term_count_n[doc_id_i]['ngrams']:
#if ngram in self.term_count_n[doc_id_i]['ngrams']:
if ngram in self.term_count_n[doc_id_j]['ngrams']:
ongrams += 1
omx[i][j] = 0 #ongrams
omx[j][i] = ongrams
print '\nn-gram overlaps:'
print doc_ids
for i in range(len(omx)):
row = []
for j in range(len(omx[i])):
row.append( round( float(omx[i][j] * 100) / omx[i][i],2 ) )
#row.append(omx[i][j])
print row
def lr_padding(self, terms):
'''
Pad doc from the left and right before adding,
depending on what's in self.lpad and self.rpad
If any of them is '', then don't pad there.
'''
lpad = rpad = []
if self.lpad:
lpad = [self.lpad] * (self.n - 1)
if self.rpad:
rpad = [self.rpad] * (self.n - 1)
return lpad + terms + rpad
def update_counts(self, doc_id ='', ngrams=[]):
if not doc_id in self.term_count_n:
self.term_count_n[doc_id] = {'ngrams': {}, 'total': 0}
if not doc_id in self.term_count_n_1:
self.term_count_n_1[doc_id] = {'ngrams': {}, 'total': 0}
for ngram in ngrams:
# Generate n-grams and sub-ngrams
# For example (n=2): ['t1','t2','t3','t4']
# ngram_n: ['t1 t2','t2 t3', 't3 t4']
# ngram_n_1: ['t1','t2','t3']
ngram_n = self.joiner.join(ngram)
ngram_n_1 = self.joiner.join(ngram[:-1])
# Update n-gram counts for doc_id
if ngram_n in self.term_count_n[doc_id]['ngrams']:
self.term_count_n[doc_id]['ngrams'][ngram_n] += 1
else:
self.term_count_n[doc_id]['ngrams'][ngram_n] = 1
self.term_count_n[doc_id]['total'] += 1
# Update n-gram counts for corpus
if ngram_n in self.corpus_count_n['ngrams']:
self.corpus_count_n['ngrams'][ngram_n] += 1
else:
self.corpus_count_n['ngrams'][ngram_n] = 1
self.corpus_count_n['total'] += 1
# Update (n-1)-gram counts for doc_id
if ngram_n_1 in self.term_count_n_1[doc_id]['ngrams']:
self.term_count_n_1[doc_id]['ngrams'][ngram_n_1] += 1
else:
self.term_count_n_1[doc_id]['ngrams'][ngram_n_1] = 1
self.term_count_n_1[doc_id]['total'] += 1
# Update (n-1)-gram counts for corpus
if ngram_n_1 in self.corpus_count_n_1['ngrams']:
self.corpus_count_n_1['ngrams'][ngram_n_1] += 1
else:
self.corpus_count_n_1['ngrams'][ngram_n_1] = 1
self.corpus_count_n_1['total'] += 1
def update_lengths(self, doc_id ='', doc_length=0):
if not doc_id in self.doc_lengths:
self.doc_lengths[doc_id] = {'lengths': [], 'mean': -1}
self.doc_lengths[doc_id]['lengths'].append(int(doc_length))
def get_mean_lengths(self, doc_id =''):
my_mean_len = 0
others_mean_len = []
for d_id in self.doc_lengths:
if self.doc_lengths[d_id]['mean'] == -1:
dlen = self.doc_lengths[d_id]['lengths']
doc_mean_len = float(sum(dlen)) / len(dlen)
self.doc_lengths[d_id]['mean'] = doc_mean_len
else:
doc_mean_len = self.doc_lengths[d_id]['mean']
if d_id == doc_id:
my_mean_len = doc_mean_len
else:
others_mean_len.append(doc_mean_len)
oth_mean_len = float(sum(others_mean_len)) / len(others_mean_len)
return (my_mean_len, oth_mean_len)
def add_doc(self, doc_id ='', doc_terms=[], doc_length=-1):
'''
Add new document to our Language Model (training phase)
doc_id is used here, so we build seperate LF for each doc_id
I.e. if you call it more than once with same doc_id,
then all terms given via doc_terms will contribute to same LM
doc_terms: list of words in document to be added
doc_length: the length of the document, you can provide it yourself,
otherwise, we use len(doc_terms) instead.
'''
if doc_length == -1:
self.update_lengths(doc_id=doc_id, doc_length=len(doc_terms))
else:
self.update_lengths(doc_id=doc_id, doc_length=int(doc_length))
for term in doc_terms:
self.vocabulary.add(term)
terms = self.lr_padding(doc_terms)
ngrams = self.to_ngrams(terms)
self.update_counts(doc_id, ngrams)
def laplace(self, x, y, doc_id):
add_numer = 1 * self.laplace_gama
laplace_mode = 'n-1'
if laplace_mode == 'ch':
v = len(self.vocabulary)
elif laplace_mode == 'ch^n-1':
v = math.pow(len(self.vocabulary),self.n-1)
else:
if doc_id:
v = len(self.term_count_n_1[doc_id]['ngrams'])
else:
v = len(self.corpus_count_n_1['ngrams'])
add_denom = v * self.laplace_gama
return float(x + add_numer) / float(y + add_denom)
def witten(self, count, n, t, log, new_doc):
self.return_unseen = True if new_doc else False
#print 'Witten (New Doc? %s)' % new_doc
#print 'W:', count, n, t
if count:
return float(count) / (n+t)
elif self.return_unseen:
return float(t) / (n+t)
elif log:
return 1
else:
return 1
def pr_ngram(self, doc_id, ngram, new_doc=False, log=True, logbase=2, doc_length=-1):
ngram_n = self.joiner.join(ngram)
ngram_n_1 = self.joiner.join(ngram[:-1])
ngram_n_count = self.term_count_n[doc_id]['ngrams'].get(ngram_n,0)
ngram_n_1_count = self.term_count_n_1[doc_id]['ngrams'].get(ngram_n_1,0)
#print ngram, ngram_n_count, ngram_n_1_count #DEBUG
# Apply smoothing
if self.smoothing == 'Laplace':
pr = self.laplace(ngram_n_count, ngram_n_1_count, doc_id)
#print pr #DEBUG
if self.corpus_mix == 'c' or self.corpus_mix == 'l':
corpus_ngram_n_count = self.corpus_count_n['ngrams'].get(ngram_n,0)
corpus_ngram_n_1_count = self.corpus_count_n_1['ngrams'].get(ngram_n_1,0)
pr_dash = self.laplace(corpus_ngram_n_count - ngram_n_count,
corpus_ngram_n_1_count - ngram_n_1_count,
doc_id='')
pr_mix = float(pr) / float(pr_dash)
elif type(float(self.corpus_mix)) is float and self.corpus_mix > 0:
pr_corpus = self.pr_corpus(ngram_n, ngram_n_1)
pr_mix = self.corpus_mix * pr_corpus + (1 - self.corpus_mix) * pr
else:
pr_mix = pr
elif self.smoothing == 'Witten':
wittenn = ngram_n_1_count
wittent = len([key for key in self.term_count_n[doc_id]['ngrams'] if key.startswith(ngram_n_1)])
pr = self.witten(ngram_n_count, wittenn, wittent, log, new_doc)
pr_mix = pr
else:
pr = float(ngram_n_count) / float(ngram_n_1_count)
if type(float(self.corpus_mix)) is float and self.corpus_mix > 0:
pr_corpus = self.pr_corpus(ngram_n, ngram_n_1)
pr_mix = self.corpus_mix * pr_corpus + (1 - self.corpus_mix) * pr
else:
pr_mix = pr
# Update seen/unseen counts
if ngram_n_count:
seen = True
else:
seen = False
if log:
return (-math.log(pr_mix,logbase),seen)
else:
return (pr_mix,seen)
def pr_corpus(self, ngram_n, ngram_n_1):
if self.corpus_mode == 'Hiemstra':
df = 0
df_total = 0
for doc_id in self.term_count_n_1:
if self.term_count_n[doc_id]['ngrams'].get(ngram_n,0):
df += 1
df_total += 1
pr = float(df) / float(df_total)
if not pr:
pr += 0.0001
else:
corpus_ngram_n_count = self.corpus_count_n['ngrams'].get(ngram_n,0)
corpus_ngram_n_1_count = self.corpus_count_n_1['ngrams'].get(ngram_n_1,0)
pr = self.laplace(corpus_ngram_n_count,
corpus_ngram_n_1_count,
doc_id='')
return pr
def pr_doc(self, doc_id, log=True, logbase=2, doc_length=-1):
''' This method may be overridden by implementers
Here we assume uniform Pr(doc) within Bayes rule
i.e. Pr(doc/string) = Pr(string/doc)
rather than Pr(doc/string) = Pr(string/doc) * Pr(doc)
'''
if log:
return 0
else:
return 1
def calculate(self, doc_terms=[], actual_id='', doc_length=-1):
'''
Given a set of terms, doc_terms
We find the doc in training data (calc_id),
whose LM is more likely to produce those terms
Then return the data structure calculated back
doc_length is passed to pr_ngram() and pr_doc()
it is up to them to use it or not.
normally, it should be ignored if doc_length == -1
calculated{
prob: calculated probability Pr(calc_id/doc_terms)
calc_id: Document ID in training data.
actual_id: Just returned back as passed to us.
seen_unseen_count: Counts for terms seen/unseen in training data
}
'''
calculated = {
'prob': -1,
'calc_id': '',
'actual_id': actual_id,
'seen_unseen_count': (0,0),
'all_probs': []
}
terms = self.lr_padding(doc_terms)
ngrams = self.to_ngrams(terms)
for doc_id in self.term_count_n:
#print '\n', doc_id, ':'
doc_pr = 0
new_doc = True
seen_count = 0
unseen_count = 0
for ngram in ngrams:
(ngram_pr, ngram_seen) = self.pr_ngram(doc_id, ngram,
new_doc=new_doc, log=True, logbase=2, doc_length=doc_length)
doc_pr += ngram_pr
new_doc = False
if ngram_seen:
seen_count += 1
else:
unseen_count += 1
doc_pr += self.pr_doc(doc_id, doc_length=doc_length)
if self.verbose:
print doc_id, actual_id, doc_pr
calculated['all_probs'].append(doc_pr)
if calculated['prob'] == -1 or doc_pr < calculated['prob']:
calculated['prob'] = doc_pr
calculated['calc_id'] = doc_id
calculated['seen_unseen_count'] = (seen_count, unseen_count)
self.unseen_counts.per_doc(doc_id=calculated['actual_id'],
seen_unseen=calculated['seen_unseen_count'])
self.unseen_counts.per_cic(calculated_id=calculated['calc_id'],
actual_id=calculated['actual_id'],
seen_unseen=calculated['seen_unseen_count'])
return calculated
|
gr33ndata/dysl | dysl/dyslib/lm.py | LM.lr_padding | python | def lr_padding(self, terms):
'''
Pad doc from the left and right before adding,
depending on what's in self.lpad and self.rpad
If any of them is '', then don't pad there.
'''
lpad = rpad = []
if self.lpad:
lpad = [self.lpad] * (self.n - 1)
if self.rpad:
rpad = [self.rpad] * (self.n - 1)
return lpad + terms + rpad | Pad doc from the left and right before adding,
depending on what's in self.lpad and self.rpad
If any of them is '', then don't pad there. | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/dyslib/lm.py#L218-L229 | null | class LM:
def __init__(self, n=3, lpad='', rpad='',
smoothing='Laplace', laplace_gama=1,
corpus_mix=0, corpus_mode='Miller',
verbose=False):
'''
Initialize our LM
n: Order of ngram LM, e.g. for bigram LM, n=2
lpad, rpad: Left and right padding.
If empty string '', then don't pad, else
For each document read pad terms
with n-1 repitition on lpad and/or rpad
smoothing: 'Laplace' or 'Witten'
laplace_gama: Multiply 1 and V by this factor gamma
corpus_mix: 0 (default) only use document probabilites
: or value between 0 and 1 lambda
: or c (or l) for Log-likelihood Odds model (Cooper LDA)
corpus_mode: 'Hiemstra' or 'Miller'
: This tell us how to calculate pr_corpus(t)
: In fact, 'Hiemstra' makes sense only with Multivaria LM
: Whereas the LM here is Multinomial.
'''
self.n = n
#(self.n, self.m) = n if type(n) == tuple else (n,0)
# Counters for joint probabilities
# Count for w_1, w_2, w_3 ... w_{n-1}, w_n
self.term_count_n = {}
# Count for w_1, w_2, w_3 ... w_{n-1}
self.term_count_n_1 = {}
# To be used in case of mixing doc prob with corpus prob.
self.corpus_count_n = {'ngrams': {}, 'total': 0}
self.corpus_count_n_1 = {'ngrams': {}, 'total': 0}
self.doc_lengths = {}
# The vocabulary of all classes (for Laplace smoothing)
self.vocabulary = set()
self.lpad=lpad
self.rpad=rpad
self.smoothing = smoothing
self.laplace_gama = float(laplace_gama)
if not type(corpus_mix) is str:
self.corpus_mix = min(float(corpus_mix),1)
else:
self.corpus_mix = corpus_mix
self.corpus_mode = corpus_mode
self.joiner = ' '
self.unseen_counts = UnseenTerms()
self.verbose = verbose
def display(self):
'''
Displays statistics about our LM
'''
voc_list = []
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
ngrams = len(self.term_count_n[doc_id]['ngrams'])
print 'n-Grams (doc %s): %d' % (str(doc_id), ngrams)
ngrams1 = len(self.term_count_n_1[doc_id]['ngrams'])
print '(n-1)-Grams (doc %s): %d' % (str(doc_id), ngrams1)
voc_list.append(ngrams)
print 'Classed Vocabularies:', voc_list
print ''
corpus_ngrams = len(self.corpus_count_n['ngrams'])
print 'n-Grams (collection): %d' % (corpus_ngrams)
corpus_ngrams1 = len(self.corpus_count_n_1['ngrams'])
print '(n-1)-Grams (collection): %d' % (corpus_ngrams1)
self.unseen_counts.display()
# Display overlapping n-grams between classes
#self.overlaps()
def get_ngram_counts(self):
''' Returns a list of n-gram counts
Array of classes counts and last item is for corpus
'''
ngram_counts = {
'classes': [],
'corpus': 0
}
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
print self.term_count_n[doc_id]
class_ngrams = len(self.term_count_n[doc_id]['ngrams'])
ngram_counts['classes'].append(class_ngrams)
corpus_ngrams = len(self.corpus_count_n['ngrams'])
ngram_counts['corpus'] = corpus_ngrams
return ngram_counts
def overlaps(self):
omx = []
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
row = [0] * len(doc_ids)
omx.append(row)
for i in range(len(doc_ids)):
doc_id_i = doc_ids[i]
ngrams = len(self.term_count_n[doc_id_i]['ngrams'])
omx[i][i] = ngrams
for j in range(i):
doc_id_j = doc_ids[j]
ongrams = 0
#for ngram in self.term_count_n[doc_id_j]['ngrams']:
for ngram in self.term_count_n[doc_id_i]['ngrams']:
#if ngram in self.term_count_n[doc_id_i]['ngrams']:
if ngram in self.term_count_n[doc_id_j]['ngrams']:
ongrams += 1
omx[i][j] = 0 #ongrams
omx[j][i] = ongrams
print '\nn-gram overlaps:'
print doc_ids
for i in range(len(omx)):
row = []
for j in range(len(omx[i])):
row.append( round( float(omx[i][j] * 100) / omx[i][i],2 ) )
#row.append(omx[i][j])
print row
def to_ngrams(self, terms):
''' Converts terms to all possibe ngrams
terms: list of terms
'''
if len(terms) <= self.n:
return terms
if self.n == 1:
n_grams = [[term] for term in terms]
else:
n_grams = []
for i in range(0,len(terms)-self.n+1):
n_grams.append(terms[i:i+self.n])
return n_grams
def update_counts(self, doc_id ='', ngrams=[]):
if not doc_id in self.term_count_n:
self.term_count_n[doc_id] = {'ngrams': {}, 'total': 0}
if not doc_id in self.term_count_n_1:
self.term_count_n_1[doc_id] = {'ngrams': {}, 'total': 0}
for ngram in ngrams:
# Generate n-grams and sub-ngrams
# For example (n=2): ['t1','t2','t3','t4']
# ngram_n: ['t1 t2','t2 t3', 't3 t4']
# ngram_n_1: ['t1','t2','t3']
ngram_n = self.joiner.join(ngram)
ngram_n_1 = self.joiner.join(ngram[:-1])
# Update n-gram counts for doc_id
if ngram_n in self.term_count_n[doc_id]['ngrams']:
self.term_count_n[doc_id]['ngrams'][ngram_n] += 1
else:
self.term_count_n[doc_id]['ngrams'][ngram_n] = 1
self.term_count_n[doc_id]['total'] += 1
# Update n-gram counts for corpus
if ngram_n in self.corpus_count_n['ngrams']:
self.corpus_count_n['ngrams'][ngram_n] += 1
else:
self.corpus_count_n['ngrams'][ngram_n] = 1
self.corpus_count_n['total'] += 1
# Update (n-1)-gram counts for doc_id
if ngram_n_1 in self.term_count_n_1[doc_id]['ngrams']:
self.term_count_n_1[doc_id]['ngrams'][ngram_n_1] += 1
else:
self.term_count_n_1[doc_id]['ngrams'][ngram_n_1] = 1
self.term_count_n_1[doc_id]['total'] += 1
# Update (n-1)-gram counts for corpus
if ngram_n_1 in self.corpus_count_n_1['ngrams']:
self.corpus_count_n_1['ngrams'][ngram_n_1] += 1
else:
self.corpus_count_n_1['ngrams'][ngram_n_1] = 1
self.corpus_count_n_1['total'] += 1
def update_lengths(self, doc_id ='', doc_length=0):
if not doc_id in self.doc_lengths:
self.doc_lengths[doc_id] = {'lengths': [], 'mean': -1}
self.doc_lengths[doc_id]['lengths'].append(int(doc_length))
def get_mean_lengths(self, doc_id =''):
my_mean_len = 0
others_mean_len = []
for d_id in self.doc_lengths:
if self.doc_lengths[d_id]['mean'] == -1:
dlen = self.doc_lengths[d_id]['lengths']
doc_mean_len = float(sum(dlen)) / len(dlen)
self.doc_lengths[d_id]['mean'] = doc_mean_len
else:
doc_mean_len = self.doc_lengths[d_id]['mean']
if d_id == doc_id:
my_mean_len = doc_mean_len
else:
others_mean_len.append(doc_mean_len)
oth_mean_len = float(sum(others_mean_len)) / len(others_mean_len)
return (my_mean_len, oth_mean_len)
def add_doc(self, doc_id ='', doc_terms=[], doc_length=-1):
'''
Add new document to our Language Model (training phase)
doc_id is used here, so we build seperate LF for each doc_id
I.e. if you call it more than once with same doc_id,
then all terms given via doc_terms will contribute to same LM
doc_terms: list of words in document to be added
doc_length: the length of the document, you can provide it yourself,
otherwise, we use len(doc_terms) instead.
'''
if doc_length == -1:
self.update_lengths(doc_id=doc_id, doc_length=len(doc_terms))
else:
self.update_lengths(doc_id=doc_id, doc_length=int(doc_length))
for term in doc_terms:
self.vocabulary.add(term)
terms = self.lr_padding(doc_terms)
ngrams = self.to_ngrams(terms)
self.update_counts(doc_id, ngrams)
def laplace(self, x, y, doc_id):
add_numer = 1 * self.laplace_gama
laplace_mode = 'n-1'
if laplace_mode == 'ch':
v = len(self.vocabulary)
elif laplace_mode == 'ch^n-1':
v = math.pow(len(self.vocabulary),self.n-1)
else:
if doc_id:
v = len(self.term_count_n_1[doc_id]['ngrams'])
else:
v = len(self.corpus_count_n_1['ngrams'])
add_denom = v * self.laplace_gama
return float(x + add_numer) / float(y + add_denom)
def witten(self, count, n, t, log, new_doc):
self.return_unseen = True if new_doc else False
#print 'Witten (New Doc? %s)' % new_doc
#print 'W:', count, n, t
if count:
return float(count) / (n+t)
elif self.return_unseen:
return float(t) / (n+t)
elif log:
return 1
else:
return 1
def pr_ngram(self, doc_id, ngram, new_doc=False, log=True, logbase=2, doc_length=-1):
ngram_n = self.joiner.join(ngram)
ngram_n_1 = self.joiner.join(ngram[:-1])
ngram_n_count = self.term_count_n[doc_id]['ngrams'].get(ngram_n,0)
ngram_n_1_count = self.term_count_n_1[doc_id]['ngrams'].get(ngram_n_1,0)
#print ngram, ngram_n_count, ngram_n_1_count #DEBUG
# Apply smoothing
if self.smoothing == 'Laplace':
pr = self.laplace(ngram_n_count, ngram_n_1_count, doc_id)
#print pr #DEBUG
if self.corpus_mix == 'c' or self.corpus_mix == 'l':
corpus_ngram_n_count = self.corpus_count_n['ngrams'].get(ngram_n,0)
corpus_ngram_n_1_count = self.corpus_count_n_1['ngrams'].get(ngram_n_1,0)
pr_dash = self.laplace(corpus_ngram_n_count - ngram_n_count,
corpus_ngram_n_1_count - ngram_n_1_count,
doc_id='')
pr_mix = float(pr) / float(pr_dash)
elif type(float(self.corpus_mix)) is float and self.corpus_mix > 0:
pr_corpus = self.pr_corpus(ngram_n, ngram_n_1)
pr_mix = self.corpus_mix * pr_corpus + (1 - self.corpus_mix) * pr
else:
pr_mix = pr
elif self.smoothing == 'Witten':
wittenn = ngram_n_1_count
wittent = len([key for key in self.term_count_n[doc_id]['ngrams'] if key.startswith(ngram_n_1)])
pr = self.witten(ngram_n_count, wittenn, wittent, log, new_doc)
pr_mix = pr
else:
pr = float(ngram_n_count) / float(ngram_n_1_count)
if type(float(self.corpus_mix)) is float and self.corpus_mix > 0:
pr_corpus = self.pr_corpus(ngram_n, ngram_n_1)
pr_mix = self.corpus_mix * pr_corpus + (1 - self.corpus_mix) * pr
else:
pr_mix = pr
# Update seen/unseen counts
if ngram_n_count:
seen = True
else:
seen = False
if log:
return (-math.log(pr_mix,logbase),seen)
else:
return (pr_mix,seen)
def pr_corpus(self, ngram_n, ngram_n_1):
if self.corpus_mode == 'Hiemstra':
df = 0
df_total = 0
for doc_id in self.term_count_n_1:
if self.term_count_n[doc_id]['ngrams'].get(ngram_n,0):
df += 1
df_total += 1
pr = float(df) / float(df_total)
if not pr:
pr += 0.0001
else:
corpus_ngram_n_count = self.corpus_count_n['ngrams'].get(ngram_n,0)
corpus_ngram_n_1_count = self.corpus_count_n_1['ngrams'].get(ngram_n_1,0)
pr = self.laplace(corpus_ngram_n_count,
corpus_ngram_n_1_count,
doc_id='')
return pr
def pr_doc(self, doc_id, log=True, logbase=2, doc_length=-1):
''' This method may be overridden by implementers
Here we assume uniform Pr(doc) within Bayes rule
i.e. Pr(doc/string) = Pr(string/doc)
rather than Pr(doc/string) = Pr(string/doc) * Pr(doc)
'''
if log:
return 0
else:
return 1
def calculate(self, doc_terms=[], actual_id='', doc_length=-1):
'''
Given a set of terms, doc_terms
We find the doc in training data (calc_id),
whose LM is more likely to produce those terms
Then return the data structure calculated back
doc_length is passed to pr_ngram() and pr_doc()
it is up to them to use it or not.
normally, it should be ignored if doc_length == -1
calculated{
prob: calculated probability Pr(calc_id/doc_terms)
calc_id: Document ID in training data.
actual_id: Just returned back as passed to us.
seen_unseen_count: Counts for terms seen/unseen in training data
}
'''
calculated = {
'prob': -1,
'calc_id': '',
'actual_id': actual_id,
'seen_unseen_count': (0,0),
'all_probs': []
}
terms = self.lr_padding(doc_terms)
ngrams = self.to_ngrams(terms)
for doc_id in self.term_count_n:
#print '\n', doc_id, ':'
doc_pr = 0
new_doc = True
seen_count = 0
unseen_count = 0
for ngram in ngrams:
(ngram_pr, ngram_seen) = self.pr_ngram(doc_id, ngram,
new_doc=new_doc, log=True, logbase=2, doc_length=doc_length)
doc_pr += ngram_pr
new_doc = False
if ngram_seen:
seen_count += 1
else:
unseen_count += 1
doc_pr += self.pr_doc(doc_id, doc_length=doc_length)
if self.verbose:
print doc_id, actual_id, doc_pr
calculated['all_probs'].append(doc_pr)
if calculated['prob'] == -1 or doc_pr < calculated['prob']:
calculated['prob'] = doc_pr
calculated['calc_id'] = doc_id
calculated['seen_unseen_count'] = (seen_count, unseen_count)
self.unseen_counts.per_doc(doc_id=calculated['actual_id'],
seen_unseen=calculated['seen_unseen_count'])
self.unseen_counts.per_cic(calculated_id=calculated['calc_id'],
actual_id=calculated['actual_id'],
seen_unseen=calculated['seen_unseen_count'])
return calculated
|
gr33ndata/dysl | dysl/dyslib/lm.py | LM.add_doc | python | def add_doc(self, doc_id ='', doc_terms=[], doc_length=-1):
'''
Add new document to our Language Model (training phase)
doc_id is used here, so we build seperate LF for each doc_id
I.e. if you call it more than once with same doc_id,
then all terms given via doc_terms will contribute to same LM
doc_terms: list of words in document to be added
doc_length: the length of the document, you can provide it yourself,
otherwise, we use len(doc_terms) instead.
'''
if doc_length == -1:
self.update_lengths(doc_id=doc_id, doc_length=len(doc_terms))
else:
self.update_lengths(doc_id=doc_id, doc_length=int(doc_length))
for term in doc_terms:
self.vocabulary.add(term)
terms = self.lr_padding(doc_terms)
ngrams = self.to_ngrams(terms)
self.update_counts(doc_id, ngrams) | Add new document to our Language Model (training phase)
doc_id is used here, so we build seperate LF for each doc_id
I.e. if you call it more than once with same doc_id,
then all terms given via doc_terms will contribute to same LM
doc_terms: list of words in document to be added
doc_length: the length of the document, you can provide it yourself,
otherwise, we use len(doc_terms) instead. | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/dyslib/lm.py#L290-L308 | [
"def to_ngrams(self, terms):\n ''' Converts terms to all possibe ngrams \n terms: list of terms\n '''\n if len(terms) <= self.n:\n return terms\n if self.n == 1:\n n_grams = [[term] for term in terms]\n else:\n n_grams = []\n for i in range(0,len(terms)-self.n+1):\n n_grams.append(terms[i:i+self.n])\n return n_grams\n",
"def lr_padding(self, terms):\n '''\n Pad doc from the left and right before adding,\n depending on what's in self.lpad and self.rpad\n If any of them is '', then don't pad there. \n '''\n lpad = rpad = []\n if self.lpad:\n lpad = [self.lpad] * (self.n - 1) \n if self.rpad:\n rpad = [self.rpad] * (self.n - 1) \n return lpad + terms + rpad\n",
"def update_counts(self, doc_id ='', ngrams=[]):\n if not doc_id in self.term_count_n:\n self.term_count_n[doc_id] = {'ngrams': {}, 'total': 0}\n if not doc_id in self.term_count_n_1:\n self.term_count_n_1[doc_id] = {'ngrams': {}, 'total': 0}\n for ngram in ngrams:\n # Generate n-grams and sub-ngrams\n # For example (n=2): ['t1','t2','t3','t4']\n # ngram_n: ['t1 t2','t2 t3', 't3 t4']\n # ngram_n_1: ['t1','t2','t3']\n ngram_n = self.joiner.join(ngram)\n ngram_n_1 = self.joiner.join(ngram[:-1])\n # Update n-gram counts for doc_id\n if ngram_n in self.term_count_n[doc_id]['ngrams']:\n self.term_count_n[doc_id]['ngrams'][ngram_n] += 1\n else:\n self.term_count_n[doc_id]['ngrams'][ngram_n] = 1\n self.term_count_n[doc_id]['total'] += 1\n # Update n-gram counts for corpus\n if ngram_n in self.corpus_count_n['ngrams']:\n self.corpus_count_n['ngrams'][ngram_n] += 1\n else:\n self.corpus_count_n['ngrams'][ngram_n] = 1\n self.corpus_count_n['total'] += 1\n # Update (n-1)-gram counts for doc_id \n if ngram_n_1 in self.term_count_n_1[doc_id]['ngrams']:\n self.term_count_n_1[doc_id]['ngrams'][ngram_n_1] += 1\n else:\n self.term_count_n_1[doc_id]['ngrams'][ngram_n_1] = 1\n self.term_count_n_1[doc_id]['total'] += 1 \n # Update (n-1)-gram counts for corpus\n if ngram_n_1 in self.corpus_count_n_1['ngrams']:\n self.corpus_count_n_1['ngrams'][ngram_n_1] += 1\n else:\n self.corpus_count_n_1['ngrams'][ngram_n_1] = 1\n self.corpus_count_n_1['total'] += 1 \n",
"def update_lengths(self, doc_id ='', doc_length=0):\n if not doc_id in self.doc_lengths:\n self.doc_lengths[doc_id] = {'lengths': [], 'mean': -1}\n self.doc_lengths[doc_id]['lengths'].append(int(doc_length))\n"
] | class LM:
def __init__(self, n=3, lpad='', rpad='',
smoothing='Laplace', laplace_gama=1,
corpus_mix=0, corpus_mode='Miller',
verbose=False):
'''
Initialize our LM
n: Order of ngram LM, e.g. for bigram LM, n=2
lpad, rpad: Left and right padding.
If empty string '', then don't pad, else
For each document read pad terms
with n-1 repitition on lpad and/or rpad
smoothing: 'Laplace' or 'Witten'
laplace_gama: Multiply 1 and V by this factor gamma
corpus_mix: 0 (default) only use document probabilites
: or value between 0 and 1 lambda
: or c (or l) for Log-likelihood Odds model (Cooper LDA)
corpus_mode: 'Hiemstra' or 'Miller'
: This tell us how to calculate pr_corpus(t)
: In fact, 'Hiemstra' makes sense only with Multivaria LM
: Whereas the LM here is Multinomial.
'''
self.n = n
#(self.n, self.m) = n if type(n) == tuple else (n,0)
# Counters for joint probabilities
# Count for w_1, w_2, w_3 ... w_{n-1}, w_n
self.term_count_n = {}
# Count for w_1, w_2, w_3 ... w_{n-1}
self.term_count_n_1 = {}
# To be used in case of mixing doc prob with corpus prob.
self.corpus_count_n = {'ngrams': {}, 'total': 0}
self.corpus_count_n_1 = {'ngrams': {}, 'total': 0}
self.doc_lengths = {}
# The vocabulary of all classes (for Laplace smoothing)
self.vocabulary = set()
self.lpad=lpad
self.rpad=rpad
self.smoothing = smoothing
self.laplace_gama = float(laplace_gama)
if not type(corpus_mix) is str:
self.corpus_mix = min(float(corpus_mix),1)
else:
self.corpus_mix = corpus_mix
self.corpus_mode = corpus_mode
self.joiner = ' '
self.unseen_counts = UnseenTerms()
self.verbose = verbose
def display(self):
'''
Displays statistics about our LM
'''
voc_list = []
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
ngrams = len(self.term_count_n[doc_id]['ngrams'])
print 'n-Grams (doc %s): %d' % (str(doc_id), ngrams)
ngrams1 = len(self.term_count_n_1[doc_id]['ngrams'])
print '(n-1)-Grams (doc %s): %d' % (str(doc_id), ngrams1)
voc_list.append(ngrams)
print 'Classed Vocabularies:', voc_list
print ''
corpus_ngrams = len(self.corpus_count_n['ngrams'])
print 'n-Grams (collection): %d' % (corpus_ngrams)
corpus_ngrams1 = len(self.corpus_count_n_1['ngrams'])
print '(n-1)-Grams (collection): %d' % (corpus_ngrams1)
self.unseen_counts.display()
# Display overlapping n-grams between classes
#self.overlaps()
def get_ngram_counts(self):
''' Returns a list of n-gram counts
Array of classes counts and last item is for corpus
'''
ngram_counts = {
'classes': [],
'corpus': 0
}
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
print self.term_count_n[doc_id]
class_ngrams = len(self.term_count_n[doc_id]['ngrams'])
ngram_counts['classes'].append(class_ngrams)
corpus_ngrams = len(self.corpus_count_n['ngrams'])
ngram_counts['corpus'] = corpus_ngrams
return ngram_counts
def overlaps(self):
omx = []
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
row = [0] * len(doc_ids)
omx.append(row)
for i in range(len(doc_ids)):
doc_id_i = doc_ids[i]
ngrams = len(self.term_count_n[doc_id_i]['ngrams'])
omx[i][i] = ngrams
for j in range(i):
doc_id_j = doc_ids[j]
ongrams = 0
#for ngram in self.term_count_n[doc_id_j]['ngrams']:
for ngram in self.term_count_n[doc_id_i]['ngrams']:
#if ngram in self.term_count_n[doc_id_i]['ngrams']:
if ngram in self.term_count_n[doc_id_j]['ngrams']:
ongrams += 1
omx[i][j] = 0 #ongrams
omx[j][i] = ongrams
print '\nn-gram overlaps:'
print doc_ids
for i in range(len(omx)):
row = []
for j in range(len(omx[i])):
row.append( round( float(omx[i][j] * 100) / omx[i][i],2 ) )
#row.append(omx[i][j])
print row
def to_ngrams(self, terms):
''' Converts terms to all possibe ngrams
terms: list of terms
'''
if len(terms) <= self.n:
return terms
if self.n == 1:
n_grams = [[term] for term in terms]
else:
n_grams = []
for i in range(0,len(terms)-self.n+1):
n_grams.append(terms[i:i+self.n])
return n_grams
def lr_padding(self, terms):
'''
Pad doc from the left and right before adding,
depending on what's in self.lpad and self.rpad
If any of them is '', then don't pad there.
'''
lpad = rpad = []
if self.lpad:
lpad = [self.lpad] * (self.n - 1)
if self.rpad:
rpad = [self.rpad] * (self.n - 1)
return lpad + terms + rpad
def update_counts(self, doc_id ='', ngrams=[]):
if not doc_id in self.term_count_n:
self.term_count_n[doc_id] = {'ngrams': {}, 'total': 0}
if not doc_id in self.term_count_n_1:
self.term_count_n_1[doc_id] = {'ngrams': {}, 'total': 0}
for ngram in ngrams:
# Generate n-grams and sub-ngrams
# For example (n=2): ['t1','t2','t3','t4']
# ngram_n: ['t1 t2','t2 t3', 't3 t4']
# ngram_n_1: ['t1','t2','t3']
ngram_n = self.joiner.join(ngram)
ngram_n_1 = self.joiner.join(ngram[:-1])
# Update n-gram counts for doc_id
if ngram_n in self.term_count_n[doc_id]['ngrams']:
self.term_count_n[doc_id]['ngrams'][ngram_n] += 1
else:
self.term_count_n[doc_id]['ngrams'][ngram_n] = 1
self.term_count_n[doc_id]['total'] += 1
# Update n-gram counts for corpus
if ngram_n in self.corpus_count_n['ngrams']:
self.corpus_count_n['ngrams'][ngram_n] += 1
else:
self.corpus_count_n['ngrams'][ngram_n] = 1
self.corpus_count_n['total'] += 1
# Update (n-1)-gram counts for doc_id
if ngram_n_1 in self.term_count_n_1[doc_id]['ngrams']:
self.term_count_n_1[doc_id]['ngrams'][ngram_n_1] += 1
else:
self.term_count_n_1[doc_id]['ngrams'][ngram_n_1] = 1
self.term_count_n_1[doc_id]['total'] += 1
# Update (n-1)-gram counts for corpus
if ngram_n_1 in self.corpus_count_n_1['ngrams']:
self.corpus_count_n_1['ngrams'][ngram_n_1] += 1
else:
self.corpus_count_n_1['ngrams'][ngram_n_1] = 1
self.corpus_count_n_1['total'] += 1
def update_lengths(self, doc_id ='', doc_length=0):
if not doc_id in self.doc_lengths:
self.doc_lengths[doc_id] = {'lengths': [], 'mean': -1}
self.doc_lengths[doc_id]['lengths'].append(int(doc_length))
def get_mean_lengths(self, doc_id =''):
my_mean_len = 0
others_mean_len = []
for d_id in self.doc_lengths:
if self.doc_lengths[d_id]['mean'] == -1:
dlen = self.doc_lengths[d_id]['lengths']
doc_mean_len = float(sum(dlen)) / len(dlen)
self.doc_lengths[d_id]['mean'] = doc_mean_len
else:
doc_mean_len = self.doc_lengths[d_id]['mean']
if d_id == doc_id:
my_mean_len = doc_mean_len
else:
others_mean_len.append(doc_mean_len)
oth_mean_len = float(sum(others_mean_len)) / len(others_mean_len)
return (my_mean_len, oth_mean_len)
def laplace(self, x, y, doc_id):
add_numer = 1 * self.laplace_gama
laplace_mode = 'n-1'
if laplace_mode == 'ch':
v = len(self.vocabulary)
elif laplace_mode == 'ch^n-1':
v = math.pow(len(self.vocabulary),self.n-1)
else:
if doc_id:
v = len(self.term_count_n_1[doc_id]['ngrams'])
else:
v = len(self.corpus_count_n_1['ngrams'])
add_denom = v * self.laplace_gama
return float(x + add_numer) / float(y + add_denom)
def witten(self, count, n, t, log, new_doc):
self.return_unseen = True if new_doc else False
#print 'Witten (New Doc? %s)' % new_doc
#print 'W:', count, n, t
if count:
return float(count) / (n+t)
elif self.return_unseen:
return float(t) / (n+t)
elif log:
return 1
else:
return 1
def pr_ngram(self, doc_id, ngram, new_doc=False, log=True, logbase=2, doc_length=-1):
ngram_n = self.joiner.join(ngram)
ngram_n_1 = self.joiner.join(ngram[:-1])
ngram_n_count = self.term_count_n[doc_id]['ngrams'].get(ngram_n,0)
ngram_n_1_count = self.term_count_n_1[doc_id]['ngrams'].get(ngram_n_1,0)
#print ngram, ngram_n_count, ngram_n_1_count #DEBUG
# Apply smoothing
if self.smoothing == 'Laplace':
pr = self.laplace(ngram_n_count, ngram_n_1_count, doc_id)
#print pr #DEBUG
if self.corpus_mix == 'c' or self.corpus_mix == 'l':
corpus_ngram_n_count = self.corpus_count_n['ngrams'].get(ngram_n,0)
corpus_ngram_n_1_count = self.corpus_count_n_1['ngrams'].get(ngram_n_1,0)
pr_dash = self.laplace(corpus_ngram_n_count - ngram_n_count,
corpus_ngram_n_1_count - ngram_n_1_count,
doc_id='')
pr_mix = float(pr) / float(pr_dash)
elif type(float(self.corpus_mix)) is float and self.corpus_mix > 0:
pr_corpus = self.pr_corpus(ngram_n, ngram_n_1)
pr_mix = self.corpus_mix * pr_corpus + (1 - self.corpus_mix) * pr
else:
pr_mix = pr
elif self.smoothing == 'Witten':
wittenn = ngram_n_1_count
wittent = len([key for key in self.term_count_n[doc_id]['ngrams'] if key.startswith(ngram_n_1)])
pr = self.witten(ngram_n_count, wittenn, wittent, log, new_doc)
pr_mix = pr
else:
pr = float(ngram_n_count) / float(ngram_n_1_count)
if type(float(self.corpus_mix)) is float and self.corpus_mix > 0:
pr_corpus = self.pr_corpus(ngram_n, ngram_n_1)
pr_mix = self.corpus_mix * pr_corpus + (1 - self.corpus_mix) * pr
else:
pr_mix = pr
# Update seen/unseen counts
if ngram_n_count:
seen = True
else:
seen = False
if log:
return (-math.log(pr_mix,logbase),seen)
else:
return (pr_mix,seen)
def pr_corpus(self, ngram_n, ngram_n_1):
if self.corpus_mode == 'Hiemstra':
df = 0
df_total = 0
for doc_id in self.term_count_n_1:
if self.term_count_n[doc_id]['ngrams'].get(ngram_n,0):
df += 1
df_total += 1
pr = float(df) / float(df_total)
if not pr:
pr += 0.0001
else:
corpus_ngram_n_count = self.corpus_count_n['ngrams'].get(ngram_n,0)
corpus_ngram_n_1_count = self.corpus_count_n_1['ngrams'].get(ngram_n_1,0)
pr = self.laplace(corpus_ngram_n_count,
corpus_ngram_n_1_count,
doc_id='')
return pr
def pr_doc(self, doc_id, log=True, logbase=2, doc_length=-1):
''' This method may be overridden by implementers
Here we assume uniform Pr(doc) within Bayes rule
i.e. Pr(doc/string) = Pr(string/doc)
rather than Pr(doc/string) = Pr(string/doc) * Pr(doc)
'''
if log:
return 0
else:
return 1
def calculate(self, doc_terms=[], actual_id='', doc_length=-1):
'''
Given a set of terms, doc_terms
We find the doc in training data (calc_id),
whose LM is more likely to produce those terms
Then return the data structure calculated back
doc_length is passed to pr_ngram() and pr_doc()
it is up to them to use it or not.
normally, it should be ignored if doc_length == -1
calculated{
prob: calculated probability Pr(calc_id/doc_terms)
calc_id: Document ID in training data.
actual_id: Just returned back as passed to us.
seen_unseen_count: Counts for terms seen/unseen in training data
}
'''
calculated = {
'prob': -1,
'calc_id': '',
'actual_id': actual_id,
'seen_unseen_count': (0,0),
'all_probs': []
}
terms = self.lr_padding(doc_terms)
ngrams = self.to_ngrams(terms)
for doc_id in self.term_count_n:
#print '\n', doc_id, ':'
doc_pr = 0
new_doc = True
seen_count = 0
unseen_count = 0
for ngram in ngrams:
(ngram_pr, ngram_seen) = self.pr_ngram(doc_id, ngram,
new_doc=new_doc, log=True, logbase=2, doc_length=doc_length)
doc_pr += ngram_pr
new_doc = False
if ngram_seen:
seen_count += 1
else:
unseen_count += 1
doc_pr += self.pr_doc(doc_id, doc_length=doc_length)
if self.verbose:
print doc_id, actual_id, doc_pr
calculated['all_probs'].append(doc_pr)
if calculated['prob'] == -1 or doc_pr < calculated['prob']:
calculated['prob'] = doc_pr
calculated['calc_id'] = doc_id
calculated['seen_unseen_count'] = (seen_count, unseen_count)
self.unseen_counts.per_doc(doc_id=calculated['actual_id'],
seen_unseen=calculated['seen_unseen_count'])
self.unseen_counts.per_cic(calculated_id=calculated['calc_id'],
actual_id=calculated['actual_id'],
seen_unseen=calculated['seen_unseen_count'])
return calculated
|
gr33ndata/dysl | dysl/dyslib/lm.py | LM.calculate | python | def calculate(self, doc_terms=[], actual_id='', doc_length=-1):
'''
Given a set of terms, doc_terms
We find the doc in training data (calc_id),
whose LM is more likely to produce those terms
Then return the data structure calculated back
doc_length is passed to pr_ngram() and pr_doc()
it is up to them to use it or not.
normally, it should be ignored if doc_length == -1
calculated{
prob: calculated probability Pr(calc_id/doc_terms)
calc_id: Document ID in training data.
actual_id: Just returned back as passed to us.
seen_unseen_count: Counts for terms seen/unseen in training data
}
'''
calculated = {
'prob': -1,
'calc_id': '',
'actual_id': actual_id,
'seen_unseen_count': (0,0),
'all_probs': []
}
terms = self.lr_padding(doc_terms)
ngrams = self.to_ngrams(terms)
for doc_id in self.term_count_n:
#print '\n', doc_id, ':'
doc_pr = 0
new_doc = True
seen_count = 0
unseen_count = 0
for ngram in ngrams:
(ngram_pr, ngram_seen) = self.pr_ngram(doc_id, ngram,
new_doc=new_doc, log=True, logbase=2, doc_length=doc_length)
doc_pr += ngram_pr
new_doc = False
if ngram_seen:
seen_count += 1
else:
unseen_count += 1
doc_pr += self.pr_doc(doc_id, doc_length=doc_length)
if self.verbose:
print doc_id, actual_id, doc_pr
calculated['all_probs'].append(doc_pr)
if calculated['prob'] == -1 or doc_pr < calculated['prob']:
calculated['prob'] = doc_pr
calculated['calc_id'] = doc_id
calculated['seen_unseen_count'] = (seen_count, unseen_count)
self.unseen_counts.per_doc(doc_id=calculated['actual_id'],
seen_unseen=calculated['seen_unseen_count'])
self.unseen_counts.per_cic(calculated_id=calculated['calc_id'],
actual_id=calculated['actual_id'],
seen_unseen=calculated['seen_unseen_count'])
return calculated | Given a set of terms, doc_terms
We find the doc in training data (calc_id),
whose LM is more likely to produce those terms
Then return the data structure calculated back
doc_length is passed to pr_ngram() and pr_doc()
it is up to them to use it or not.
normally, it should be ignored if doc_length == -1
calculated{
prob: calculated probability Pr(calc_id/doc_terms)
calc_id: Document ID in training data.
actual_id: Just returned back as passed to us.
seen_unseen_count: Counts for terms seen/unseen in training data
} | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/dyslib/lm.py#L413-L466 | [
"def per_cic(self, calculated_id='', actual_id='', seen_unseen=None):\n calculated_id = str(calculated_id).strip()\n actual_id =str(actual_id).strip()\n unseen_ratio_str = self._seen_unseel_label(*seen_unseen)\n if not unseen_ratio_str in self.cic_counts:\n self.cic_counts[unseen_ratio_str] = [0,0]\n if calculated_id == actual_id:\n self.cic_counts[unseen_ratio_str][0] += 1\n else:\n self.cic_counts[unseen_ratio_str][1] += 1\n",
"def per_doc(self, doc_id='', seen_unseen=None):\n if not doc_id in self.doc_counts:\n self.doc_counts[doc_id] = {\n 'seen': 0,\n 'unseen': 0\n } \n self.doc_counts[doc_id]['seen'] += seen_unseen[0] \n self.doc_counts[doc_id]['unseen'] += seen_unseen[1] \n",
"def to_ngrams(self, terms):\n ''' Converts terms to all possibe ngrams \n terms: list of terms\n '''\n if len(terms) <= self.n:\n return terms\n if self.n == 1:\n n_grams = [[term] for term in terms]\n else:\n n_grams = []\n for i in range(0,len(terms)-self.n+1):\n n_grams.append(terms[i:i+self.n])\n return n_grams\n",
"def lr_padding(self, terms):\n '''\n Pad doc from the left and right before adding,\n depending on what's in self.lpad and self.rpad\n If any of them is '', then don't pad there. \n '''\n lpad = rpad = []\n if self.lpad:\n lpad = [self.lpad] * (self.n - 1) \n if self.rpad:\n rpad = [self.rpad] * (self.n - 1) \n return lpad + terms + rpad\n",
"def pr_ngram(self, doc_id, ngram, new_doc=False, log=True, logbase=2, doc_length=-1):\n ngram_n = self.joiner.join(ngram)\n ngram_n_1 = self.joiner.join(ngram[:-1])\n ngram_n_count = self.term_count_n[doc_id]['ngrams'].get(ngram_n,0)\n ngram_n_1_count = self.term_count_n_1[doc_id]['ngrams'].get(ngram_n_1,0)\n #print ngram, ngram_n_count, ngram_n_1_count #DEBUG\n # Apply smoothing\n if self.smoothing == 'Laplace':\n pr = self.laplace(ngram_n_count, ngram_n_1_count, doc_id)\n #print pr #DEBUG\n if self.corpus_mix == 'c' or self.corpus_mix == 'l':\n corpus_ngram_n_count = self.corpus_count_n['ngrams'].get(ngram_n,0)\n corpus_ngram_n_1_count = self.corpus_count_n_1['ngrams'].get(ngram_n_1,0)\n pr_dash = self.laplace(corpus_ngram_n_count - ngram_n_count,\n corpus_ngram_n_1_count - ngram_n_1_count,\n doc_id='')\n pr_mix = float(pr) / float(pr_dash) \n elif type(float(self.corpus_mix)) is float and self.corpus_mix > 0: \n pr_corpus = self.pr_corpus(ngram_n, ngram_n_1)\n pr_mix = self.corpus_mix * pr_corpus + (1 - self.corpus_mix) * pr\n else:\n pr_mix = pr\n elif self.smoothing == 'Witten':\n wittenn = ngram_n_1_count\n wittent = len([key for key in self.term_count_n[doc_id]['ngrams'] if key.startswith(ngram_n_1)])\n pr = self.witten(ngram_n_count, wittenn, wittent, log, new_doc)\n pr_mix = pr\n else:\n pr = float(ngram_n_count) / float(ngram_n_1_count)\n if type(float(self.corpus_mix)) is float and self.corpus_mix > 0: \n pr_corpus = self.pr_corpus(ngram_n, ngram_n_1)\n pr_mix = self.corpus_mix * pr_corpus + (1 - self.corpus_mix) * pr\n else:\n pr_mix = pr\n\n # Update seen/unseen counts\n if ngram_n_count:\n seen = True \n else:\n seen = False\n if log:\n return (-math.log(pr_mix,logbase),seen)\n else:\n return (pr_mix,seen)\n",
"def pr_doc(self, doc_id, log=True, logbase=2, doc_length=-1):\n ''' This method may be overridden by implementers\n Here we assume uniform Pr(doc) within Bayes rule\n i.e. Pr(doc/string) = Pr(string/doc)\n rather than Pr(doc/string) = Pr(string/doc) * Pr(doc)\n ''' \n if log:\n return 0\n else:\n return 1\n"
] | class LM:
def __init__(self, n=3, lpad='', rpad='',
smoothing='Laplace', laplace_gama=1,
corpus_mix=0, corpus_mode='Miller',
verbose=False):
'''
Initialize our LM
n: Order of ngram LM, e.g. for bigram LM, n=2
lpad, rpad: Left and right padding.
If empty string '', then don't pad, else
For each document read pad terms
with n-1 repitition on lpad and/or rpad
smoothing: 'Laplace' or 'Witten'
laplace_gama: Multiply 1 and V by this factor gamma
corpus_mix: 0 (default) only use document probabilites
: or value between 0 and 1 lambda
: or c (or l) for Log-likelihood Odds model (Cooper LDA)
corpus_mode: 'Hiemstra' or 'Miller'
: This tell us how to calculate pr_corpus(t)
: In fact, 'Hiemstra' makes sense only with Multivaria LM
: Whereas the LM here is Multinomial.
'''
self.n = n
#(self.n, self.m) = n if type(n) == tuple else (n,0)
# Counters for joint probabilities
# Count for w_1, w_2, w_3 ... w_{n-1}, w_n
self.term_count_n = {}
# Count for w_1, w_2, w_3 ... w_{n-1}
self.term_count_n_1 = {}
# To be used in case of mixing doc prob with corpus prob.
self.corpus_count_n = {'ngrams': {}, 'total': 0}
self.corpus_count_n_1 = {'ngrams': {}, 'total': 0}
self.doc_lengths = {}
# The vocabulary of all classes (for Laplace smoothing)
self.vocabulary = set()
self.lpad=lpad
self.rpad=rpad
self.smoothing = smoothing
self.laplace_gama = float(laplace_gama)
if not type(corpus_mix) is str:
self.corpus_mix = min(float(corpus_mix),1)
else:
self.corpus_mix = corpus_mix
self.corpus_mode = corpus_mode
self.joiner = ' '
self.unseen_counts = UnseenTerms()
self.verbose = verbose
def display(self):
'''
Displays statistics about our LM
'''
voc_list = []
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
ngrams = len(self.term_count_n[doc_id]['ngrams'])
print 'n-Grams (doc %s): %d' % (str(doc_id), ngrams)
ngrams1 = len(self.term_count_n_1[doc_id]['ngrams'])
print '(n-1)-Grams (doc %s): %d' % (str(doc_id), ngrams1)
voc_list.append(ngrams)
print 'Classed Vocabularies:', voc_list
print ''
corpus_ngrams = len(self.corpus_count_n['ngrams'])
print 'n-Grams (collection): %d' % (corpus_ngrams)
corpus_ngrams1 = len(self.corpus_count_n_1['ngrams'])
print '(n-1)-Grams (collection): %d' % (corpus_ngrams1)
self.unseen_counts.display()
# Display overlapping n-grams between classes
#self.overlaps()
def get_ngram_counts(self):
''' Returns a list of n-gram counts
Array of classes counts and last item is for corpus
'''
ngram_counts = {
'classes': [],
'corpus': 0
}
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
print self.term_count_n[doc_id]
class_ngrams = len(self.term_count_n[doc_id]['ngrams'])
ngram_counts['classes'].append(class_ngrams)
corpus_ngrams = len(self.corpus_count_n['ngrams'])
ngram_counts['corpus'] = corpus_ngrams
return ngram_counts
def overlaps(self):
omx = []
doc_ids = self.term_count_n.keys()
doc_ids.sort()
for doc_id in doc_ids:
row = [0] * len(doc_ids)
omx.append(row)
for i in range(len(doc_ids)):
doc_id_i = doc_ids[i]
ngrams = len(self.term_count_n[doc_id_i]['ngrams'])
omx[i][i] = ngrams
for j in range(i):
doc_id_j = doc_ids[j]
ongrams = 0
#for ngram in self.term_count_n[doc_id_j]['ngrams']:
for ngram in self.term_count_n[doc_id_i]['ngrams']:
#if ngram in self.term_count_n[doc_id_i]['ngrams']:
if ngram in self.term_count_n[doc_id_j]['ngrams']:
ongrams += 1
omx[i][j] = 0 #ongrams
omx[j][i] = ongrams
print '\nn-gram overlaps:'
print doc_ids
for i in range(len(omx)):
row = []
for j in range(len(omx[i])):
row.append( round( float(omx[i][j] * 100) / omx[i][i],2 ) )
#row.append(omx[i][j])
print row
def to_ngrams(self, terms):
''' Converts terms to all possibe ngrams
terms: list of terms
'''
if len(terms) <= self.n:
return terms
if self.n == 1:
n_grams = [[term] for term in terms]
else:
n_grams = []
for i in range(0,len(terms)-self.n+1):
n_grams.append(terms[i:i+self.n])
return n_grams
def lr_padding(self, terms):
'''
Pad doc from the left and right before adding,
depending on what's in self.lpad and self.rpad
If any of them is '', then don't pad there.
'''
lpad = rpad = []
if self.lpad:
lpad = [self.lpad] * (self.n - 1)
if self.rpad:
rpad = [self.rpad] * (self.n - 1)
return lpad + terms + rpad
def update_counts(self, doc_id ='', ngrams=[]):
if not doc_id in self.term_count_n:
self.term_count_n[doc_id] = {'ngrams': {}, 'total': 0}
if not doc_id in self.term_count_n_1:
self.term_count_n_1[doc_id] = {'ngrams': {}, 'total': 0}
for ngram in ngrams:
# Generate n-grams and sub-ngrams
# For example (n=2): ['t1','t2','t3','t4']
# ngram_n: ['t1 t2','t2 t3', 't3 t4']
# ngram_n_1: ['t1','t2','t3']
ngram_n = self.joiner.join(ngram)
ngram_n_1 = self.joiner.join(ngram[:-1])
# Update n-gram counts for doc_id
if ngram_n in self.term_count_n[doc_id]['ngrams']:
self.term_count_n[doc_id]['ngrams'][ngram_n] += 1
else:
self.term_count_n[doc_id]['ngrams'][ngram_n] = 1
self.term_count_n[doc_id]['total'] += 1
# Update n-gram counts for corpus
if ngram_n in self.corpus_count_n['ngrams']:
self.corpus_count_n['ngrams'][ngram_n] += 1
else:
self.corpus_count_n['ngrams'][ngram_n] = 1
self.corpus_count_n['total'] += 1
# Update (n-1)-gram counts for doc_id
if ngram_n_1 in self.term_count_n_1[doc_id]['ngrams']:
self.term_count_n_1[doc_id]['ngrams'][ngram_n_1] += 1
else:
self.term_count_n_1[doc_id]['ngrams'][ngram_n_1] = 1
self.term_count_n_1[doc_id]['total'] += 1
# Update (n-1)-gram counts for corpus
if ngram_n_1 in self.corpus_count_n_1['ngrams']:
self.corpus_count_n_1['ngrams'][ngram_n_1] += 1
else:
self.corpus_count_n_1['ngrams'][ngram_n_1] = 1
self.corpus_count_n_1['total'] += 1
def update_lengths(self, doc_id ='', doc_length=0):
if not doc_id in self.doc_lengths:
self.doc_lengths[doc_id] = {'lengths': [], 'mean': -1}
self.doc_lengths[doc_id]['lengths'].append(int(doc_length))
def get_mean_lengths(self, doc_id =''):
my_mean_len = 0
others_mean_len = []
for d_id in self.doc_lengths:
if self.doc_lengths[d_id]['mean'] == -1:
dlen = self.doc_lengths[d_id]['lengths']
doc_mean_len = float(sum(dlen)) / len(dlen)
self.doc_lengths[d_id]['mean'] = doc_mean_len
else:
doc_mean_len = self.doc_lengths[d_id]['mean']
if d_id == doc_id:
my_mean_len = doc_mean_len
else:
others_mean_len.append(doc_mean_len)
oth_mean_len = float(sum(others_mean_len)) / len(others_mean_len)
return (my_mean_len, oth_mean_len)
def add_doc(self, doc_id ='', doc_terms=[], doc_length=-1):
'''
Add new document to our Language Model (training phase)
doc_id is used here, so we build seperate LF for each doc_id
I.e. if you call it more than once with same doc_id,
then all terms given via doc_terms will contribute to same LM
doc_terms: list of words in document to be added
doc_length: the length of the document, you can provide it yourself,
otherwise, we use len(doc_terms) instead.
'''
if doc_length == -1:
self.update_lengths(doc_id=doc_id, doc_length=len(doc_terms))
else:
self.update_lengths(doc_id=doc_id, doc_length=int(doc_length))
for term in doc_terms:
self.vocabulary.add(term)
terms = self.lr_padding(doc_terms)
ngrams = self.to_ngrams(terms)
self.update_counts(doc_id, ngrams)
def laplace(self, x, y, doc_id):
add_numer = 1 * self.laplace_gama
laplace_mode = 'n-1'
if laplace_mode == 'ch':
v = len(self.vocabulary)
elif laplace_mode == 'ch^n-1':
v = math.pow(len(self.vocabulary),self.n-1)
else:
if doc_id:
v = len(self.term_count_n_1[doc_id]['ngrams'])
else:
v = len(self.corpus_count_n_1['ngrams'])
add_denom = v * self.laplace_gama
return float(x + add_numer) / float(y + add_denom)
def witten(self, count, n, t, log, new_doc):
self.return_unseen = True if new_doc else False
#print 'Witten (New Doc? %s)' % new_doc
#print 'W:', count, n, t
if count:
return float(count) / (n+t)
elif self.return_unseen:
return float(t) / (n+t)
elif log:
return 1
else:
return 1
def pr_ngram(self, doc_id, ngram, new_doc=False, log=True, logbase=2, doc_length=-1):
ngram_n = self.joiner.join(ngram)
ngram_n_1 = self.joiner.join(ngram[:-1])
ngram_n_count = self.term_count_n[doc_id]['ngrams'].get(ngram_n,0)
ngram_n_1_count = self.term_count_n_1[doc_id]['ngrams'].get(ngram_n_1,0)
#print ngram, ngram_n_count, ngram_n_1_count #DEBUG
# Apply smoothing
if self.smoothing == 'Laplace':
pr = self.laplace(ngram_n_count, ngram_n_1_count, doc_id)
#print pr #DEBUG
if self.corpus_mix == 'c' or self.corpus_mix == 'l':
corpus_ngram_n_count = self.corpus_count_n['ngrams'].get(ngram_n,0)
corpus_ngram_n_1_count = self.corpus_count_n_1['ngrams'].get(ngram_n_1,0)
pr_dash = self.laplace(corpus_ngram_n_count - ngram_n_count,
corpus_ngram_n_1_count - ngram_n_1_count,
doc_id='')
pr_mix = float(pr) / float(pr_dash)
elif type(float(self.corpus_mix)) is float and self.corpus_mix > 0:
pr_corpus = self.pr_corpus(ngram_n, ngram_n_1)
pr_mix = self.corpus_mix * pr_corpus + (1 - self.corpus_mix) * pr
else:
pr_mix = pr
elif self.smoothing == 'Witten':
wittenn = ngram_n_1_count
wittent = len([key for key in self.term_count_n[doc_id]['ngrams'] if key.startswith(ngram_n_1)])
pr = self.witten(ngram_n_count, wittenn, wittent, log, new_doc)
pr_mix = pr
else:
pr = float(ngram_n_count) / float(ngram_n_1_count)
if type(float(self.corpus_mix)) is float and self.corpus_mix > 0:
pr_corpus = self.pr_corpus(ngram_n, ngram_n_1)
pr_mix = self.corpus_mix * pr_corpus + (1 - self.corpus_mix) * pr
else:
pr_mix = pr
# Update seen/unseen counts
if ngram_n_count:
seen = True
else:
seen = False
if log:
return (-math.log(pr_mix,logbase),seen)
else:
return (pr_mix,seen)
def pr_corpus(self, ngram_n, ngram_n_1):
if self.corpus_mode == 'Hiemstra':
df = 0
df_total = 0
for doc_id in self.term_count_n_1:
if self.term_count_n[doc_id]['ngrams'].get(ngram_n,0):
df += 1
df_total += 1
pr = float(df) / float(df_total)
if not pr:
pr += 0.0001
else:
corpus_ngram_n_count = self.corpus_count_n['ngrams'].get(ngram_n,0)
corpus_ngram_n_1_count = self.corpus_count_n_1['ngrams'].get(ngram_n_1,0)
pr = self.laplace(corpus_ngram_n_count,
corpus_ngram_n_1_count,
doc_id='')
return pr
def pr_doc(self, doc_id, log=True, logbase=2, doc_length=-1):
''' This method may be overridden by implementers
Here we assume uniform Pr(doc) within Bayes rule
i.e. Pr(doc/string) = Pr(string/doc)
rather than Pr(doc/string) = Pr(string/doc) * Pr(doc)
'''
if log:
return 0
else:
return 1
|
gr33ndata/dysl | dysl/social.py | SocialLM.tokenize | python | def tokenize(cls, text, mode='c'):
if mode == 'c':
return [ch for ch in text]
else:
return [w for w in text.split()] | Converts text into tokens
:param text: string to be tokenized
:param mode: split into chars (c) or words (w) | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L15-L24 | null | class SocialLM(LM):
""" Social Media Flavoured Language Model
"""
@classmethod
def karbasa(self, result):
""" Finding if class probabilities are close to eachother
Ratio of the distance between 1st and 2nd class,
to the distance between 1st and last class.
:param result: The dict returned by LM.calculate()
"""
probs = result['all_probs']
probs.sort()
return float(probs[1] - probs[0]) / float(probs[-1] - probs[0])
def classify(self, text=u''):
""" Predicts the Language of a given text.
:param text: Unicode text to be classified.
"""
result = self.calculate(doc_terms=self.tokenize(text))
#return (result['calc_id'], result)
return (result['calc_id'], self.karbasa(result))
@classmethod
def is_mention_line(cls, word):
""" Detects links and mentions
:param word: Token to be evaluated
"""
if word.startswith('@'):
return True
elif word.startswith('http://'):
return True
elif word.startswith('https://'):
return True
else:
return False
def strip_mentions_links(self, text):
""" Strips Mentions and Links
:param text: Text to be stripped from.
"""
#print 'Before:', text
new_text = [word for word in text.split() if not self.is_mention_line(word)]
#print 'After:', u' '.join(new_text)
return u' '.join(new_text)
def normalize(self, text):
""" Normalizes text.
Converts to lowercase,
Unicode NFC normalization
and removes mentions and links
:param text: Text to be normalized.
"""
#print 'Normalize...\n'
text = text.lower()
text = unicodedata.normalize('NFC', text)
text = self.strip_mentions_links(text)
return text
|
gr33ndata/dysl | dysl/social.py | SocialLM.karbasa | python | def karbasa(self, result):
probs = result['all_probs']
probs.sort()
return float(probs[1] - probs[0]) / float(probs[-1] - probs[0]) | Finding if class probabilities are close to eachother
Ratio of the distance between 1st and 2nd class,
to the distance between 1st and last class.
:param result: The dict returned by LM.calculate() | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L26-L35 | null | class SocialLM(LM):
""" Social Media Flavoured Language Model
"""
@classmethod
def tokenize(cls, text, mode='c'):
""" Converts text into tokens
:param text: string to be tokenized
:param mode: split into chars (c) or words (w)
"""
if mode == 'c':
return [ch for ch in text]
else:
return [w for w in text.split()]
def classify(self, text=u''):
""" Predicts the Language of a given text.
:param text: Unicode text to be classified.
"""
result = self.calculate(doc_terms=self.tokenize(text))
#return (result['calc_id'], result)
return (result['calc_id'], self.karbasa(result))
@classmethod
def is_mention_line(cls, word):
""" Detects links and mentions
:param word: Token to be evaluated
"""
if word.startswith('@'):
return True
elif word.startswith('http://'):
return True
elif word.startswith('https://'):
return True
else:
return False
def strip_mentions_links(self, text):
""" Strips Mentions and Links
:param text: Text to be stripped from.
"""
#print 'Before:', text
new_text = [word for word in text.split() if not self.is_mention_line(word)]
#print 'After:', u' '.join(new_text)
return u' '.join(new_text)
def normalize(self, text):
""" Normalizes text.
Converts to lowercase,
Unicode NFC normalization
and removes mentions and links
:param text: Text to be normalized.
"""
#print 'Normalize...\n'
text = text.lower()
text = unicodedata.normalize('NFC', text)
text = self.strip_mentions_links(text)
return text
|
gr33ndata/dysl | dysl/social.py | SocialLM.classify | python | def classify(self, text=u''):
result = self.calculate(doc_terms=self.tokenize(text))
#return (result['calc_id'], result)
return (result['calc_id'], self.karbasa(result)) | Predicts the Language of a given text.
:param text: Unicode text to be classified. | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L37-L44 | null | class SocialLM(LM):
""" Social Media Flavoured Language Model
"""
@classmethod
def tokenize(cls, text, mode='c'):
""" Converts text into tokens
:param text: string to be tokenized
:param mode: split into chars (c) or words (w)
"""
if mode == 'c':
return [ch for ch in text]
else:
return [w for w in text.split()]
def karbasa(self, result):
""" Finding if class probabilities are close to eachother
Ratio of the distance between 1st and 2nd class,
to the distance between 1st and last class.
:param result: The dict returned by LM.calculate()
"""
probs = result['all_probs']
probs.sort()
return float(probs[1] - probs[0]) / float(probs[-1] - probs[0])
@classmethod
def is_mention_line(cls, word):
""" Detects links and mentions
:param word: Token to be evaluated
"""
if word.startswith('@'):
return True
elif word.startswith('http://'):
return True
elif word.startswith('https://'):
return True
else:
return False
def strip_mentions_links(self, text):
""" Strips Mentions and Links
:param text: Text to be stripped from.
"""
#print 'Before:', text
new_text = [word for word in text.split() if not self.is_mention_line(word)]
#print 'After:', u' '.join(new_text)
return u' '.join(new_text)
def normalize(self, text):
""" Normalizes text.
Converts to lowercase,
Unicode NFC normalization
and removes mentions and links
:param text: Text to be normalized.
"""
#print 'Normalize...\n'
text = text.lower()
text = unicodedata.normalize('NFC', text)
text = self.strip_mentions_links(text)
return text
|
gr33ndata/dysl | dysl/social.py | SocialLM.is_mention_line | python | def is_mention_line(cls, word):
if word.startswith('@'):
return True
elif word.startswith('http://'):
return True
elif word.startswith('https://'):
return True
else:
return False | Detects links and mentions
:param word: Token to be evaluated | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L47-L59 | null | class SocialLM(LM):
""" Social Media Flavoured Language Model
"""
@classmethod
def tokenize(cls, text, mode='c'):
""" Converts text into tokens
:param text: string to be tokenized
:param mode: split into chars (c) or words (w)
"""
if mode == 'c':
return [ch for ch in text]
else:
return [w for w in text.split()]
def karbasa(self, result):
""" Finding if class probabilities are close to eachother
Ratio of the distance between 1st and 2nd class,
to the distance between 1st and last class.
:param result: The dict returned by LM.calculate()
"""
probs = result['all_probs']
probs.sort()
return float(probs[1] - probs[0]) / float(probs[-1] - probs[0])
def classify(self, text=u''):
""" Predicts the Language of a given text.
:param text: Unicode text to be classified.
"""
result = self.calculate(doc_terms=self.tokenize(text))
#return (result['calc_id'], result)
return (result['calc_id'], self.karbasa(result))
@classmethod
def strip_mentions_links(self, text):
""" Strips Mentions and Links
:param text: Text to be stripped from.
"""
#print 'Before:', text
new_text = [word for word in text.split() if not self.is_mention_line(word)]
#print 'After:', u' '.join(new_text)
return u' '.join(new_text)
def normalize(self, text):
""" Normalizes text.
Converts to lowercase,
Unicode NFC normalization
and removes mentions and links
:param text: Text to be normalized.
"""
#print 'Normalize...\n'
text = text.lower()
text = unicodedata.normalize('NFC', text)
text = self.strip_mentions_links(text)
return text
|
gr33ndata/dysl | dysl/social.py | SocialLM.strip_mentions_links | python | def strip_mentions_links(self, text):
#print 'Before:', text
new_text = [word for word in text.split() if not self.is_mention_line(word)]
#print 'After:', u' '.join(new_text)
return u' '.join(new_text) | Strips Mentions and Links
:param text: Text to be stripped from. | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L61-L69 | null | class SocialLM(LM):
""" Social Media Flavoured Language Model
"""
@classmethod
def tokenize(cls, text, mode='c'):
""" Converts text into tokens
:param text: string to be tokenized
:param mode: split into chars (c) or words (w)
"""
if mode == 'c':
return [ch for ch in text]
else:
return [w for w in text.split()]
def karbasa(self, result):
""" Finding if class probabilities are close to eachother
Ratio of the distance between 1st and 2nd class,
to the distance between 1st and last class.
:param result: The dict returned by LM.calculate()
"""
probs = result['all_probs']
probs.sort()
return float(probs[1] - probs[0]) / float(probs[-1] - probs[0])
def classify(self, text=u''):
""" Predicts the Language of a given text.
:param text: Unicode text to be classified.
"""
result = self.calculate(doc_terms=self.tokenize(text))
#return (result['calc_id'], result)
return (result['calc_id'], self.karbasa(result))
@classmethod
def is_mention_line(cls, word):
""" Detects links and mentions
:param word: Token to be evaluated
"""
if word.startswith('@'):
return True
elif word.startswith('http://'):
return True
elif word.startswith('https://'):
return True
else:
return False
def normalize(self, text):
""" Normalizes text.
Converts to lowercase,
Unicode NFC normalization
and removes mentions and links
:param text: Text to be normalized.
"""
#print 'Normalize...\n'
text = text.lower()
text = unicodedata.normalize('NFC', text)
text = self.strip_mentions_links(text)
return text
|
gr33ndata/dysl | dysl/social.py | SocialLM.normalize | python | def normalize(self, text):
#print 'Normalize...\n'
text = text.lower()
text = unicodedata.normalize('NFC', text)
text = self.strip_mentions_links(text)
return text | Normalizes text.
Converts to lowercase,
Unicode NFC normalization
and removes mentions and links
:param text: Text to be normalized. | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/social.py#L71-L83 | null | class SocialLM(LM):
""" Social Media Flavoured Language Model
"""
@classmethod
def tokenize(cls, text, mode='c'):
""" Converts text into tokens
:param text: string to be tokenized
:param mode: split into chars (c) or words (w)
"""
if mode == 'c':
return [ch for ch in text]
else:
return [w for w in text.split()]
def karbasa(self, result):
""" Finding if class probabilities are close to eachother
Ratio of the distance between 1st and 2nd class,
to the distance between 1st and last class.
:param result: The dict returned by LM.calculate()
"""
probs = result['all_probs']
probs.sort()
return float(probs[1] - probs[0]) / float(probs[-1] - probs[0])
def classify(self, text=u''):
""" Predicts the Language of a given text.
:param text: Unicode text to be classified.
"""
result = self.calculate(doc_terms=self.tokenize(text))
#return (result['calc_id'], result)
return (result['calc_id'], self.karbasa(result))
@classmethod
def is_mention_line(cls, word):
""" Detects links and mentions
:param word: Token to be evaluated
"""
if word.startswith('@'):
return True
elif word.startswith('http://'):
return True
elif word.startswith('https://'):
return True
else:
return False
def strip_mentions_links(self, text):
""" Strips Mentions and Links
:param text: Text to be stripped from.
"""
#print 'Before:', text
new_text = [word for word in text.split() if not self.is_mention_line(word)]
#print 'After:', u' '.join(new_text)
return u' '.join(new_text)
|
gr33ndata/dysl | dysl/utils.py | decode_input | python | def decode_input(text_in):
if type(text_in) == list:
text_out = u' '.join([t.decode('utf-8') for t in text_in])
else:
text_out = text_in.decode('utf-8')
return text_out | Decodes `text_in`
If text_in is is a string,
then decode it as utf-8 string.
If text_in is is a list of strings,
then decode each string of it,
then combine them into one outpust string. | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/utils.py#L1-L14 | null | def decode_input(text_in):
""" Decodes `text_in`
If text_in is is a string,
then decode it as utf-8 string.
If text_in is is a list of strings,
then decode each string of it,
then combine them into one outpust string.
"""
if type(text_in) == list:
text_out = u' '.join([t.decode('utf-8') for t in text_in])
else:
text_out = text_in.decode('utf-8')
return text_out |
gr33ndata/dysl | dysl/langid.py | LangID._readfile | python | def _readfile(cls, filename):
f = codecs.open(filename, encoding='utf-8')
filedata = f.read()
f.close()
tokenz = LM.tokenize(filedata, mode='c')
#print tokenz
return tokenz | Reads a file a utf-8 file,
and retuns character tokens.
:param filename: Name of file to be read. | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/langid.py#L40-L51 | null | class LangID:
""" Language Identification Class
"""
def __init__(self, unk=False):
# Shall we mark some text as unk,
# if top languages are too close?
self.unk = unk
self.min_karbasa = 0.08
# LM Parameters
ngram = 3
lrpad = u' '
verbose = False
corpus_mix = 'l'
self.lm = LM(n=ngram, verbose=verbose, lpad=lrpad, rpad=lrpad,
smoothing='Laplace', laplace_gama=0.1,
corpus_mix=corpus_mix)
self.trainer = None
self.training_timestamp = 0
@classmethod
def train(self, root=''):
""" Trains our Language Model.
:param root: Path to training data.
"""
self.trainer = Train(root=root)
corpus = self.trainer.get_corpus()
# Show loaded Languages
#print 'Lang Set: ' + ' '.join(train.get_lang_set())
for item in corpus:
self.lm.add_doc(doc_id=item[0], doc_terms=self._readfile(item[1]))
# Save training timestamp
self.training_timestamp = self.trainer.get_last_modified()
def is_training_modified(self):
""" Returns `True` if training data
was modified since last training.
Returns `False` otherwise,
or if using builtin training data.
"""
last_modified = self.trainer.get_last_modified()
if last_modified > self.training_timestamp:
return True
else:
return False
def was_training_modified(self):
""" For Grammar Nazis,
just alias for is_training_modified()
"""
return self.is_training_modified()
def add_training_sample(self, text=u'', lang=''):
""" Initial step for adding new sample to training data.
You need to call `save_training_samples()` afterwards.
:param text: Sample text to be added.
:param lang: Language label for the input text.
"""
self.trainer.add(text=text, lang=lang)
def save_training_samples(self, domain='', filename=''):
""" Saves data previously added via add_training_sample().
Data saved in folder specified by Train.get_corpus_path().
:param domain: Name for domain folder.
If not set, current timestamp will be used.
:param filename: Name for file to save data in.
If not set, file.txt will be used.
Check the README file for more information about Domains.
"""
self.trainer.save(domain=domain, filename=filename)
def get_lang_set(self):
""" Returns a list of languages in training data.
"""
return self.trainer.get_lang_set()
def classify(self, text=u''):
""" Predicts the Language of a given text.
:param text: Unicode text to be classified.
"""
text = self.lm.normalize(text)
tokenz = LM.tokenize(text, mode='c')
result = self.lm.calculate(doc_terms=tokenz)
#print 'Karbasa:', self.karbasa(result)
if self.unk and self.lm.karbasa(result) < self.min_karbasa:
lang = 'unk'
else:
lang = result['calc_id']
return lang
|
gr33ndata/dysl | dysl/langid.py | LangID.train | python | def train(self, root=''):
self.trainer = Train(root=root)
corpus = self.trainer.get_corpus()
# Show loaded Languages
#print 'Lang Set: ' + ' '.join(train.get_lang_set())
for item in corpus:
self.lm.add_doc(doc_id=item[0], doc_terms=self._readfile(item[1]))
# Save training timestamp
self.training_timestamp = self.trainer.get_last_modified() | Trains our Language Model.
:param root: Path to training data. | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/langid.py#L53-L69 | [
"def _readfile(cls, filename):\n \"\"\" Reads a file a utf-8 file,\n and retuns character tokens.\n\n :param filename: Name of file to be read.\n \"\"\"\n f = codecs.open(filename, encoding='utf-8')\n filedata = f.read()\n f.close()\n tokenz = LM.tokenize(filedata, mode='c')\n #print tokenz\n return tokenz\n"
] | class LangID:
""" Language Identification Class
"""
def __init__(self, unk=False):
# Shall we mark some text as unk,
# if top languages are too close?
self.unk = unk
self.min_karbasa = 0.08
# LM Parameters
ngram = 3
lrpad = u' '
verbose = False
corpus_mix = 'l'
self.lm = LM(n=ngram, verbose=verbose, lpad=lrpad, rpad=lrpad,
smoothing='Laplace', laplace_gama=0.1,
corpus_mix=corpus_mix)
self.trainer = None
self.training_timestamp = 0
@classmethod
def _readfile(cls, filename):
""" Reads a file a utf-8 file,
and retuns character tokens.
:param filename: Name of file to be read.
"""
f = codecs.open(filename, encoding='utf-8')
filedata = f.read()
f.close()
tokenz = LM.tokenize(filedata, mode='c')
#print tokenz
return tokenz
def is_training_modified(self):
""" Returns `True` if training data
was modified since last training.
Returns `False` otherwise,
or if using builtin training data.
"""
last_modified = self.trainer.get_last_modified()
if last_modified > self.training_timestamp:
return True
else:
return False
def was_training_modified(self):
""" For Grammar Nazis,
just alias for is_training_modified()
"""
return self.is_training_modified()
def add_training_sample(self, text=u'', lang=''):
""" Initial step for adding new sample to training data.
You need to call `save_training_samples()` afterwards.
:param text: Sample text to be added.
:param lang: Language label for the input text.
"""
self.trainer.add(text=text, lang=lang)
def save_training_samples(self, domain='', filename=''):
""" Saves data previously added via add_training_sample().
Data saved in folder specified by Train.get_corpus_path().
:param domain: Name for domain folder.
If not set, current timestamp will be used.
:param filename: Name for file to save data in.
If not set, file.txt will be used.
Check the README file for more information about Domains.
"""
self.trainer.save(domain=domain, filename=filename)
def get_lang_set(self):
""" Returns a list of languages in training data.
"""
return self.trainer.get_lang_set()
def classify(self, text=u''):
""" Predicts the Language of a given text.
:param text: Unicode text to be classified.
"""
text = self.lm.normalize(text)
tokenz = LM.tokenize(text, mode='c')
result = self.lm.calculate(doc_terms=tokenz)
#print 'Karbasa:', self.karbasa(result)
if self.unk and self.lm.karbasa(result) < self.min_karbasa:
lang = 'unk'
else:
lang = result['calc_id']
return lang
|
gr33ndata/dysl | dysl/langid.py | LangID.is_training_modified | python | def is_training_modified(self):
last_modified = self.trainer.get_last_modified()
if last_modified > self.training_timestamp:
return True
else:
return False | Returns `True` if training data
was modified since last training.
Returns `False` otherwise,
or if using builtin training data. | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/langid.py#L71-L82 | null | class LangID:
""" Language Identification Class
"""
def __init__(self, unk=False):
# Shall we mark some text as unk,
# if top languages are too close?
self.unk = unk
self.min_karbasa = 0.08
# LM Parameters
ngram = 3
lrpad = u' '
verbose = False
corpus_mix = 'l'
self.lm = LM(n=ngram, verbose=verbose, lpad=lrpad, rpad=lrpad,
smoothing='Laplace', laplace_gama=0.1,
corpus_mix=corpus_mix)
self.trainer = None
self.training_timestamp = 0
@classmethod
def _readfile(cls, filename):
""" Reads a file a utf-8 file,
and retuns character tokens.
:param filename: Name of file to be read.
"""
f = codecs.open(filename, encoding='utf-8')
filedata = f.read()
f.close()
tokenz = LM.tokenize(filedata, mode='c')
#print tokenz
return tokenz
def train(self, root=''):
""" Trains our Language Model.
:param root: Path to training data.
"""
self.trainer = Train(root=root)
corpus = self.trainer.get_corpus()
# Show loaded Languages
#print 'Lang Set: ' + ' '.join(train.get_lang_set())
for item in corpus:
self.lm.add_doc(doc_id=item[0], doc_terms=self._readfile(item[1]))
# Save training timestamp
self.training_timestamp = self.trainer.get_last_modified()
def was_training_modified(self):
""" For Grammar Nazis,
just alias for is_training_modified()
"""
return self.is_training_modified()
def add_training_sample(self, text=u'', lang=''):
""" Initial step for adding new sample to training data.
You need to call `save_training_samples()` afterwards.
:param text: Sample text to be added.
:param lang: Language label for the input text.
"""
self.trainer.add(text=text, lang=lang)
def save_training_samples(self, domain='', filename=''):
""" Saves data previously added via add_training_sample().
Data saved in folder specified by Train.get_corpus_path().
:param domain: Name for domain folder.
If not set, current timestamp will be used.
:param filename: Name for file to save data in.
If not set, file.txt will be used.
Check the README file for more information about Domains.
"""
self.trainer.save(domain=domain, filename=filename)
def get_lang_set(self):
""" Returns a list of languages in training data.
"""
return self.trainer.get_lang_set()
def classify(self, text=u''):
""" Predicts the Language of a given text.
:param text: Unicode text to be classified.
"""
text = self.lm.normalize(text)
tokenz = LM.tokenize(text, mode='c')
result = self.lm.calculate(doc_terms=tokenz)
#print 'Karbasa:', self.karbasa(result)
if self.unk and self.lm.karbasa(result) < self.min_karbasa:
lang = 'unk'
else:
lang = result['calc_id']
return lang
|
gr33ndata/dysl | dysl/langid.py | LangID.add_training_sample | python | def add_training_sample(self, text=u'', lang=''):
self.trainer.add(text=text, lang=lang) | Initial step for adding new sample to training data.
You need to call `save_training_samples()` afterwards.
:param text: Sample text to be added.
:param lang: Language label for the input text. | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/langid.py#L90-L97 | null | class LangID:
""" Language Identification Class
"""
def __init__(self, unk=False):
# Shall we mark some text as unk,
# if top languages are too close?
self.unk = unk
self.min_karbasa = 0.08
# LM Parameters
ngram = 3
lrpad = u' '
verbose = False
corpus_mix = 'l'
self.lm = LM(n=ngram, verbose=verbose, lpad=lrpad, rpad=lrpad,
smoothing='Laplace', laplace_gama=0.1,
corpus_mix=corpus_mix)
self.trainer = None
self.training_timestamp = 0
@classmethod
def _readfile(cls, filename):
""" Reads a file a utf-8 file,
and retuns character tokens.
:param filename: Name of file to be read.
"""
f = codecs.open(filename, encoding='utf-8')
filedata = f.read()
f.close()
tokenz = LM.tokenize(filedata, mode='c')
#print tokenz
return tokenz
def train(self, root=''):
""" Trains our Language Model.
:param root: Path to training data.
"""
self.trainer = Train(root=root)
corpus = self.trainer.get_corpus()
# Show loaded Languages
#print 'Lang Set: ' + ' '.join(train.get_lang_set())
for item in corpus:
self.lm.add_doc(doc_id=item[0], doc_terms=self._readfile(item[1]))
# Save training timestamp
self.training_timestamp = self.trainer.get_last_modified()
def is_training_modified(self):
""" Returns `True` if training data
was modified since last training.
Returns `False` otherwise,
or if using builtin training data.
"""
last_modified = self.trainer.get_last_modified()
if last_modified > self.training_timestamp:
return True
else:
return False
def was_training_modified(self):
""" For Grammar Nazis,
just alias for is_training_modified()
"""
return self.is_training_modified()
def save_training_samples(self, domain='', filename=''):
""" Saves data previously added via add_training_sample().
Data saved in folder specified by Train.get_corpus_path().
:param domain: Name for domain folder.
If not set, current timestamp will be used.
:param filename: Name for file to save data in.
If not set, file.txt will be used.
Check the README file for more information about Domains.
"""
self.trainer.save(domain=domain, filename=filename)
def get_lang_set(self):
""" Returns a list of languages in training data.
"""
return self.trainer.get_lang_set()
def classify(self, text=u''):
""" Predicts the Language of a given text.
:param text: Unicode text to be classified.
"""
text = self.lm.normalize(text)
tokenz = LM.tokenize(text, mode='c')
result = self.lm.calculate(doc_terms=tokenz)
#print 'Karbasa:', self.karbasa(result)
if self.unk and self.lm.karbasa(result) < self.min_karbasa:
lang = 'unk'
else:
lang = result['calc_id']
return lang
|
gr33ndata/dysl | dysl/langid.py | LangID.save_training_samples | python | def save_training_samples(self, domain='', filename=''):
self.trainer.save(domain=domain, filename=filename) | Saves data previously added via add_training_sample().
Data saved in folder specified by Train.get_corpus_path().
:param domain: Name for domain folder.
If not set, current timestamp will be used.
:param filename: Name for file to save data in.
If not set, file.txt will be used.
Check the README file for more information about Domains. | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/langid.py#L99-L110 | null | class LangID:
""" Language Identification Class
"""
def __init__(self, unk=False):
# Shall we mark some text as unk,
# if top languages are too close?
self.unk = unk
self.min_karbasa = 0.08
# LM Parameters
ngram = 3
lrpad = u' '
verbose = False
corpus_mix = 'l'
self.lm = LM(n=ngram, verbose=verbose, lpad=lrpad, rpad=lrpad,
smoothing='Laplace', laplace_gama=0.1,
corpus_mix=corpus_mix)
self.trainer = None
self.training_timestamp = 0
@classmethod
def _readfile(cls, filename):
""" Reads a file a utf-8 file,
and retuns character tokens.
:param filename: Name of file to be read.
"""
f = codecs.open(filename, encoding='utf-8')
filedata = f.read()
f.close()
tokenz = LM.tokenize(filedata, mode='c')
#print tokenz
return tokenz
def train(self, root=''):
""" Trains our Language Model.
:param root: Path to training data.
"""
self.trainer = Train(root=root)
corpus = self.trainer.get_corpus()
# Show loaded Languages
#print 'Lang Set: ' + ' '.join(train.get_lang_set())
for item in corpus:
self.lm.add_doc(doc_id=item[0], doc_terms=self._readfile(item[1]))
# Save training timestamp
self.training_timestamp = self.trainer.get_last_modified()
def is_training_modified(self):
""" Returns `True` if training data
was modified since last training.
Returns `False` otherwise,
or if using builtin training data.
"""
last_modified = self.trainer.get_last_modified()
if last_modified > self.training_timestamp:
return True
else:
return False
def was_training_modified(self):
""" For Grammar Nazis,
just alias for is_training_modified()
"""
return self.is_training_modified()
def add_training_sample(self, text=u'', lang=''):
""" Initial step for adding new sample to training data.
You need to call `save_training_samples()` afterwards.
:param text: Sample text to be added.
:param lang: Language label for the input text.
"""
self.trainer.add(text=text, lang=lang)
def get_lang_set(self):
""" Returns a list of languages in training data.
"""
return self.trainer.get_lang_set()
def classify(self, text=u''):
""" Predicts the Language of a given text.
:param text: Unicode text to be classified.
"""
text = self.lm.normalize(text)
tokenz = LM.tokenize(text, mode='c')
result = self.lm.calculate(doc_terms=tokenz)
#print 'Karbasa:', self.karbasa(result)
if self.unk and self.lm.karbasa(result) < self.min_karbasa:
lang = 'unk'
else:
lang = result['calc_id']
return lang
|
gr33ndata/dysl | dysl/langid.py | LangID.classify | python | def classify(self, text=u''):
text = self.lm.normalize(text)
tokenz = LM.tokenize(text, mode='c')
result = self.lm.calculate(doc_terms=tokenz)
#print 'Karbasa:', self.karbasa(result)
if self.unk and self.lm.karbasa(result) < self.min_karbasa:
lang = 'unk'
else:
lang = result['calc_id']
return lang | Predicts the Language of a given text.
:param text: Unicode text to be classified. | train | https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/langid.py#L117-L131 | null | class LangID:
""" Language Identification Class
"""
def __init__(self, unk=False):
# Shall we mark some text as unk,
# if top languages are too close?
self.unk = unk
self.min_karbasa = 0.08
# LM Parameters
ngram = 3
lrpad = u' '
verbose = False
corpus_mix = 'l'
self.lm = LM(n=ngram, verbose=verbose, lpad=lrpad, rpad=lrpad,
smoothing='Laplace', laplace_gama=0.1,
corpus_mix=corpus_mix)
self.trainer = None
self.training_timestamp = 0
@classmethod
def _readfile(cls, filename):
""" Reads a file a utf-8 file,
and retuns character tokens.
:param filename: Name of file to be read.
"""
f = codecs.open(filename, encoding='utf-8')
filedata = f.read()
f.close()
tokenz = LM.tokenize(filedata, mode='c')
#print tokenz
return tokenz
def train(self, root=''):
""" Trains our Language Model.
:param root: Path to training data.
"""
self.trainer = Train(root=root)
corpus = self.trainer.get_corpus()
# Show loaded Languages
#print 'Lang Set: ' + ' '.join(train.get_lang_set())
for item in corpus:
self.lm.add_doc(doc_id=item[0], doc_terms=self._readfile(item[1]))
# Save training timestamp
self.training_timestamp = self.trainer.get_last_modified()
def is_training_modified(self):
""" Returns `True` if training data
was modified since last training.
Returns `False` otherwise,
or if using builtin training data.
"""
last_modified = self.trainer.get_last_modified()
if last_modified > self.training_timestamp:
return True
else:
return False
def was_training_modified(self):
""" For Grammar Nazis,
just alias for is_training_modified()
"""
return self.is_training_modified()
def add_training_sample(self, text=u'', lang=''):
""" Initial step for adding new sample to training data.
You need to call `save_training_samples()` afterwards.
:param text: Sample text to be added.
:param lang: Language label for the input text.
"""
self.trainer.add(text=text, lang=lang)
def save_training_samples(self, domain='', filename=''):
""" Saves data previously added via add_training_sample().
Data saved in folder specified by Train.get_corpus_path().
:param domain: Name for domain folder.
If not set, current timestamp will be used.
:param filename: Name for file to save data in.
If not set, file.txt will be used.
Check the README file for more information about Domains.
"""
self.trainer.save(domain=domain, filename=filename)
def get_lang_set(self):
""" Returns a list of languages in training data.
"""
return self.trainer.get_lang_set()
|
hobson/pug-ann | pug/ann/data/weather.py | hourly | python | def hourly(location='Fresno, CA', days=1, start=None, end=None, years=1, use_cache=True, verbosity=1):
airport_code = airport(location, default=location)
if isinstance(days, int):
start = start or None
end = end or datetime.datetime.today().date()
days = pd.date_range(start=start, end=end, periods=days)
# refresh the cache each calendar month or each change in the number of days in the dataset
cache_path = 'hourly-{}-{}-{:02d}-{:04d}.csv'.format(airport_code, days[-1].year, days[-1].month, len(days))
cache_path = os.path.join(CACHE_PATH, cache_path)
if use_cache:
try:
return pd.DataFrame.from_csv(cache_path)
except:
pass
df = pd.DataFrame()
for day in days:
url = ('http://www.wunderground.com/history/airport/{airport_code}/{year}/{month}/{day}/DailyHistory.html?MR=1&format=1'.format(
airport_code=airport_code,
year=day.year,
month=day.month,
day=day.day))
if verbosity > 1:
print('GETing *.CSV using "{0}"'.format(url))
buf = urllib.urlopen(url).read()
if verbosity > 0:
N = buf.count('\n')
M = (buf.count(',') + N) / float(N)
print('Retrieved CSV for airport code "{}" with appox. {} lines and {} columns = {} cells.'.format(
airport_code, N, int(round(M)), int(round(M)) * N))
if (buf.count('\n') > 2) or ((buf.count('\n') > 1) and buf.split('\n')[1].count(',') > 0):
table = util.read_csv(buf, format='header+values-list', numbers=True)
columns = [s.strip() for s in table[0]]
table = table[1:]
tzs = [s[4:] for s in columns if (s[5:] in ['ST', 'DT'] and s[4] in 'PMCE' and s[:4].lower() == 'time')]
if tzs:
tz = tzs[0]
else:
tz = 'UTC'
for rownum, row in enumerate(table):
try:
table[rownum] = [util.make_tz_aware(row[0], tz)] + row[1:]
except ValueError:
pass
dates = [row[-1] for row in table]
if not all(isinstance(date, (datetime.datetime, pd.Timestamp)) for date in dates):
dates = [row[0] for row in table]
if len(columns) == len(table[0]):
df0 = pd.DataFrame(table, columns=columns, index=dates)
df = df.append(df0)
elif verbosity >= 0:
msg = "The number of columns in the 1st row of the table:\n {}\n doesn't match the number of column labels:\n {}\n".format(
table[0], columns)
msg += "Wunderground.com probably can't find the airport: {} ({})\n or the date: {}\n in its database.\n".format(
airport_code, location, day)
msg += "Attempted a GET request using the URI:\n {0}\n".format(url)
warnings.warn(msg)
try:
df.to_csv(cache_path)
except:
if verbosity > 0 and use_cache:
from traceback import print_exc
print_exc()
warnings.warn('Unable to write weather data to cache file at {}'.format(cache_path))
return df | Get detailed (hourly) weather data for the requested days and location
The Weather Underground URL for Fresno, CA on 1/1/2011 is:
http://www.wunderground.com/history/airport/KFAT/2011/1/1/DailyHistory.html?MR=1&format=1
This will fail periodically on Travis, b/c wunderground says "No daily or hourly history data available"
>> df = hourly('Fresno, CA', verbosity=-1)
>> 1 <= len(df) <= 24 * 2
True
The time zone of the client where this is used to compose the first column label, hence the ellipsis
>> df.columns # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Index([u'Time...
>> df = hourly('Fresno, CA', days=5, verbosity=-1)
>> 24 * 4 <= len(df) <= 24 * (5 + 1) * 2
True | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/data/weather.py#L24-L107 | [
"def airport(location, default=None):\n return airport.locations.get(location, default)\n"
] | import os
import urllib
# import re
import datetime
import json
import warnings
import pandas as pd
from pug.nlp import util, env
np = pd.np
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
CACHE_PATH = os.path.join(DATA_PATH, 'cache')
def airport(location, default=None):
return airport.locations.get(location, default)
airport.locations = json.load(open(os.path.join(CACHE_PATH, 'airport.locations.json'), 'rUb'))
def api(feature='conditions', city='Portland', state='OR', key=None):
"""Use the wunderground API to get current conditions instead of scraping
Please be kind and use your own key (they're FREE!):
http://www.wunderground.com/weather/api/d/login.html
References:
http://www.wunderground.com/weather/api/d/terms.html
Examples:
>>> api('hurric', 'Boise', 'ID') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
{u'currenthurricane': ...}}}
>>> features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' +
... 'planner rawtide satellite tide webcams yesterday').split(' ')
>> everything = [api(f, 'Portland') for f in features]
>> js = api('alerts', 'Portland', 'OR')
>> js = api('condit', 'Sacramento', 'CA')
>> js = api('forecast', 'Mobile', 'AL')
>> js = api('10day', 'Fairhope', 'AL')
>> js = api('geo', 'Decatur', 'AL')
>> js = api('hist', 'history', 'AL')
>> js = api('astro')
"""
features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' +
'planner rawtide satellite tide webcams yesterday').split(' ')
feature = util.fuzzy_get(features, feature)
# Please be kind and use your own key (they're FREE!):
# http://www.wunderground.com/weather/api/d/login.html
key = key or env.get('WUNDERGROUND', None, verbosity=-1) or env.get('WUNDERGROUND_KEY', 'c45a86c2fc63f7d0', verbosity=-1)
url = 'http://api.wunderground.com/api/{key}/{feature}/q/{state}/{city}.json'.format(
key=key, feature=feature, state=state, city=city)
return json.load(urllib.urlopen(url))
def daily(location='Fresno, CA', years=1, use_cache=True, verbosity=1):
"""Retrieve weather for the indicated airport code or 'City, ST' string.
>>> df = daily('Camas, WA', verbosity=-1)
>>> 365 <= len(df) <= 365 * 2 + 1
True
Sacramento data has gaps (airport KMCC):
8/21/2013 is missing from 2013.
Whole months are missing from 2014.
>>> df = daily('Sacramento, CA', years=[2013], verbosity=-1)
>>> 364 <= len(df) <= 365
True
>>> df.columns
Index([u'PST', u'Max TemperatureF', u'Mean TemperatureF', u'Min TemperatureF', u'Max Dew PointF', u'MeanDew PointF', u'Min DewpointF', ...
"""
this_year = datetime.date.today().year
if isinstance(years, (int, float)):
# current (incomplete) year doesn't count in total number of years
# so 0 would return this calendar year's weather data
years = np.arange(0, int(years) + 1)
years = sorted(years)
if not all(1900 <= yr <= this_year for yr in years):
years = np.array([abs(yr) if (1900 <= abs(yr) <= this_year) else (this_year - abs(int(yr))) for yr in years])[::-1]
airport_code = airport(location, default=location)
# refresh the cache each time the start or end year changes
cache_path = 'daily-{}-{}-{}.csv'.format(airport_code, years[0], years[-1])
cache_path = os.path.join(CACHE_PATH, cache_path)
if use_cache:
try:
return pd.DataFrame.from_csv(cache_path)
except:
pass
df = pd.DataFrame()
for year in years:
url = ('http://www.wunderground.com/history/airport/{airport}/{yearstart}/1/1/' +
'CustomHistory.html?dayend=31&monthend=12&yearend={yearend}' +
'&req_city=&req_state=&req_statename=&reqdb.zip=&reqdb.magic=&reqdb.wmo=&MR=1&format=1').format(
airport=airport_code,
yearstart=year,
yearend=year
)
if verbosity > 1:
print('GETing *.CSV using "{0}"'.format(url))
buf = urllib.urlopen(url).read()
if verbosity > 0:
N = buf.count('\n')
M = (buf.count(',') + N) / float(N)
print('Retrieved CSV for airport code "{}" with appox. {} lines and {} columns = {} cells.'.format(
airport_code, N, int(round(M)), int(round(M)) * N))
if verbosity > 2:
print(buf)
table = util.read_csv(buf, format='header+values-list', numbers=True)
# # clean up the last column (if it contains <br> tags)
table = [util.strip_br(row) if len(row) > 1 else row for row in table]
# numcols = max(len(row) for row in table)
# table = [row for row in table if len(row) == numcols]
columns = table.pop(0)
tzs = [s for s in columns if (s[1:] in ['ST', 'DT'] and s[0] in 'PMCE')]
dates = [float('nan')] * len(table)
for i, row in enumerate(table):
for j, value in enumerate(row):
if not value and value is not None:
value = 0
continue
if columns[j] in tzs:
table[i][j] = util.make_tz_aware(value, tz=columns[j])
if isinstance(table[i][j], datetime.datetime):
dates[i] = table[i][j]
continue
try:
table[i][j] = float(value)
if not (table[i][j] % 1):
table[i][j] = int(table[i][j])
except:
pass
df0 = pd.DataFrame(table, columns=columns, index=dates)
df = df.append(df0)
if verbosity > 1:
print(df)
try:
df.to_csv(cache_path)
except:
if verbosity > 0 and use_cache:
from traceback import print_exc
print_exc()
warnings.warn('Unable to write weather data to cache file at {}'.format(cache_path))
return df
|
hobson/pug-ann | pug/ann/data/weather.py | api | python | def api(feature='conditions', city='Portland', state='OR', key=None):
features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' +
'planner rawtide satellite tide webcams yesterday').split(' ')
feature = util.fuzzy_get(features, feature)
# Please be kind and use your own key (they're FREE!):
# http://www.wunderground.com/weather/api/d/login.html
key = key or env.get('WUNDERGROUND', None, verbosity=-1) or env.get('WUNDERGROUND_KEY', 'c45a86c2fc63f7d0', verbosity=-1)
url = 'http://api.wunderground.com/api/{key}/{feature}/q/{state}/{city}.json'.format(
key=key, feature=feature, state=state, city=city)
return json.load(urllib.urlopen(url)) | Use the wunderground API to get current conditions instead of scraping
Please be kind and use your own key (they're FREE!):
http://www.wunderground.com/weather/api/d/login.html
References:
http://www.wunderground.com/weather/api/d/terms.html
Examples:
>>> api('hurric', 'Boise', 'ID') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
{u'currenthurricane': ...}}}
>>> features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' +
... 'planner rawtide satellite tide webcams yesterday').split(' ')
>> everything = [api(f, 'Portland') for f in features]
>> js = api('alerts', 'Portland', 'OR')
>> js = api('condit', 'Sacramento', 'CA')
>> js = api('forecast', 'Mobile', 'AL')
>> js = api('10day', 'Fairhope', 'AL')
>> js = api('geo', 'Decatur', 'AL')
>> js = api('hist', 'history', 'AL')
>> js = api('astro') | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/data/weather.py#L110-L142 | null | import os
import urllib
# import re
import datetime
import json
import warnings
import pandas as pd
from pug.nlp import util, env
np = pd.np
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
CACHE_PATH = os.path.join(DATA_PATH, 'cache')
def airport(location, default=None):
return airport.locations.get(location, default)
airport.locations = json.load(open(os.path.join(CACHE_PATH, 'airport.locations.json'), 'rUb'))
def hourly(location='Fresno, CA', days=1, start=None, end=None, years=1, use_cache=True, verbosity=1):
""" Get detailed (hourly) weather data for the requested days and location
The Weather Underground URL for Fresno, CA on 1/1/2011 is:
http://www.wunderground.com/history/airport/KFAT/2011/1/1/DailyHistory.html?MR=1&format=1
This will fail periodically on Travis, b/c wunderground says "No daily or hourly history data available"
>> df = hourly('Fresno, CA', verbosity=-1)
>> 1 <= len(df) <= 24 * 2
True
The time zone of the client where this is used to compose the first column label, hence the ellipsis
>> df.columns # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Index([u'Time...
>> df = hourly('Fresno, CA', days=5, verbosity=-1)
>> 24 * 4 <= len(df) <= 24 * (5 + 1) * 2
True
"""
airport_code = airport(location, default=location)
if isinstance(days, int):
start = start or None
end = end or datetime.datetime.today().date()
days = pd.date_range(start=start, end=end, periods=days)
# refresh the cache each calendar month or each change in the number of days in the dataset
cache_path = 'hourly-{}-{}-{:02d}-{:04d}.csv'.format(airport_code, days[-1].year, days[-1].month, len(days))
cache_path = os.path.join(CACHE_PATH, cache_path)
if use_cache:
try:
return pd.DataFrame.from_csv(cache_path)
except:
pass
df = pd.DataFrame()
for day in days:
url = ('http://www.wunderground.com/history/airport/{airport_code}/{year}/{month}/{day}/DailyHistory.html?MR=1&format=1'.format(
airport_code=airport_code,
year=day.year,
month=day.month,
day=day.day))
if verbosity > 1:
print('GETing *.CSV using "{0}"'.format(url))
buf = urllib.urlopen(url).read()
if verbosity > 0:
N = buf.count('\n')
M = (buf.count(',') + N) / float(N)
print('Retrieved CSV for airport code "{}" with appox. {} lines and {} columns = {} cells.'.format(
airport_code, N, int(round(M)), int(round(M)) * N))
if (buf.count('\n') > 2) or ((buf.count('\n') > 1) and buf.split('\n')[1].count(',') > 0):
table = util.read_csv(buf, format='header+values-list', numbers=True)
columns = [s.strip() for s in table[0]]
table = table[1:]
tzs = [s[4:] for s in columns if (s[5:] in ['ST', 'DT'] and s[4] in 'PMCE' and s[:4].lower() == 'time')]
if tzs:
tz = tzs[0]
else:
tz = 'UTC'
for rownum, row in enumerate(table):
try:
table[rownum] = [util.make_tz_aware(row[0], tz)] + row[1:]
except ValueError:
pass
dates = [row[-1] for row in table]
if not all(isinstance(date, (datetime.datetime, pd.Timestamp)) for date in dates):
dates = [row[0] for row in table]
if len(columns) == len(table[0]):
df0 = pd.DataFrame(table, columns=columns, index=dates)
df = df.append(df0)
elif verbosity >= 0:
msg = "The number of columns in the 1st row of the table:\n {}\n doesn't match the number of column labels:\n {}\n".format(
table[0], columns)
msg += "Wunderground.com probably can't find the airport: {} ({})\n or the date: {}\n in its database.\n".format(
airport_code, location, day)
msg += "Attempted a GET request using the URI:\n {0}\n".format(url)
warnings.warn(msg)
try:
df.to_csv(cache_path)
except:
if verbosity > 0 and use_cache:
from traceback import print_exc
print_exc()
warnings.warn('Unable to write weather data to cache file at {}'.format(cache_path))
return df
def daily(location='Fresno, CA', years=1, use_cache=True, verbosity=1):
"""Retrieve weather for the indicated airport code or 'City, ST' string.
>>> df = daily('Camas, WA', verbosity=-1)
>>> 365 <= len(df) <= 365 * 2 + 1
True
Sacramento data has gaps (airport KMCC):
8/21/2013 is missing from 2013.
Whole months are missing from 2014.
>>> df = daily('Sacramento, CA', years=[2013], verbosity=-1)
>>> 364 <= len(df) <= 365
True
>>> df.columns
Index([u'PST', u'Max TemperatureF', u'Mean TemperatureF', u'Min TemperatureF', u'Max Dew PointF', u'MeanDew PointF', u'Min DewpointF', ...
"""
this_year = datetime.date.today().year
if isinstance(years, (int, float)):
# current (incomplete) year doesn't count in total number of years
# so 0 would return this calendar year's weather data
years = np.arange(0, int(years) + 1)
years = sorted(years)
if not all(1900 <= yr <= this_year for yr in years):
years = np.array([abs(yr) if (1900 <= abs(yr) <= this_year) else (this_year - abs(int(yr))) for yr in years])[::-1]
airport_code = airport(location, default=location)
# refresh the cache each time the start or end year changes
cache_path = 'daily-{}-{}-{}.csv'.format(airport_code, years[0], years[-1])
cache_path = os.path.join(CACHE_PATH, cache_path)
if use_cache:
try:
return pd.DataFrame.from_csv(cache_path)
except:
pass
df = pd.DataFrame()
for year in years:
url = ('http://www.wunderground.com/history/airport/{airport}/{yearstart}/1/1/' +
'CustomHistory.html?dayend=31&monthend=12&yearend={yearend}' +
'&req_city=&req_state=&req_statename=&reqdb.zip=&reqdb.magic=&reqdb.wmo=&MR=1&format=1').format(
airport=airport_code,
yearstart=year,
yearend=year
)
if verbosity > 1:
print('GETing *.CSV using "{0}"'.format(url))
buf = urllib.urlopen(url).read()
if verbosity > 0:
N = buf.count('\n')
M = (buf.count(',') + N) / float(N)
print('Retrieved CSV for airport code "{}" with appox. {} lines and {} columns = {} cells.'.format(
airport_code, N, int(round(M)), int(round(M)) * N))
if verbosity > 2:
print(buf)
table = util.read_csv(buf, format='header+values-list', numbers=True)
# # clean up the last column (if it contains <br> tags)
table = [util.strip_br(row) if len(row) > 1 else row for row in table]
# numcols = max(len(row) for row in table)
# table = [row for row in table if len(row) == numcols]
columns = table.pop(0)
tzs = [s for s in columns if (s[1:] in ['ST', 'DT'] and s[0] in 'PMCE')]
dates = [float('nan')] * len(table)
for i, row in enumerate(table):
for j, value in enumerate(row):
if not value and value is not None:
value = 0
continue
if columns[j] in tzs:
table[i][j] = util.make_tz_aware(value, tz=columns[j])
if isinstance(table[i][j], datetime.datetime):
dates[i] = table[i][j]
continue
try:
table[i][j] = float(value)
if not (table[i][j] % 1):
table[i][j] = int(table[i][j])
except:
pass
df0 = pd.DataFrame(table, columns=columns, index=dates)
df = df.append(df0)
if verbosity > 1:
print(df)
try:
df.to_csv(cache_path)
except:
if verbosity > 0 and use_cache:
from traceback import print_exc
print_exc()
warnings.warn('Unable to write weather data to cache file at {}'.format(cache_path))
return df
|
hobson/pug-ann | pug/ann/data/weather.py | daily | python | def daily(location='Fresno, CA', years=1, use_cache=True, verbosity=1):
this_year = datetime.date.today().year
if isinstance(years, (int, float)):
# current (incomplete) year doesn't count in total number of years
# so 0 would return this calendar year's weather data
years = np.arange(0, int(years) + 1)
years = sorted(years)
if not all(1900 <= yr <= this_year for yr in years):
years = np.array([abs(yr) if (1900 <= abs(yr) <= this_year) else (this_year - abs(int(yr))) for yr in years])[::-1]
airport_code = airport(location, default=location)
# refresh the cache each time the start or end year changes
cache_path = 'daily-{}-{}-{}.csv'.format(airport_code, years[0], years[-1])
cache_path = os.path.join(CACHE_PATH, cache_path)
if use_cache:
try:
return pd.DataFrame.from_csv(cache_path)
except:
pass
df = pd.DataFrame()
for year in years:
url = ('http://www.wunderground.com/history/airport/{airport}/{yearstart}/1/1/' +
'CustomHistory.html?dayend=31&monthend=12&yearend={yearend}' +
'&req_city=&req_state=&req_statename=&reqdb.zip=&reqdb.magic=&reqdb.wmo=&MR=1&format=1').format(
airport=airport_code,
yearstart=year,
yearend=year
)
if verbosity > 1:
print('GETing *.CSV using "{0}"'.format(url))
buf = urllib.urlopen(url).read()
if verbosity > 0:
N = buf.count('\n')
M = (buf.count(',') + N) / float(N)
print('Retrieved CSV for airport code "{}" with appox. {} lines and {} columns = {} cells.'.format(
airport_code, N, int(round(M)), int(round(M)) * N))
if verbosity > 2:
print(buf)
table = util.read_csv(buf, format='header+values-list', numbers=True)
# # clean up the last column (if it contains <br> tags)
table = [util.strip_br(row) if len(row) > 1 else row for row in table]
# numcols = max(len(row) for row in table)
# table = [row for row in table if len(row) == numcols]
columns = table.pop(0)
tzs = [s for s in columns if (s[1:] in ['ST', 'DT'] and s[0] in 'PMCE')]
dates = [float('nan')] * len(table)
for i, row in enumerate(table):
for j, value in enumerate(row):
if not value and value is not None:
value = 0
continue
if columns[j] in tzs:
table[i][j] = util.make_tz_aware(value, tz=columns[j])
if isinstance(table[i][j], datetime.datetime):
dates[i] = table[i][j]
continue
try:
table[i][j] = float(value)
if not (table[i][j] % 1):
table[i][j] = int(table[i][j])
except:
pass
df0 = pd.DataFrame(table, columns=columns, index=dates)
df = df.append(df0)
if verbosity > 1:
print(df)
try:
df.to_csv(cache_path)
except:
if verbosity > 0 and use_cache:
from traceback import print_exc
print_exc()
warnings.warn('Unable to write weather data to cache file at {}'.format(cache_path))
return df | Retrieve weather for the indicated airport code or 'City, ST' string.
>>> df = daily('Camas, WA', verbosity=-1)
>>> 365 <= len(df) <= 365 * 2 + 1
True
Sacramento data has gaps (airport KMCC):
8/21/2013 is missing from 2013.
Whole months are missing from 2014.
>>> df = daily('Sacramento, CA', years=[2013], verbosity=-1)
>>> 364 <= len(df) <= 365
True
>>> df.columns
Index([u'PST', u'Max TemperatureF', u'Mean TemperatureF', u'Min TemperatureF', u'Max Dew PointF', u'MeanDew PointF', u'Min DewpointF', ... | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/data/weather.py#L145-L238 | [
"def airport(location, default=None):\n return airport.locations.get(location, default)\n"
] | import os
import urllib
# import re
import datetime
import json
import warnings
import pandas as pd
from pug.nlp import util, env
np = pd.np
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
CACHE_PATH = os.path.join(DATA_PATH, 'cache')
def airport(location, default=None):
return airport.locations.get(location, default)
airport.locations = json.load(open(os.path.join(CACHE_PATH, 'airport.locations.json'), 'rUb'))
def hourly(location='Fresno, CA', days=1, start=None, end=None, years=1, use_cache=True, verbosity=1):
""" Get detailed (hourly) weather data for the requested days and location
The Weather Underground URL for Fresno, CA on 1/1/2011 is:
http://www.wunderground.com/history/airport/KFAT/2011/1/1/DailyHistory.html?MR=1&format=1
This will fail periodically on Travis, b/c wunderground says "No daily or hourly history data available"
>> df = hourly('Fresno, CA', verbosity=-1)
>> 1 <= len(df) <= 24 * 2
True
The time zone of the client where this is used to compose the first column label, hence the ellipsis
>> df.columns # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Index([u'Time...
>> df = hourly('Fresno, CA', days=5, verbosity=-1)
>> 24 * 4 <= len(df) <= 24 * (5 + 1) * 2
True
"""
airport_code = airport(location, default=location)
if isinstance(days, int):
start = start or None
end = end or datetime.datetime.today().date()
days = pd.date_range(start=start, end=end, periods=days)
# refresh the cache each calendar month or each change in the number of days in the dataset
cache_path = 'hourly-{}-{}-{:02d}-{:04d}.csv'.format(airport_code, days[-1].year, days[-1].month, len(days))
cache_path = os.path.join(CACHE_PATH, cache_path)
if use_cache:
try:
return pd.DataFrame.from_csv(cache_path)
except:
pass
df = pd.DataFrame()
for day in days:
url = ('http://www.wunderground.com/history/airport/{airport_code}/{year}/{month}/{day}/DailyHistory.html?MR=1&format=1'.format(
airport_code=airport_code,
year=day.year,
month=day.month,
day=day.day))
if verbosity > 1:
print('GETing *.CSV using "{0}"'.format(url))
buf = urllib.urlopen(url).read()
if verbosity > 0:
N = buf.count('\n')
M = (buf.count(',') + N) / float(N)
print('Retrieved CSV for airport code "{}" with appox. {} lines and {} columns = {} cells.'.format(
airport_code, N, int(round(M)), int(round(M)) * N))
if (buf.count('\n') > 2) or ((buf.count('\n') > 1) and buf.split('\n')[1].count(',') > 0):
table = util.read_csv(buf, format='header+values-list', numbers=True)
columns = [s.strip() for s in table[0]]
table = table[1:]
tzs = [s[4:] for s in columns if (s[5:] in ['ST', 'DT'] and s[4] in 'PMCE' and s[:4].lower() == 'time')]
if tzs:
tz = tzs[0]
else:
tz = 'UTC'
for rownum, row in enumerate(table):
try:
table[rownum] = [util.make_tz_aware(row[0], tz)] + row[1:]
except ValueError:
pass
dates = [row[-1] for row in table]
if not all(isinstance(date, (datetime.datetime, pd.Timestamp)) for date in dates):
dates = [row[0] for row in table]
if len(columns) == len(table[0]):
df0 = pd.DataFrame(table, columns=columns, index=dates)
df = df.append(df0)
elif verbosity >= 0:
msg = "The number of columns in the 1st row of the table:\n {}\n doesn't match the number of column labels:\n {}\n".format(
table[0], columns)
msg += "Wunderground.com probably can't find the airport: {} ({})\n or the date: {}\n in its database.\n".format(
airport_code, location, day)
msg += "Attempted a GET request using the URI:\n {0}\n".format(url)
warnings.warn(msg)
try:
df.to_csv(cache_path)
except:
if verbosity > 0 and use_cache:
from traceback import print_exc
print_exc()
warnings.warn('Unable to write weather data to cache file at {}'.format(cache_path))
return df
def api(feature='conditions', city='Portland', state='OR', key=None):
"""Use the wunderground API to get current conditions instead of scraping
Please be kind and use your own key (they're FREE!):
http://www.wunderground.com/weather/api/d/login.html
References:
http://www.wunderground.com/weather/api/d/terms.html
Examples:
>>> api('hurric', 'Boise', 'ID') # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
{u'currenthurricane': ...}}}
>>> features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' +
... 'planner rawtide satellite tide webcams yesterday').split(' ')
>> everything = [api(f, 'Portland') for f in features]
>> js = api('alerts', 'Portland', 'OR')
>> js = api('condit', 'Sacramento', 'CA')
>> js = api('forecast', 'Mobile', 'AL')
>> js = api('10day', 'Fairhope', 'AL')
>> js = api('geo', 'Decatur', 'AL')
>> js = api('hist', 'history', 'AL')
>> js = api('astro')
"""
features = ('alerts astronomy conditions currenthurricane forecast forecast10day geolookup history hourly hourly10day ' +
'planner rawtide satellite tide webcams yesterday').split(' ')
feature = util.fuzzy_get(features, feature)
# Please be kind and use your own key (they're FREE!):
# http://www.wunderground.com/weather/api/d/login.html
key = key or env.get('WUNDERGROUND', None, verbosity=-1) or env.get('WUNDERGROUND_KEY', 'c45a86c2fc63f7d0', verbosity=-1)
url = 'http://api.wunderground.com/api/{key}/{feature}/q/{state}/{city}.json'.format(
key=key, feature=feature, state=state, city=city)
return json.load(urllib.urlopen(url))
|
hobson/pug-ann | pug/ann/util.py | build_ann | python | def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
N_input = N_input or 1
N_output = N_output or 1
N_hidden = N_hidden or tuple()
if isinstance(N_hidden, (int, float, basestring)):
N_hidden = (int(N_hidden),)
hidden_layer_type = hidden_layer_type or tuple()
hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
if verbosity > 0:
print(N_hidden, ' layers of type ', hidden_layer_type)
assert(len(N_hidden) == len(hidden_layer_type))
nn = pb.structure.FeedForwardNetwork()
# layers
nn.addInputModule(pb.structure.BiasUnit(name='bias'))
nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):
Nhid = int(Nhid)
nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))
# connections
nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))
nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))
for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):
Nhid = int(Nhid)
nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],
nn['hidden-{}'.format(i + 1)]))
i = len(N_hidden) - 1
nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))
nn.sortModules()
if FAST:
try:
nn.convertToFastNetwork()
except:
if verbosity > 0:
print('Unable to convert slow PyBrain NN to a fast ARAC network...')
if verbosity > 0:
print(nn.connections)
return nn | Build a neural net with the indicated input, hidden, and outout dimensions
Arguments:
params (dict or PyBrainParams namedtuple):
default: {'N_hidden': 6}
(this is the only parameter that affects the NN build)
Returns:
FeedForwardNetwork with N_input + N_hidden + N_output nodes in 3 layers | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/util.py#L62-L115 | [
"def normalize_layer_type(layer_type):\n try:\n if layer_type in LAYER_TYPES:\n return layer_type\n except TypeError:\n pass\n try:\n return getattr(pb.structure, layer_type.strip())\n except AttributeError:\n try:\n return getattr(pb.structure, layer_type.strip() + 'Layer')\n except AttributeError:\n pass\n return [normalize_layer_type(lt) for lt in layer_type]\n"
] | """Maniuplate, analyze and plot `pybrain` `Network` and `DataSet` objects
TODO:
Incorporate into pybrain fork so pug doesn't have to depend on pybrain
"""
from __future__ import print_function
import os
import warnings
import pandas as pd
from scipy import ndarray, reshape # array, amin, amax,
np = pd.np
from matplotlib import pyplot as plt
import pybrain.datasets
import pybrain.structure
import pybrain.supervised
import pybrain.tools
pb = pybrain
# from pybrain.supervised.trainers import Trainer
from pybrain.tools.customxml import NetworkReader
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.connections.connection import Connection
from pug.nlp.util import tuplify, fuzzy_get
from pug.ann.data import weather
#import pug.nlp.util as nlp
# print(os.path.realpath(__file__))
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(DATA_PATH, '..', 'data')
LAYER_TYPES = set([getattr(pybrain.structure, s) for s in
dir(pybrain.structure) if s.endswith('Layer')])
FAST = False
try:
from arac.pybrainbridge import _FeedForwardNetwork as FeedForwardNetwork
FAST = True
except:
from pybrain.structure import FeedForwardNetwork
print("No fast (ARAC, a FC++ library) FeedForwardNetwork was found, "
"so using the slower pybrain python implementation of FFNN.")
def normalize_layer_type(layer_type):
try:
if layer_type in LAYER_TYPES:
return layer_type
except TypeError:
pass
try:
return getattr(pb.structure, layer_type.strip())
except AttributeError:
try:
return getattr(pb.structure, layer_type.strip() + 'Layer')
except AttributeError:
pass
return [normalize_layer_type(lt) for lt in layer_type]
def ann_from_ds(ds=None, N_input=3, N_hidden=0, N_output=1, verbosity=1):
N_input = getattr(ds, 'indim', N_input)
N_output = getattr(ds, 'outdim', N_output)
N_hidden = N_hidden or getattr(ds, 'paramdim', N_hidden + N_input + N_output) - N_input - N_output
N_hidden = max(round(min(N_hidden, len(ds) / float(N_input) / float(N_output) / 5.)), N_output)
return build_ann(N_input=N_input, N_hidden=N_hidden, N_output=N_output, verbosity=verbosity)
def prepend_dataset_with_weather(samples, location='Fresno, CA', weather_columns=None, use_cache=True, verbosity=0):
""" Prepend weather the values specified (e.g. Max TempF) to the samples[0..N]['input'] vectors
samples[0..N]['target'] should have an index with the date timestamp
If you use_cache for the curent year, you may not get the most recent data.
Arguments:
samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame}
"""
if verbosity > 1:
print('Prepending weather data for {} to dataset samples'.format(weather_columns))
if not weather_columns:
return samples
timestamps = pd.DatetimeIndex([s['target'].index[0] for s in samples])
years = range(timestamps.min().date().year, timestamps.max().date().year + 1)
weather_df = weather.daily(location=location, years=years, use_cache=use_cache)
# FIXME: weather_df.resample('D') fails
weather_df.index = [d.date() for d in weather_df.index]
if verbosity > 1:
print('Retrieved weather for years {}:'.format(years))
print(weather_df)
weather_columns = [label if label in weather_df.columns else weather_df.columns[int(label)]
for label in (weather_columns or [])]
for sampnum, sample in enumerate(samples):
timestamp = timestamps[sampnum]
try:
weather_day = weather_df.loc[timestamp.date()]
except:
from traceback import print_exc
print_exc()
weather_day = {}
if verbosity >= 0:
warnings.warn('Unable to find weather for the date {}'.format(timestamp.date()))
NaN = float('NaN')
sample['input'] = [weather_day.get(label, None) for label in weather_columns] + list(sample['input'])
if verbosity > 0 and NaN in sample['input']:
warnings.warn('Unable to find weather features {} in the weather for date {}'.format(
[label for i, label in enumerate(weather_columns) if sample['input'][i] == NaN], timestamp))
return samples
def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):
"""Compose a pybrain.dataset from a pandas DataFrame
Arguments:
delays (list of int): sample delays to use for the input tapped delay line
Positive and negative values are treated the same as sample counts into the past.
default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
inputs (list of int or list of str): column indices or labels for the inputs
outputs (list of int or list of str): column indices or labels for the outputs
normalize (bool): whether to divide each input to be normally distributed about 0 with std 1
Returns:
3-tuple: tuple(dataset, list of means, list of stds)
means and stds allow normalization of new inputs and denormalization of the outputs
TODO:
Detect categorical variables with low dimensionality and split into separate bits
Vowpel Wabbit hashes strings into an int?
Detect ordinal variables and convert to continuous int sequence
SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
"""
if isinstance(delays, int):
if delays:
delays = range(1, delays + 1)
else:
delays = [0]
delays = np.abs(np.array([int(i) for i in delays]))
inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]
outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]
inputs = [fuzzy_get(df.columns, i) for i in inputs]
outputs = [fuzzy_get(df.columns, o) for o in outputs]
N_inp = len(inputs)
N_out = len(outputs)
inp_outs = inputs + outputs
if verbosity > 0:
print("inputs: {}\noutputs: {}\ndelays: {}\n".format(inputs, outputs, delays))
means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))
if normalize:
means, stds = df[inp_outs].mean(), df[inp_outs].std()
if normalize and verbosity > 0:
print("Input mean values (used to normalize input biases): {}".format(means[:N_inp]))
print("Output mean values (used to normalize output biases): {}".format(means[N_inp:]))
ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)
if verbosity > 0:
print("Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs".format(
len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))
# FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time
if delays == np.array([0]) and not normalize:
if verbosity > 0:
print("No tapped delay lines (delays) were requested, so using undelayed features for the dataset.")
assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])
ds.setField('input', df[inputs].values)
ds.setField('target', df[outputs].values)
ds.linkFields(['input', 'target'])
# for inp, outp in zip(df[inputs].values, df[outputs].values):
# ds.appendLinked(inp, outp)
assert(len(ds['input']) == len(ds['target']))
else:
for i, out_vec in enumerate(df[outputs].values):
if verbosity > 0 and i % 100 == 0:
print("{}%".format(i / .01 / len(df)))
elif verbosity > 1:
print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))
if i < max(delays):
continue
inp_vec = []
for delay in delays:
inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])
ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])
if verbosity > 0:
print("Dataset now has {} samples".format(len(ds)))
if normalize:
return ds, means, stds
else:
return ds
# def dataset_from_feature_names(df, delays=(1, 2,), quantiles=(), input_columns=(0,), target_columns=(-1,),
# weather_columns=(), features=(), verbosity=1):
# """FIXME: Untested. Transform Datafram columns and append to input_columns as additional features
# Arguments:
# features (seq of str): names of feautures to be appended to the input vector (feature set)
# """
# raise NotImplementedError("Better to implement this as a separate feature transformation function")
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + len(quantiles)
# + len(weather_columns)
# )
# N = max(list(delays) + list(quantiles))
# last_date = first_date = 0
# try:
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(len(N) + extras))
# print('Starting to augment {0}x{1} sample inputs by adding adding {2} additional features'.format(
# len(df), len(input_columns), extras))
# if verbosity > 0:
# print('The first sample input was {0}'.format(
# df[input_columns].iloc[0]))
# print('The first sample target was {0}'.format(
# df[target_columns].iloc[0]))
# print('The last sample input was {0}'.format(
# df[input_columns].iloc[-1]))
# print('The last sample target was {0}'.format(
# df[target_columns].iloc[-1]))
# first_date = df.index.iloc[0].date().toordinal()
# last_date = df.index.iloc[-1].date().toordinal()
# except:
# if verbosity > -1:
# from traceback import format_exc
# warnings.warn(format_exc())
# if verbosity > 1:
# import ipdb
# ipdb.set_trace()
# date_range = (last_date - first_date) + 1 or 1
# # FIXME: scale each feature/column/dimension independently using pug.ann.util.dataset_from_dataframe
# # but mean and std become vectors of the same dimension as the feature/input vector.
# # scikit-learn has transformations that do this more reasonably
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# # convert the list of dicts ((input, output) supervised dataset pairs) into a pybrains.Dataset
# ds = pybrain.datasets.SupervisedDataSet(len(N) + extras, 1)
# sorted_features = sorted(features)
# for sampnum, (input_vector, target_vector) in enumerate(
# zip(df[input_columns].values, df[target_columns].values)):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# # inputs = list(sample['input'])
# # the date we're trying to predict the threshold for
# timestamp = target_vector.index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 2:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale / date_range
# ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\n'.format(
# feature_name, sorted_features)
# msg += 'For sample {} and date {}\n'.format(sampnum, timestamp)
# msg += 'Input vector positions {}:\nInput vector: {}'.format(
# ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(target_vector.values))
def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build a supervised dataset whose output/target vectors are left empty.

    Thin wrapper around `dataset_from_dataframe`; the only difference is the
    default for `outputs` (None instead of (-1,)).
    """
    kwargs = dict(df=df, delays=delays, inputs=inputs, outputs=outputs,
                  normalize=normalize, verbosity=verbosity)
    return dataset_from_dataframe(**kwargs)
def inputs_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build the sequence of input vectors a neural net can "activate" on.

    Same contract as `dataset_from_dataframe`, except that `outputs` defaults
    to None and only the 'input' field of the resulting DataSet is returned.
    """
    dataset = input_dataset_from_dataframe(
        df=df, delays=delays, inputs=inputs, outputs=outputs,
        normalize=normalize, verbosity=verbosity)
    return dataset['input']
def build_trainer(nn, ds, verbosity=1):
    """Construct an RProp- (resilient backprop) batch trainer for network `nn` on dataset `ds`."""
    trainer_cls = pb.supervised.trainers.rprop.RPropMinusTrainer
    return trainer_cls(nn, dataset=ds, batchlearning=True, verbose=bool(verbosity))
def weight_matrices(nn):
    """Extract list of weight matrices from a Network, Layer (module), Trainer, Connection or other pybrain object.

    Accepts an ndarray (returned as-is), a ParameterContainer/Connection
    (reshaped to (outdim, indim)), a filename of a saved network, a dict of
    connections, or any nested sequence of these; the result mirrors the
    nesting as (lists of) 2-D weight arrays.
    """
    if isinstance(nn, ndarray):
        return nn
    # Networks expose Connections via `.connections`; Trainers wrap the
    # Network in `.module`.  Probe both duck-typed paths first.
    # FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # `except Exception:` keeps the intended fall-through behavior only.
    try:
        return weight_matrices(nn.connections)
    except Exception:
        pass
    try:
        return weight_matrices(nn.module)
    except Exception:
        pass
    # Network objects are ParameterContainer's too, but won't reshape into a single matrix,
    # so this must come after try nn.connections
    if isinstance(nn, (ParameterContainer, Connection)):
        return reshape(nn.params, (nn.outdim, nn.indim))
    if isinstance(nn, basestring):
        # Treat a string as a path to an XML-serialized pybrain network
        try:
            fn = nn
            nn = NetworkReader(fn, newfile=False)
            return weight_matrices(nn.readFrom(fn))
        except Exception:
            pass
    # FIXME: what does NetworkReader output? (Module? Layer?) need to handle it's type here
    try:
        # dict of {module: [connections]}, e.g. Network.connections
        return [weight_matrices(v) for (k, v) in nn.iteritems()]
    except Exception:
        try:
            connections = nn.module.connections.values()
            nn = []
            for conlist in connections:
                nn += conlist
            return weight_matrices(nn)
        except Exception:
            # plain iterable of pybrain objects
            return [weight_matrices(v) for v in nn]
# # FIXME: resolve all these NLP dependencies and get this working
# def dataset_from_time_series(df, N_inp=None, features=('moy',), verbosity=1):
# """Build a pybrains.dataset from the time series contained in a dataframe"""
# N_inp = N_inp or len(df.columns)
# features = features or []
# # Add features to input vector in reverse alphabetical order by feature name,
# # so woy will be added first, and date will be added last.
# # The order that the feature vectors should appear in the input vector to remain consistent
# # and neural net architecture can have structure that anticipates this.
# sorted_features = nlp.sort_strings(features, ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), case_sensitive=False)
# if verbosity > 0:
# print('dataset_from_thresh(features={0})'.format(features))
# samples, mean, std, thresh = simple_dataset_from_thresh(thresh, N=N, max_window=max_window,
# normalize=normalize, ignore_below=ignore_below)
# name = getattr(thresh, 'name', None)
# if name:
# name = normalize_building_name(name)
# if name:
# series = get_series(name)
# else:
# if isinstance(series, basestring):
# name = normalize_building_name(series.strip()) or thresh.name or 'Unknown'
# series = get_series(name)
# elif isinstance(series, pd.DataFrame):
# name = normalize_building_name(series.columns[0]) or thresh.name or 'Unknown'
# series = series[name]
# elif isinstance(series, pd.Series):
# name = normalize_building_name(series.name) or thresh.name or 'Unknown'
# else:
# name = None
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# # TODO: pre-process features list of strings in a separate function
# morn = 0
# # if the building name isn't known, you can't retrieve the morning load values for it
# if name:
# for s in features:
# if s.startswith('morn'):
# try:
# morn = int(s[4:])
# except:
# if verbosity > 0:
# warnings.warn('Unable to determine morning length from feature named "{0}" so using default '
# '(8 am = 8 * 4 = 32)')
# morn = 32 # default to 9 am morning ending
# break
# if verbosity > 0:
# print('In dataset_from_thresh() using {0} morning load values for Building {1}'
# ' because series arg is of type {2}'.format(morn, name, type(series)))
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + morn)
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(N + extras))
# ds = pb.datasets.SupervisedDataSet(N + extras, 1)
# first_date = samples[0]['target'].index[0].date().toordinal()
# last_date = samples[-1]['target'].index[0].date().toordinal()
# date_range = (last_date - first_date) or 1
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# if verbosity > 0:
# print('Adding features for building {3}, {0}, and a morning time series of len {2}, '
# 'to each of the {1} vectors (samples)'.format(features, len(samples), morn, name))
# for sampnum, sample in enumerate(samples):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# inputs = list(sample['input'].values)
# # the date we're trying to predict the rhreshold for
# timestamp = sample['target'].index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 1:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale /
# date_range ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\nFor sample {2}'
# ' and date {3}\nInput vector positions {4}:\nInput vector: {5}'.format(
# feature_name, sorted_features, sampnum, timestamp, ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(sample['target'].values))
# return ds, mean, std, thresh
def dataset_nan_locs(ds):
    """Locate NaN entries in a supervised dataset.

    Returns a list of dicts, one per sample that contains any NaN, with keys
    'sample' (sample index), 'input' (NaN positions in the input vector) and
    'output' (NaN positions in the target vector).

    Based on http://stackoverflow.com/a/14033137/623735
    """
    locations = []
    for i, pair in enumerate(ds):
        if not pd.isnull(pair).any():
            continue
        locations.append({
            'sample': i,
            'input': pd.isnull(pair[0]).nonzero()[0],
            'output': pd.isnull(pair[1]).nonzero()[0],
        })
    return locations
def table_nan_locs(table):
    """Return (row, column) index pairs for every NaN/null cell in `table`.

    `table` may be a sequence of rows (each a sequence of cells) or a mixed
    sequence containing scalars; a null scalar row is reported as column 0.

    Based on http://stackoverflow.com/a/14033137/623735
    """
    locs = []
    for rownum, row in enumerate(table):
        try:
            nulls = pd.isnull(row)
            if nulls.any():
                locs.extend((rownum, colnum) for colnum in nulls.nonzero()[0])
        except AttributeError:  # `row` is a scalar, so `nulls` is a plain bool
            if pd.isnull(row):
                locs.append((rownum, 0))
    return locs
def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):
    """Simulate `network` on dataset `ds` and plot Output vs Target.

    Identical to `trainer_results` except that the network and dataset are
    provided separately.  `save` may be True or a directory path for the PNG.
    Returns (network, mean, std) so callers can denormalize further results.
    """
    sim_df = sim_network(network=network, ds=ds, mean=mean, std=std)
    sim_df.plot()
    plt.xlabel('Date')
    plt.ylabel('Threshold (kW)')
    plt.title(title)
    if show:
        # ipython notebook overrides plt.show and doesn't have a block kwarg
        try:
            plt.show(block=False)
        except TypeError:
            plt.show()
    if save:
        filename = 'ann_performance_for_{0}.png'.format(title).replace(' ', '_')
        # a string `save` is interpreted as a destination directory
        if isinstance(save, basestring) and os.path.isdir(save):
            filename = os.path.join(save, filename)
        plt.savefig(filename)
        if not show:
            plt.clf()
    return network, mean, std
def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):
    """Plot the performance of the Network and SupervisedDataSet held by a pybrain Trainer.

    DataSet target and output values are denormalized before plotting with
    `output * std + mean`, inverting the normalization `(output - mean) / std`.

    Args:
      trainer (Trainer): pybrain Trainer containing a valid Network and DataSet
      mean (float): mean of the denormalized dataset (default: 0); affects only plot scale
      std (float): standard deviation of the denormalized dataset (default: 1)
      title (str): title to display on the plot
      show (bool): display the plot interactively
      save (bool or str): save the plot as a PNG (optionally into directory `save`)

    Returns:
      3-tuple: (trainer's network, mean, std) for denormalizing later results
    """
    return plot_network_results(network=trainer.module, ds=trainer.ds, mean=mean, std=std,
                                title=title, show=show, save=save)
def sim_trainer(trainer, mean=0, std=1):
    """Activate a Trainer's own DataSet through its network; return DataFrame(columns=['Output', 'Target'])."""
    return sim_network(network=trainer.module, ds=trainer.ds, mean=mean, std=std)
def sim_network(network, ds=None, index=None, mean=0, std=1):
    """Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])

    The DataSet's target and output values are denormalized before populating the dataframe columns:
        denormalized_output = normalized_output * std + mean
    which inverts the normalization that produced the normalized output in the first place:
        normalized_output = (denormalized_output - mean) / std

    Args:
      network (Network): pybrain Network (or a Trainer wrapping one) to activate on `ds`
      ds (DataSet): pybrain DataSet with 'input' and 'target' fields; defaults to `network.ds`
      index (sequence): optional index for the returned DataFrame (e.g. timestamps);
          defaults to range(len(ds['input']))
      mean (float): mean of the denormalized dataset (default: 0)
      std (float): standard deviation of the denormalized dataset (default: 1)

    Returns:
      DataFrame: DataFrame with columns "Output" and "Target" suitable for df.plot-ting
    """
    # A Trainer (or anything else wrapping a Network in `.module`) is unwrapped first.
    # may want to also check: isinstance(network.module, (networks.Network, modules.Module))
    if hasattr(network, 'module') and hasattr(network.module, 'activate'):
        network = network.module
    ds = ds or network.ds
    if not ds:
        raise RuntimeError("Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, "
                           " to plot the outputs. A dataset can be provided as part of a network instance or "
                           "as a separate kwarg if `network` is used to provide the `pybrain.Network`"
                           " instance directly.")
    # FIX: `range` instead of `xrange` keeps this Python 2/3 compatible (iteration behavior identical)
    results_generator = ((network.activate(ds['input'][i])[0] * std + mean, ds['target'][i][0] * std + mean)
                         for i in range(len(ds['input'])))
    # FIX: `index or ...` raised ValueError for array-like indexes (e.g. a DatetimeIndex
    # has no unambiguous truth value); test explicitly against None instead.
    if index is None:
        index = range(len(ds['input']))
    return pd.DataFrame(results_generator, columns=['Output', 'Target'], index=index)
|
def prepend_dataset_with_weather(samples, location='Fresno, CA', weather_columns=None, use_cache=True, verbosity=0):
    """Prepend weather values (e.g. 'Max TemperatureF') to each sample's 'input' vector.

    samples[0..N]['target'] must have an index whose first entry is the date timestamp.
    If you use_cache for the current year, you may not get the most recent data.

    Arguments:
      samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame}
      location (str): city name or airport code passed through to `weather.daily`
      weather_columns (list of str or int): weather DataFrame column labels (or
          integer positions) to prepend; falsy means no-op

    Returns:
      list of dict: the same `samples` list, with each 'input' mutated in place
    """
    if verbosity > 1:
        print('Prepending weather data for {} to dataset samples'.format(weather_columns))
    if not weather_columns:
        return samples
    timestamps = pd.DatetimeIndex([s['target'].index[0] for s in samples])
    years = range(timestamps.min().date().year, timestamps.max().date().year + 1)
    weather_df = weather.daily(location=location, years=years, use_cache=use_cache)
    # FIXME: weather_df.resample('D') fails
    weather_df.index = [d.date() for d in weather_df.index]
    if verbosity > 1:
        print('Retrieved weather for years {}:'.format(years))
        print(weather_df)
    # allow integer column positions as well as labels
    weather_columns = [label if label in weather_df.columns else weather_df.columns[int(label)]
                       for label in (weather_columns or [])]
    for sampnum, sample in enumerate(samples):
        timestamp = timestamps[sampnum]
        try:
            weather_day = weather_df.loc[timestamp.date()]
        except Exception:
            from traceback import print_exc
            print_exc()
            weather_day = {}
            if verbosity >= 0:
                warnings.warn('Unable to find weather for the date {}'.format(timestamp.date()))
        sample['input'] = [weather_day.get(label, None) for label in weather_columns] + list(sample['input'])
        # FIX: missing features are inserted as None (and real gaps may be NaN); the old
        # check compared against NaN with `in`/`==`, which is never True (NaN != NaN),
        # so the warning could never fire.  pd.isnull covers both None and NaN.
        missing = [label for i, label in enumerate(weather_columns) if pd.isnull(sample['input'][i])]
        if verbosity > 0 and missing:
            warnings.warn('Unable to find weather features {} in the weather for date {}'.format(
                missing, timestamp))
    return samples
samples[0..N]['target'] should have an index with the date timestamp
If you use_cache for the curent year, you may not get the most recent data.
Arguments:
samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame} | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/util.py#L127-L166 | [
"def daily(location='Fresno, CA', years=1, use_cache=True, verbosity=1):\n \"\"\"Retrieve weather for the indicated airport code or 'City, ST' string.\n\n >>> df = daily('Camas, WA', verbosity=-1)\n >>> 365 <= len(df) <= 365 * 2 + 1\n True\n\n Sacramento data has gaps (airport KMCC):\n 8/21/2013 is missing from 2013.\n Whole months are missing from 2014.\n >>> df = daily('Sacramento, CA', years=[2013], verbosity=-1)\n >>> 364 <= len(df) <= 365\n True\n >>> df.columns\n Index([u'PST', u'Max TemperatureF', u'Mean TemperatureF', u'Min TemperatureF', u'Max Dew PointF', u'MeanDew PointF', u'Min DewpointF', ...\n \"\"\"\n this_year = datetime.date.today().year\n if isinstance(years, (int, float)):\n # current (incomplete) year doesn't count in total number of years\n # so 0 would return this calendar year's weather data\n years = np.arange(0, int(years) + 1)\n years = sorted(years)\n if not all(1900 <= yr <= this_year for yr in years):\n years = np.array([abs(yr) if (1900 <= abs(yr) <= this_year) else (this_year - abs(int(yr))) for yr in years])[::-1]\n\n airport_code = airport(location, default=location)\n\n # refresh the cache each time the start or end year changes\n cache_path = 'daily-{}-{}-{}.csv'.format(airport_code, years[0], years[-1])\n cache_path = os.path.join(CACHE_PATH, cache_path)\n if use_cache:\n try:\n return pd.DataFrame.from_csv(cache_path)\n except:\n pass\n\n df = pd.DataFrame()\n for year in years:\n url = ('http://www.wunderground.com/history/airport/{airport}/{yearstart}/1/1/' +\n 'CustomHistory.html?dayend=31&monthend=12&yearend={yearend}' +\n '&req_city=&req_state=&req_statename=&reqdb.zip=&reqdb.magic=&reqdb.wmo=&MR=1&format=1').format(\n airport=airport_code,\n yearstart=year,\n yearend=year\n )\n if verbosity > 1:\n print('GETing *.CSV using \"{0}\"'.format(url))\n buf = urllib.urlopen(url).read()\n if verbosity > 0:\n N = buf.count('\\n')\n M = (buf.count(',') + N) / float(N)\n print('Retrieved CSV for airport code \"{}\" with appox. 
{} lines and {} columns = {} cells.'.format(\n airport_code, N, int(round(M)), int(round(M)) * N))\n if verbosity > 2:\n print(buf)\n table = util.read_csv(buf, format='header+values-list', numbers=True)\n # # clean up the last column (if it contains <br> tags)\n table = [util.strip_br(row) if len(row) > 1 else row for row in table]\n # numcols = max(len(row) for row in table)\n # table = [row for row in table if len(row) == numcols]\n columns = table.pop(0)\n tzs = [s for s in columns if (s[1:] in ['ST', 'DT'] and s[0] in 'PMCE')]\n dates = [float('nan')] * len(table)\n for i, row in enumerate(table):\n for j, value in enumerate(row):\n if not value and value is not None:\n value = 0\n continue\n if columns[j] in tzs:\n table[i][j] = util.make_tz_aware(value, tz=columns[j])\n if isinstance(table[i][j], datetime.datetime):\n dates[i] = table[i][j]\n continue\n try:\n table[i][j] = float(value)\n if not (table[i][j] % 1):\n table[i][j] = int(table[i][j])\n except:\n pass\n df0 = pd.DataFrame(table, columns=columns, index=dates)\n df = df.append(df0)\n\n if verbosity > 1:\n print(df)\n\n try:\n df.to_csv(cache_path)\n except:\n if verbosity > 0 and use_cache:\n from traceback import print_exc\n print_exc()\n warnings.warn('Unable to write weather data to cache file at {}'.format(cache_path))\n\n return df\n"
] | """Maniuplate, analyze and plot `pybrain` `Network` and `DataSet` objects
TODO:
Incorporate into pybrain fork so pug doesn't have to depend on pybrain
"""
from __future__ import print_function
import os
import warnings
import pandas as pd
from scipy import ndarray, reshape # array, amin, amax,
np = pd.np
from matplotlib import pyplot as plt
import pybrain.datasets
import pybrain.structure
import pybrain.supervised
import pybrain.tools
pb = pybrain
# from pybrain.supervised.trainers import Trainer
from pybrain.tools.customxml import NetworkReader
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.connections.connection import Connection
from pug.nlp.util import tuplify, fuzzy_get
from pug.ann.data import weather
#import pug.nlp.util as nlp
# print(os.path.realpath(__file__))
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(DATA_PATH, '..', 'data')
LAYER_TYPES = set([getattr(pybrain.structure, s) for s in
dir(pybrain.structure) if s.endswith('Layer')])
FAST = False
try:
from arac.pybrainbridge import _FeedForwardNetwork as FeedForwardNetwork
FAST = True
except:
from pybrain.structure import FeedForwardNetwork
print("No fast (ARAC, a FC++ library) FeedForwardNetwork was found, "
"so using the slower pybrain python implementation of FFNN.")
def normalize_layer_type(layer_type):
    """Resolve `layer_type` into pybrain layer class(es).

    Accepts a pybrain layer class already in LAYER_TYPES (returned as-is),
    a string naming a `pb.structure` attribute with or without the 'Layer'
    suffix (e.g. 'Linear' -> LinearLayer), or a sequence of either, which
    is resolved recursively into a list.
    """
    # Already a known layer class?  `in` on a set raises TypeError for
    # unhashable inputs (e.g. lists), which we deliberately fall through.
    try:
        if layer_type in LAYER_TYPES:
            return layer_type
    except TypeError:
        pass
    # A string naming the class exactly, e.g. 'LinearLayer'.
    # Non-strings have no .strip(), raising AttributeError -> fall through.
    try:
        return getattr(pb.structure, layer_type.strip())
    except AttributeError:
        pass
    # A string naming the class without the 'Layer' suffix, e.g. 'Linear'.
    try:
        return getattr(pb.structure, layer_type.strip() + 'Layer')
    except AttributeError:
        pass
    # Otherwise assume a sequence of layer types and resolve each one.
    return [normalize_layer_type(lt) for lt in layer_type]
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
    """Build a feed-forward neural net with the indicated input, hidden, and output dimensions.

    Arguments:
      N_input (int): width of the input layer (default: 1)
      N_hidden (int or sequence of int): width(s) of the hidden layer(s);
        falsy means no hidden layer (default: 2)
      N_output (int): width of the output layer (default: 1)
      hidden_layer_type (str, type, or sequence): pybrain layer type(s) for the
        hidden layer(s), resolved by `normalize_layer_type`; must be the same
        length as `N_hidden` (default: 'Linear')
      verbosity (int): > 0 prints layer/connection info

    Returns:
      FeedForwardNetwork: fully-connected bias + input + hidden(s) + output net
    """
    N_input = N_input or 1
    N_output = N_output or 1
    N_hidden = N_hidden or tuple()
    if isinstance(N_hidden, (int, float, basestring)):
        N_hidden = (int(N_hidden),)
    hidden_layer_type = hidden_layer_type or tuple()
    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
    if verbosity > 0:
        print(N_hidden, ' layers of type ', hidden_layer_type)
    assert(len(N_hidden) == len(hidden_layer_type))
    nn = pb.structure.FeedForwardNetwork()
    # layers: the first hidden layer is named 'hidden', subsequent ones 'hidden-<i>'
    nn.addInputModule(pb.structure.BiasUnit(name='bias'))
    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):
        Nhid = int(Nhid)
        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))
    # connections: bias and input feed the first hidden layer (or output directly)
    nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))
    nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):
        Nhid = int(Nhid)
        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],
                                                     nn['hidden-{}'.format(i + 1)]))
    # FIX: with no hidden layers, the old code computed i = -1 and tried to
    # connect nonexistent nn['hidden--1'] -> KeyError; input/bias are already
    # wired straight to the output in that case, so only connect when hidden
    # layers exist.
    if N_hidden:
        i = len(N_hidden) - 1
        nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))
    nn.sortModules()
    if FAST:
        try:
            nn.convertToFastNetwork()
        except Exception:  # FIX: was a bare except; don't swallow KeyboardInterrupt
            if verbosity > 0:
                print('Unable to convert slow PyBrain NN to a fast ARAC network...')
    if verbosity > 0:
        print(nn.connections)
    return nn
def ann_from_ds(ds=None, N_input=3, N_hidden=0, N_output=1, verbosity=1):
    """Size and build a feed-forward net to match a pybrain DataSet.

    Input/output layer widths come from the dataset's `indim`/`outdim` when
    available; the hidden-layer width is capped relative to the number of
    samples (and floored at the output width) to limit overfitting.
    """
    n_in = getattr(ds, 'indim', N_input)
    n_out = getattr(ds, 'outdim', N_output)
    if not N_hidden:
        N_hidden = getattr(ds, 'paramdim', N_hidden + n_in + n_out) - n_in - n_out
    # roughly one hidden unit per 5 * n_in * n_out samples, but at least n_out
    capped = min(N_hidden, len(ds) / float(n_in) / float(n_out) / 5.)
    n_hid = max(round(capped), n_out)
    return build_ann(N_input=n_in, N_hidden=n_hid, N_output=n_out, verbosity=verbosity)
def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):
    """Compose a pybrain.dataset from a pandas DataFrame
    Arguments:
      delays (list of int): sample delays to use for the input tapped delay line
        Positive and negative values are treated the same as sample counts into the past.
        default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
      inputs (list of int or list of str): column indices or labels for the inputs
      outputs (list of int or list of str): column indices or labels for the outputs
      normalize (bool): whether to divide each input to be normally distributed about 0 with std 1
    Returns:
      3-tuple: tuple(dataset, list of means, list of stds) when `normalize` is True,
        otherwise just the dataset.
        means and stds allow normalization of new inputs and denormalization of the outputs
    TODO:
      Detect categorical variables with low dimensionality and split into separate bits
        Vowpel Wabbit hashes strings into an int?
      Detect ordinal variables and convert to continuous int sequence
      SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """
    if isinstance(delays, int):
        if delays:
            delays = range(1, delays + 1)
        else:
            delays = [0]
    delays = np.abs(np.array([int(i) for i in delays]))
    # resolve integer column positions to labels, then fuzzy-match labels
    inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]
    outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]
    inputs = [fuzzy_get(df.columns, i) for i in inputs]
    outputs = [fuzzy_get(df.columns, o) for o in outputs]
    N_inp = len(inputs)
    N_out = len(outputs)
    inp_outs = inputs + outputs
    if verbosity > 0:
        print("inputs: {}\noutputs: {}\ndelays: {}\n".format(inputs, outputs, delays))
    means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))
    if normalize:
        means, stds = df[inp_outs].mean(), df[inp_outs].std()
    if normalize and verbosity > 0:
        print("Input mean values (used to normalize input biases): {}".format(means[:N_inp]))
        print("Output mean values (used to normalize output biases): {}".format(means[N_inp:]))
    ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)
    if verbosity > 0:
        print("Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs".format(
            len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))
    # FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time
    # FIX: `if delays == np.array([0])` raised "truth value ... is ambiguous" (ValueError)
    # whenever len(delays) > 1 -- i.e. with the default (1, 2, 3).  np.array_equal gives
    # the intended elementwise comparison as a single bool.
    if np.array_equal(delays, [0]) and not normalize:
        if verbosity > 0:
            print("No tapped delay lines (delays) were requested, so using undelayed features for the dataset.")
        assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])
        ds.setField('input', df[inputs].values)
        ds.setField('target', df[outputs].values)
        ds.linkFields(['input', 'target'])
        # for inp, outp in zip(df[inputs].values, df[outputs].values):
        #     ds.appendLinked(inp, outp)
        assert(len(ds['input']) == len(ds['target']))
    else:
        for i, out_vec in enumerate(df[outputs].values):
            if verbosity > 0 and i % 100 == 0:
                print("{}%".format(i / .01 / len(df)))
            elif verbosity > 1:
                print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))
            # skip samples that do not have a full tapped-delay history yet
            if i < max(delays):
                continue
            inp_vec = []
            for delay in delays:
                inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])
            ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])
    if verbosity > 0:
        print("Dataset now has {} samples".format(len(ds)))
    if normalize:
        return ds, means, stds
    else:
        return ds
# def dataset_from_feature_names(df, delays=(1, 2,), quantiles=(), input_columns=(0,), target_columns=(-1,),
# weather_columns=(), features=(), verbosity=1):
# """FIXME: Untested. Transform Datafram columns and append to input_columns as additional features
# Arguments:
# features (seq of str): names of feautures to be appended to the input vector (feature set)
# """
# raise NotImplementedError("Better to implement this as a separate feature transformation function")
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + len(quantiles)
# + len(weather_columns)
# )
# N = max(list(delays) + list(quantiles))
# last_date = first_date = 0
# try:
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(len(N) + extras))
# print('Starting to augment {0}x{1} sample inputs by adding adding {2} additional features'.format(
# len(df), len(input_columns), extras))
# if verbosity > 0:
# print('The first sample input was {0}'.format(
# df[input_columns].iloc[0]))
# print('The first sample target was {0}'.format(
# df[target_columns].iloc[0]))
# print('The last sample input was {0}'.format(
# df[input_columns].iloc[-1]))
# print('The last sample target was {0}'.format(
# df[target_columns].iloc[-1]))
# first_date = df.index.iloc[0].date().toordinal()
# last_date = df.index.iloc[-1].date().toordinal()
# except:
# if verbosity > -1:
# from traceback import format_exc
# warnings.warn(format_exc())
# if verbosity > 1:
# import ipdb
# ipdb.set_trace()
# date_range = (last_date - first_date) + 1 or 1
# # FIXME: scale each feature/column/dimension independently using pug.ann.util.dataset_from_dataframe
# # but mean and std become vectors of the same dimension as the feature/input vector.
# # scikit-learn has transformations that do this more reasonably
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# # convert the list of dicts ((input, output) supervised dataset pairs) into a pybrains.Dataset
# ds = pybrain.datasets.SupervisedDataSet(len(N) + extras, 1)
# sorted_features = sorted(features)
# for sampnum, (input_vector, target_vector) in enumerate(
# zip(df[input_columns].values, df[target_columns].values)):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# # inputs = list(sample['input'])
# # the date we're trying to predict the threshold for
# timestamp = target_vector.index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 2:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale / date_range
# ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\n'.format(
# feature_name, sorted_features)
# msg += 'For sample {} and date {}\n'.format(sampnum, timestamp)
# msg += 'Input vector positions {}:\nInput vector: {}'.format(
# ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(target_vector.values))
def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build a dataset whose output/target vectors are left empty.

    Same contract as `dataset_from_dataframe`; only the default for
    `outputs` differs (None).
    """
    return dataset_from_dataframe(
        df=df, delays=delays, inputs=inputs, outputs=outputs,
        normalize=normalize, verbosity=verbosity)
def inputs_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Return just the 'input' vectors of a dataset built from `df`.

    Same contract as `dataset_from_dataframe`, except that `outputs` defaults
    to None and only the DataSet's 'input' field is returned.
    """
    built = input_dataset_from_dataframe(df=df, delays=delays, inputs=inputs, outputs=outputs,
                                         normalize=normalize, verbosity=verbosity)
    return built['input']
def build_trainer(nn, ds, verbosity=1):
    """Return an RPropMinusTrainer wired to network `nn` and pybrain dataset `ds` (batch learning)."""
    return pb.supervised.trainers.rprop.RPropMinusTrainer(
        nn, dataset=ds, batchlearning=True, verbose=bool(verbosity))
def weight_matrices(nn):
    """Extract list of weight matrices from a Network, Layer (module), Trainer, Connection or other pybrain object.

    Accepts an ndarray (returned as-is), a ParameterContainer/Connection
    (reshaped to (outdim, indim)), a filename of a saved network, a dict of
    connections, or any nested sequence of these; the result mirrors the
    nesting as (lists of) 2-D weight arrays.
    """
    if isinstance(nn, ndarray):
        return nn
    # Networks expose Connections via `.connections`; Trainers wrap the
    # Network in `.module`.  Probe both duck-typed paths first.
    # FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # `except Exception:` keeps the intended fall-through behavior only.
    try:
        return weight_matrices(nn.connections)
    except Exception:
        pass
    try:
        return weight_matrices(nn.module)
    except Exception:
        pass
    # Network objects are ParameterContainer's too, but won't reshape into a single matrix,
    # so this must come after try nn.connections
    if isinstance(nn, (ParameterContainer, Connection)):
        return reshape(nn.params, (nn.outdim, nn.indim))
    if isinstance(nn, basestring):
        # Treat a string as a path to an XML-serialized pybrain network
        try:
            fn = nn
            nn = NetworkReader(fn, newfile=False)
            return weight_matrices(nn.readFrom(fn))
        except Exception:
            pass
    # FIXME: what does NetworkReader output? (Module? Layer?) need to handle it's type here
    try:
        # dict of {module: [connections]}, e.g. Network.connections
        return [weight_matrices(v) for (k, v) in nn.iteritems()]
    except Exception:
        try:
            connections = nn.module.connections.values()
            nn = []
            for conlist in connections:
                nn += conlist
            return weight_matrices(nn)
        except Exception:
            # plain iterable of pybrain objects
            return [weight_matrices(v) for v in nn]
# # FIXME: resolve all these NLP dependencies and get this working
# def dataset_from_time_series(df, N_inp=None, features=('moy',), verbosity=1):
# """Build a pybrains.dataset from the time series contained in a dataframe"""
# N_inp = N_inp or len(df.columns)
# features = features or []
# # Add features to input vector in reverse alphabetical order by feature name,
# # so woy will be added first, and date will be added last.
# # The order that the feature vectors should appear in the input vector to remain consistent
# # and neural net architecture can have structure that anticipates this.
# sorted_features = nlp.sort_strings(features, ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), case_sensitive=False)
# if verbosity > 0:
# print('dataset_from_thresh(features={0})'.format(features))
# samples, mean, std, thresh = simple_dataset_from_thresh(thresh, N=N, max_window=max_window,
# normalize=normalize, ignore_below=ignore_below)
# name = getattr(thresh, 'name', None)
# if name:
# name = normalize_building_name(name)
# if name:
# series = get_series(name)
# else:
# if isinstance(series, basestring):
# name = normalize_building_name(series.strip()) or thresh.name or 'Unknown'
# series = get_series(name)
# elif isinstance(series, pd.DataFrame):
# name = normalize_building_name(series.columns[0]) or thresh.name or 'Unknown'
# series = series[name]
# elif isinstance(series, pd.Series):
# name = normalize_building_name(series.name) or thresh.name or 'Unknown'
# else:
# name = None
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# # TODO: pre-process features list of strings in a separate function
# morn = 0
# # if the building name isn't known, you can't retrieve the morning load values for it
# if name:
# for s in features:
# if s.startswith('morn'):
# try:
# morn = int(s[4:])
# except:
# if verbosity > 0:
# warnings.warn('Unable to determine morning length from feature named "{0}" so using default '
# '(8 am = 8 * 4 = 32)')
# morn = 32 # default to 9 am morning ending
# break
# if verbosity > 0:
# print('In dataset_from_thresh() using {0} morning load values for Building {1}'
# ' because series arg is of type {2}'.format(morn, name, type(series)))
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + morn)
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(N + extras))
# ds = pb.datasets.SupervisedDataSet(N + extras, 1)
# first_date = samples[0]['target'].index[0].date().toordinal()
# last_date = samples[-1]['target'].index[0].date().toordinal()
# date_range = (last_date - first_date) or 1
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# if verbosity > 0:
# print('Adding features for building {3}, {0}, and a morning time series of len {2}, '
# 'to each of the {1} vectors (samples)'.format(features, len(samples), morn, name))
# for sampnum, sample in enumerate(samples):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# inputs = list(sample['input'].values)
# # the date we're trying to predict the rhreshold for
# timestamp = sample['target'].index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 1:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale /
# date_range ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\nFor sample {2}'
# ' and date {3}\nInput vector positions {4}:\nInput vector: {5}'.format(
# feature_name, sorted_features, sampnum, timestamp, ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(sample['target'].values))
# return ds, mean, std, thresh
def dataset_nan_locs(ds):
    """Locate NaN/null entries in a supervised dataset's samples.

    Returns a list of dicts, one per offending sample:
    {'sample': index, 'input': nan column indices, 'output': nan column indices}.
    Based on http://stackoverflow.com/a/14033137/623735
    """
    locations = []
    for i, samp in enumerate(ds):
        if not pd.isnull(samp).any():
            continue
        locations.append({
            'sample': i,
            'input': pd.isnull(samp[0]).nonzero()[0],
            'output': pd.isnull(samp[1]).nonzero()[0],
        })
    return locations
def table_nan_locs(table):
    """List (row, col) coordinates of every NaN/null in a table.

    `table` may be a sequence of rows or a sequence of scalars; scalar rows
    (whose null-mask is a plain bool without `.any()`) are reported at
    column 0.  Based on http://stackoverflow.com/a/14033137/623735
    """
    coords = []
    for i, row in enumerate(table):
        try:
            mask = pd.isnull(row)
            if mask.any():
                coords.extend((i, j) for j in mask.nonzero()[0])
        except AttributeError:  # row is a scalar, so the mask is a plain bool
            if pd.isnull(row):
                coords.append((i, 0))
    return coords
def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):
    """Identical to plot_trainer except `network` and `ds` must be provided separately

    Simulates `network` on `ds` (denormalizing with `mean`/`std`), plots
    Output vs Target, and optionally shows and/or saves the figure.
    `save` may be True or a directory path for the PNG named after `title`.

    Returns:
        3-tuple: (network, mean, std)
    """
    df = sim_network(network=network, ds=ds, mean=mean, std=std)
    df.plot()
    plt.xlabel('Date')
    plt.ylabel('Threshold (kW)')
    plt.title(title)
    if show:
        try:
            # ipython notebook overrides plt.show and doesn't have a block kwarg
            plt.show(block=False)
        except TypeError:
            plt.show()
    if save:
        # spaces in the title would produce awkward filenames
        filename = 'ann_performance_for_{0}.png'.format(title).replace(' ', '_')
        if isinstance(save, basestring) and os.path.isdir(save):
            filename = os.path.join(save, filename)
        plt.savefig(filename)
        if not show:
            # clear the figure when it was saved but never displayed
            plt.clf()
    return network, mean, std
def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):
    """Plot Output vs Target for the Network and DataSet held by a pybrain Trainer.

    Values are denormalized (x * std + mean) before plotting, inverting the
    (x - mean) / std normalization applied when the DataSet was built.

    Args:
        trainer (Trainer): pybrain Trainer containing a valid Network and DataSet
        mean (float): mean of the denormalized dataset (plot scale only; default 0)
        std (float): standard deviation of the denormalized dataset (default 1)
        title (str): plot title (also used for the saved filename)
        show (bool): display the figure
        save (bool or str): save a PNG; a str is treated as the target directory

    Returns:
        3-tuple: (trainer.module, mean, std)
    """
    plot_kwargs = dict(network=trainer.module, ds=trainer.ds,
                       mean=mean, std=std, title=title, show=show, save=save)
    return plot_network_results(**plot_kwargs)
def sim_trainer(trainer, mean=0, std=1):
    """Activate a Trainer's own DataSet through its Network.

    Returns:
        DataFrame: columns ['Output', 'Target'], denormalized by mean/std
        (see `sim_network`).
    """
    return sim_network(trainer.module, ds=trainer.ds, mean=mean, std=std)
def sim_network(network, ds=None, index=None, mean=0, std=1):
    """Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])
    The DataSet's target and output values are denormalized before populating the dataframe columns:
        denormalized_output = normalized_output * std + mean
    Which inverses the normalization that produced the normalized output in the first place:
        normalized_output = (denormalized_output - mean) / std
    Args:
        network (Network): a pybrain Network instance to activate with the provided DataSet, `ds`
        ds (DataSet): a pybrain DataSet to activate the Network on to produce an output sequence
        index (sequence): optional index for the returned DataFrame (default: range of sample numbers)
        mean (float): mean of the denormalized dataset (default: 0)
        std (float): std (standard deviation) of the denormalized dataset (default: 1)
    Returns:
        DataFrame: DataFrame with columns "Output" and "Target" suitable for df.plot-ting
    """
    # just in case network is a trainer or has a Module-derived instance as one of it's attribute
    # isinstance(network.module, (networks.Network, modules.Module))
    if hasattr(network, 'module') and hasattr(network.module, 'activate'):
        # may want to also check: isinstance(network.module, (networks.Network, modules.Module))
        network = network.module
    # NOTE(review): relies on DataSet truthiness (an empty `ds` falls back to
    # network.ds) -- confirm pybrain DataSets define a sensible __bool__/__len__
    ds = ds or network.ds
    if not ds:
        raise RuntimeError("Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, "
                           " to plot the outputs. A dataset can be provided as part of a network instance or "
                           "as a separate kwarg if `network` is used to provide the `pybrain.Network`"
                           " instance directly.")
    # activate each input vector and denormalize both output and target (Python 2 `xrange`)
    results_generator = ((network.activate(ds['input'][i])[0] * std + mean, ds['target'][i][0] * std + mean)
                         for i in xrange(len(ds['input'])))
    # NOTE(review): `index or range(...)` -- a non-empty pandas Index passed as
    # `index` may raise "truth value ambiguous"; verify callers pass list/None
    return pd.DataFrame(results_generator, columns=['Output', 'Target'], index=index or range(len(ds['input'])))
|
hobson/pug-ann | pug/ann/util.py | dataset_from_dataframe | python | def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):
if isinstance(delays, int):
if delays:
delays = range(1, delays + 1)
else:
delays = [0]
delays = np.abs(np.array([int(i) for i in delays]))
inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]
outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]
inputs = [fuzzy_get(df.columns, i) for i in inputs]
outputs = [fuzzy_get(df.columns, o) for o in outputs]
N_inp = len(inputs)
N_out = len(outputs)
inp_outs = inputs + outputs
if verbosity > 0:
print("inputs: {}\noutputs: {}\ndelays: {}\n".format(inputs, outputs, delays))
means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))
if normalize:
means, stds = df[inp_outs].mean(), df[inp_outs].std()
if normalize and verbosity > 0:
print("Input mean values (used to normalize input biases): {}".format(means[:N_inp]))
print("Output mean values (used to normalize output biases): {}".format(means[N_inp:]))
ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)
if verbosity > 0:
print("Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs".format(
len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))
# FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time
if delays == np.array([0]) and not normalize:
if verbosity > 0:
print("No tapped delay lines (delays) were requested, so using undelayed features for the dataset.")
assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])
ds.setField('input', df[inputs].values)
ds.setField('target', df[outputs].values)
ds.linkFields(['input', 'target'])
# for inp, outp in zip(df[inputs].values, df[outputs].values):
# ds.appendLinked(inp, outp)
assert(len(ds['input']) == len(ds['target']))
else:
for i, out_vec in enumerate(df[outputs].values):
if verbosity > 0 and i % 100 == 0:
print("{}%".format(i / .01 / len(df)))
elif verbosity > 1:
print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))
if i < max(delays):
continue
inp_vec = []
for delay in delays:
inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])
ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])
if verbosity > 0:
print("Dataset now has {} samples".format(len(ds)))
if normalize:
return ds, means, stds
else:
return ds | Compose a pybrain.dataset from a pandas DataFrame
Arguments:
delays (list of int): sample delays to use for the input tapped delay line
Positive and negative values are treated the same as sample counts into the past.
default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
inputs (list of int or list of str): column indices or labels for the inputs
outputs (list of int or list of str): column indices or labels for the outputs
normalize (bool): whether to divide each input to be normally distributed about 0 with std 1
Returns:
3-tuple: tuple(dataset, list of means, list of stds)
means and stds allow normalization of new inputs and denormalization of the outputs
TODO:
Detect categorical variables with low dimensionality and split into separate bits
Vowpel Wabbit hashes strings into an int?
Detect ordinal variables and convert to continuous int sequence
SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/util.py#L169-L248 | null | """Maniuplate, analyze and plot `pybrain` `Network` and `DataSet` objects
TODO:
Incorporate into pybrain fork so pug doesn't have to depend on pybrain
"""
from __future__ import print_function
import os
import warnings
import pandas as pd
from scipy import ndarray, reshape # array, amin, amax,
np = pd.np
from matplotlib import pyplot as plt
import pybrain.datasets
import pybrain.structure
import pybrain.supervised
import pybrain.tools
pb = pybrain
# from pybrain.supervised.trainers import Trainer
from pybrain.tools.customxml import NetworkReader
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.connections.connection import Connection
from pug.nlp.util import tuplify, fuzzy_get
from pug.ann.data import weather
#import pug.nlp.util as nlp
# print(os.path.realpath(__file__))
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(DATA_PATH, '..', 'data')
LAYER_TYPES = set([getattr(pybrain.structure, s) for s in
dir(pybrain.structure) if s.endswith('Layer')])
FAST = False
try:
from arac.pybrainbridge import _FeedForwardNetwork as FeedForwardNetwork
FAST = True
except:
from pybrain.structure import FeedForwardNetwork
print("No fast (ARAC, a FC++ library) FeedForwardNetwork was found, "
"so using the slower pybrain python implementation of FFNN.")
def normalize_layer_type(layer_type):
    """Resolve a pybrain layer class from a class, a name string, or a sequence of either.

    A class already in LAYER_TYPES is returned as-is; a string such as
    'Linear' or 'LinearLayer' is looked up on `pb.structure`; anything else
    iterable is normalized element-wise (recursively).
    """
    try:
        if layer_type in LAYER_TYPES:
            return layer_type
    except TypeError:  # unhashable argument cannot be a layer class
        pass
    name = None
    try:
        name = layer_type.strip()
    except AttributeError:  # not a string
        pass
    if name is not None:
        for candidate in (name, name + 'Layer'):
            try:
                return getattr(pb.structure, candidate)
            except AttributeError:
                continue
    return [normalize_layer_type(lt) for lt in layer_type]
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
    """Build a feed-forward neural net with the indicated input, hidden, and output dimensions

    Arguments:
        N_input (int): input layer width (default 1)
        N_hidden (int or sequence of int): width of each hidden layer; 0/empty means no hidden layer
        N_output (int): output layer width (default 1)
        hidden_layer_type: layer class/name (or sequence thereof), one per hidden layer
        verbosity (int): > 0 prints construction progress

    Returns:
        FeedForwardNetwork with bias + input + hidden + output modules, fully connected
    """
    N_input = N_input or 1
    N_output = N_output or 1
    N_hidden = N_hidden or tuple()
    if isinstance(N_hidden, (int, float, basestring)):
        N_hidden = (int(N_hidden),)
    hidden_layer_type = hidden_layer_type or tuple()
    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
    if verbosity > 0:
        print(N_hidden, ' layers of type ', hidden_layer_type)
    assert(len(N_hidden) == len(hidden_layer_type))
    nn = pb.structure.FeedForwardNetwork()
    # layers
    nn.addInputModule(pb.structure.BiasUnit(name='bias'))
    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):
        Nhid = int(Nhid)
        # first hidden layer is named 'hidden', subsequent ones 'hidden-1', 'hidden-2', ...
        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))
    # connections: bias and input feed the first hidden layer, or the output directly if no hidden layer
    nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))
    nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):
        Nhid = int(Nhid)
        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],
                                                     nn['hidden-{}'.format(i + 1)]))
    if N_hidden:
        # FIX: previously executed unconditionally, so an empty N_hidden
        # produced i = -1 and a KeyError on nn['hidden--1']; with no hidden
        # layer, bias/input are already connected straight to the output.
        i = len(N_hidden) - 1
        nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))
    nn.sortModules()
    if FAST:
        try:
            nn.convertToFastNetwork()
        except Exception:  # narrowed from bare `except:`
            if verbosity > 0:
                print('Unable to convert slow PyBrain NN to a fast ARAC network...')
    if verbosity > 0:
        print(nn.connections)
    return nn
def ann_from_ds(ds=None, N_input=3, N_hidden=0, N_output=1, verbosity=1):
    """Size and build a FeedForwardNetwork to match a pybrain DataSet.

    Input/output widths come from the DataSet (`indim`/`outdim`) when
    available; the hidden width defaults to the DataSet's spare parameter
    count and is capped at len(ds) / N_input / N_output / 5 (but at least
    N_output) to limit overfitting.
    """
    N_input = getattr(ds, 'indim', N_input)
    N_output = getattr(ds, 'outdim', N_output)
    if not N_hidden:
        N_hidden = getattr(ds, 'paramdim', N_hidden + N_input + N_output) - N_input - N_output
    cap = len(ds) / float(N_input) / float(N_output) / 5.
    N_hidden = max(round(min(N_hidden, cap)), N_output)
    return build_ann(N_input=N_input, N_hidden=N_hidden, N_output=N_output, verbosity=verbosity)
def prepend_dataset_with_weather(samples, location='Fresno, CA', weather_columns=None, use_cache=True, verbosity=0):
    """ Prepend weather the values specified (e.g. Max TempF) to the samples[0..N]['input'] vectors
    samples[0..N]['target'] should have an index with the date timestamp
    If you use_cache for the curent year, you may not get the most recent data.
    Arguments:
        samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame}
    """
    if verbosity > 1:
        print('Prepending weather data for {} to dataset samples'.format(weather_columns))
    if not weather_columns:
        return samples
    # one timestamp per sample, taken from the first index entry of each target
    timestamps = pd.DatetimeIndex([s['target'].index[0] for s in samples])
    years = range(timestamps.min().date().year, timestamps.max().date().year + 1)
    weather_df = weather.daily(location=location, years=years, use_cache=use_cache)
    # FIXME: weather_df.resample('D') fails
    # reindex by plain date objects so per-sample .loc lookups by date work
    weather_df.index = [d.date() for d in weather_df.index]
    if verbosity > 1:
        print('Retrieved weather for years {}:'.format(years))
        print(weather_df)
    # allow columns to be given by label or by integer position
    weather_columns = [label if label in weather_df.columns else weather_df.columns[int(label)]
                       for label in (weather_columns or [])]
    for sampnum, sample in enumerate(samples):
        timestamp = timestamps[sampnum]
        try:
            weather_day = weather_df.loc[timestamp.date()]
        except:
            from traceback import print_exc
            print_exc()
            # fall back to an empty mapping so .get() below yields None for every label
            weather_day = {}
            if verbosity >= 0:
                warnings.warn('Unable to find weather for the date {}'.format(timestamp.date()))
        NaN = float('NaN')
        sample['input'] = [weather_day.get(label, None) for label in weather_columns] + list(sample['input'])
        # NOTE(review): missing labels insert None (not NaN), and `NaN in list`
        # compares by identity/equality (NaN != NaN), so this warning likely
        # never fires for NaNs fetched from the DataFrame -- verify intent
        if verbosity > 0 and NaN in sample['input']:
            warnings.warn('Unable to find weather features {} in the weather for date {}'.format(
                [label for i, label in enumerate(weather_columns) if sample['input'][i] == NaN], timestamp))
    return samples
# def dataset_from_feature_names(df, delays=(1, 2,), quantiles=(), input_columns=(0,), target_columns=(-1,),
# weather_columns=(), features=(), verbosity=1):
# """FIXME: Untested. Transform Datafram columns and append to input_columns as additional features
# Arguments:
# features (seq of str): names of feautures to be appended to the input vector (feature set)
# """
# raise NotImplementedError("Better to implement this as a separate feature transformation function")
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + len(quantiles)
# + len(weather_columns)
# )
# N = max(list(delays) + list(quantiles))
# last_date = first_date = 0
# try:
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(len(N) + extras))
# print('Starting to augment {0}x{1} sample inputs by adding adding {2} additional features'.format(
# len(df), len(input_columns), extras))
# if verbosity > 0:
# print('The first sample input was {0}'.format(
# df[input_columns].iloc[0]))
# print('The first sample target was {0}'.format(
# df[target_columns].iloc[0]))
# print('The last sample input was {0}'.format(
# df[input_columns].iloc[-1]))
# print('The last sample target was {0}'.format(
# df[target_columns].iloc[-1]))
# first_date = df.index.iloc[0].date().toordinal()
# last_date = df.index.iloc[-1].date().toordinal()
# except:
# if verbosity > -1:
# from traceback import format_exc
# warnings.warn(format_exc())
# if verbosity > 1:
# import ipdb
# ipdb.set_trace()
# date_range = (last_date - first_date) + 1 or 1
# # FIXME: scale each feature/column/dimension independently using pug.ann.util.dataset_from_dataframe
# # but mean and std become vectors of the same dimension as the feature/input vector.
# # scikit-learn has transformations that do this more reasonably
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# # convert the list of dicts ((input, output) supervised dataset pairs) into a pybrains.Dataset
# ds = pybrain.datasets.SupervisedDataSet(len(N) + extras, 1)
# sorted_features = sorted(features)
# for sampnum, (input_vector, target_vector) in enumerate(
# zip(df[input_columns].values, df[target_columns].values)):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# # inputs = list(sample['input'])
# # the date we're trying to predict the threshold for
# timestamp = target_vector.index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 2:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale / date_range
# ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\n'.format(
# feature_name, sorted_features)
# msg += 'For sample {} and date {}\n'.format(sampnum, timestamp)
# msg += 'Input vector positions {}:\nInput vector: {}'.format(
# ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(target_vector.values))
def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build a supervised dataset whose output/target columns are empty.

    Thin wrapper around `dataset_from_dataframe` that only changes the
    default for `outputs` to None (no target columns selected).
    """
    kwargs = dict(df=df, delays=delays, inputs=inputs, outputs=outputs,
                  normalize=normalize, verbosity=verbosity)
    return dataset_from_dataframe(**kwargs)
def inputs_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build the sequence of input vectors suitable for "activation" by a neural net.

    Same as `input_dataset_from_dataframe`, except that only the 'input'
    field of the resulting DataSet is returned (not the DataSet itself).
    """
    dataset = input_dataset_from_dataframe(
        df=df, delays=delays, inputs=inputs, outputs=outputs,
        normalize=normalize, verbosity=verbosity)
    return dataset['input']
def build_trainer(nn, ds, verbosity=1):
    """Configure neural net trainer from a pybrain dataset

    Uses RProp- (resilient backpropagation, "minus" variant) with batch
    learning; any verbosity > 0 enables the trainer's own progress logging.
    """
    return pb.supervised.trainers.rprop.RPropMinusTrainer(nn, dataset=ds, batchlearning=True, verbose=bool(verbosity))
def weight_matrices(nn):
    """ Extract list of weight matrices from a Network, Layer (module), Trainer, Connection or other pybrain object

    Recursively unwraps many container shapes: a numpy array is returned
    unchanged; objects exposing `.connections` (Networks) or `.module`
    (Trainers) are unwrapped; a `ParameterContainer`/`Connection` is reshaped
    into a single (outdim x indim) matrix; a string is treated as a filename
    readable by `NetworkReader`; dicts and other iterables are processed
    element-wise.
    """
    if isinstance(nn, ndarray):
        return nn
    # FIX: the duck-typed probing below used bare `except:` clauses, which
    # also swallowed KeyboardInterrupt/SystemExit; narrowed to Exception.
    try:
        return weight_matrices(nn.connections)
    except Exception:
        pass
    try:
        return weight_matrices(nn.module)
    except Exception:
        pass
    # Network objects are ParameterContainer's too, but won't reshape into a single matrix,
    # so this must come after try nn.connections
    if isinstance(nn, (ParameterContainer, Connection)):
        return reshape(nn.params, (nn.outdim, nn.indim))
    if isinstance(nn, basestring):
        try:
            fn = nn
            nn = NetworkReader(fn, newfile=False)
            return weight_matrices(nn.readFrom(fn))
        except Exception:
            pass
    # FIXME: what does NetworkReader output? (Module? Layer?) need to handle it's type here
    try:
        # dict-like container of pybrain objects (Python 2 `iteritems`)
        return [weight_matrices(v) for (k, v) in nn.iteritems()]
    except Exception:
        try:
            # trainer-like object: flatten its module's connection lists
            connections = nn.module.connections.values()
            nn = []
            for conlist in connections:
                nn += conlist
            return weight_matrices(nn)
        except Exception:
            # plain iterable of pybrain objects
            return [weight_matrices(v) for v in nn]
# # FIXME: resolve all these NLP dependencies and get this working
# def dataset_from_time_series(df, N_inp=None, features=('moy',), verbosity=1):
# """Build a pybrains.dataset from the time series contained in a dataframe"""
# N_inp = N_inp or len(df.columns)
# features = features or []
# # Add features to input vector in reverse alphabetical order by feature name,
# # so woy will be added first, and date will be added last.
# # The order that the feature vectors should appear in the input vector to remain consistent
# # and neural net architecture can have structure that anticipates this.
# sorted_features = nlp.sort_strings(features, ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), case_sensitive=False)
# if verbosity > 0:
# print('dataset_from_thresh(features={0})'.format(features))
# samples, mean, std, thresh = simple_dataset_from_thresh(thresh, N=N, max_window=max_window,
# normalize=normalize, ignore_below=ignore_below)
# name = getattr(thresh, 'name', None)
# if name:
# name = normalize_building_name(name)
# if name:
# series = get_series(name)
# else:
# if isinstance(series, basestring):
# name = normalize_building_name(series.strip()) or thresh.name or 'Unknown'
# series = get_series(name)
# elif isinstance(series, pd.DataFrame):
# name = normalize_building_name(series.columns[0]) or thresh.name or 'Unknown'
# series = series[name]
# elif isinstance(series, pd.Series):
# name = normalize_building_name(series.name) or thresh.name or 'Unknown'
# else:
# name = None
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# # TODO: pre-process features list of strings in a separate function
# morn = 0
# # if the building name isn't known, you can't retrieve the morning load values for it
# if name:
# for s in features:
# if s.startswith('morn'):
# try:
# morn = int(s[4:])
# except:
# if verbosity > 0:
# warnings.warn('Unable to determine morning length from feature named "{0}" so using default '
# '(8 am = 8 * 4 = 32)')
# morn = 32 # default to 9 am morning ending
# break
# if verbosity > 0:
# print('In dataset_from_thresh() using {0} morning load values for Building {1}'
# ' because series arg is of type {2}'.format(morn, name, type(series)))
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + morn)
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(N + extras))
# ds = pb.datasets.SupervisedDataSet(N + extras, 1)
# first_date = samples[0]['target'].index[0].date().toordinal()
# last_date = samples[-1]['target'].index[0].date().toordinal()
# date_range = (last_date - first_date) or 1
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# if verbosity > 0:
# print('Adding features for building {3}, {0}, and a morning time series of len {2}, '
# 'to each of the {1} vectors (samples)'.format(features, len(samples), morn, name))
# for sampnum, sample in enumerate(samples):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# inputs = list(sample['input'].values)
# # the date we're trying to predict the rhreshold for
# timestamp = sample['target'].index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 1:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale /
# date_range ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\nFor sample {2}'
# ' and date {3}\nInput vector positions {4}:\nInput vector: {5}'.format(
# feature_name, sorted_features, sampnum, timestamp, ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(sample['target'].values))
# return ds, mean, std, thresh
def dataset_nan_locs(ds):
    """Locate NaN/null entries in a supervised dataset's samples.

    Returns a list of dicts, one per offending sample:
    {'sample': index, 'input': nan column indices, 'output': nan column indices}.
    Based on http://stackoverflow.com/a/14033137/623735
    """
    locations = []
    for i, samp in enumerate(ds):
        if not pd.isnull(samp).any():
            continue
        locations.append({
            'sample': i,
            'input': pd.isnull(samp[0]).nonzero()[0],
            'output': pd.isnull(samp[1]).nonzero()[0],
        })
    return locations
def table_nan_locs(table):
    """List (row, col) coordinates of every NaN/null in a table.

    `table` may be a sequence of rows or a sequence of scalars; scalar rows
    (whose null-mask is a plain bool without `.any()`) are reported at
    column 0.  Based on http://stackoverflow.com/a/14033137/623735
    """
    coords = []
    for i, row in enumerate(table):
        try:
            mask = pd.isnull(row)
            if mask.any():
                coords.extend((i, j) for j in mask.nonzero()[0])
        except AttributeError:  # row is a scalar, so the mask is a plain bool
            if pd.isnull(row):
                coords.append((i, 0))
    return coords
def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):
    """Identical to plot_trainer except `network` and `ds` must be provided separately

    Simulates `network` on `ds` (denormalizing with `mean`/`std`), plots
    Output vs Target, and optionally shows and/or saves the figure.
    `save` may be True or a directory path for the PNG named after `title`.

    Returns:
        3-tuple: (network, mean, std)
    """
    df = sim_network(network=network, ds=ds, mean=mean, std=std)
    df.plot()
    plt.xlabel('Date')
    plt.ylabel('Threshold (kW)')
    plt.title(title)
    if show:
        try:
            # ipython notebook overrides plt.show and doesn't have a block kwarg
            plt.show(block=False)
        except TypeError:
            plt.show()
    if save:
        # spaces in the title would produce awkward filenames
        filename = 'ann_performance_for_{0}.png'.format(title).replace(' ', '_')
        if isinstance(save, basestring) and os.path.isdir(save):
            filename = os.path.join(save, filename)
        plt.savefig(filename)
        if not show:
            # clear the figure when it was saved but never displayed
            plt.clf()
    return network, mean, std
def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):
    """Plot Output vs Target for the Network and DataSet held by a pybrain Trainer.

    Values are denormalized (x * std + mean) before plotting, inverting the
    (x - mean) / std normalization applied when the DataSet was built.

    Args:
        trainer (Trainer): pybrain Trainer containing a valid Network and DataSet
        mean (float): mean of the denormalized dataset (plot scale only; default 0)
        std (float): standard deviation of the denormalized dataset (default 1)
        title (str): plot title (also used for the saved filename)
        show (bool): display the figure
        save (bool or str): save a PNG; a str is treated as the target directory

    Returns:
        3-tuple: (trainer.module, mean, std)
    """
    plot_kwargs = dict(network=trainer.module, ds=trainer.ds,
                       mean=mean, std=std, title=title, show=show, save=save)
    return plot_network_results(**plot_kwargs)
def sim_trainer(trainer, mean=0, std=1):
    """Activate a pybrain Trainer's network on its own DataSet.

    Returns:
        DataFrame: columns ['Output', 'Target'], denormalized with `mean` and `std`
    """
    net = trainer.module
    dataset = trainer.ds
    return sim_network(network=net, ds=dataset, mean=mean, std=std)
def sim_network(network, ds=None, index=None, mean=0, std=1):
    """Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])

    The DataSet's target and output values are denormalized before populating the dataframe columns:
        denormalized_output = normalized_output * std + mean
    which inverses the normalization that produced the normalized output in the first place:
        normalized_output = (denormalized_output - mean) / std

    Args:
        network (Network): a pybrain Network (or a Trainer wrapping one) to activate with `ds`
        ds (DataSet): a pybrain DataSet to activate the Network on (default: network.ds)
        index (sequence): index for the returned DataFrame (default: sample numbers)
        mean (float): mean of the denormalized dataset (default: 0)
        std (float): std (standard deviation) of the denormalized dataset (default: 1)

    Returns:
        DataFrame: DataFrame with columns "Output" and "Target" suitable for df.plot-ting
    """
    # just in case network is a trainer or has a Module-derived instance as one of it's attribute
    # isinstance(network.module, (networks.Network, modules.Module))
    if hasattr(network, 'module') and hasattr(network.module, 'activate'):
        # may want to also check: isinstance(network.module, (networks.Network, modules.Module))
        network = network.module
    # NOTE(review): relies on DataSet truthiness (`ds or network.ds`) -- confirm empty
    # datasets evaluate falsy for the pybrain version in use
    ds = ds or network.ds
    if not ds:
        raise RuntimeError("Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, "
                           " to plot the outputs. A dataset can be provided as part of a network instance or "
                           "as a separate kwarg if `network` is used to provide the `pybrain.Network`"
                           " instance directly.")
    # activate() returns the full output vector; only its first element is recorded
    results_generator = ((network.activate(ds['input'][i])[0] * std + mean, ds['target'][i][0] * std + mean)
                         for i in xrange(len(ds['input'])))
    return pd.DataFrame(results_generator, columns=['Output', 'Target'], index=index or range(len(ds['input'])))
|
hobson/pug-ann | pug/ann/util.py | input_dataset_from_dataframe | python | def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
return dataset_from_dataframe(df=df, delays=delays, inputs=inputs, outputs=outputs,
normalize=normalize, verbosity=verbosity) | Build a dataset with an empty output/target vector
Identical to `dataset_from_dataframe`, except that default values for 2 arguments:
outputs: None | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/util.py#L348-L355 | [
"def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):\n \"\"\"Compose a pybrain.dataset from a pandas DataFrame\n\n Arguments:\n delays (list of int): sample delays to use for the input tapped delay line\n Positive and negative values are treated the same as sample counts into the past.\n default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3\n inputs (list of int or list of str): column indices or labels for the inputs\n outputs (list of int or list of str): column indices or labels for the outputs\n normalize (bool): whether to divide each input to be normally distributed about 0 with std 1\n\n Returns:\n 3-tuple: tuple(dataset, list of means, list of stds)\n means and stds allow normalization of new inputs and denormalization of the outputs\n\n TODO:\n\n Detect categorical variables with low dimensionality and split into separate bits\n Vowpel Wabbit hashes strings into an int?\n Detect ordinal variables and convert to continuous int sequence\n SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm\n \"\"\"\n if isinstance(delays, int):\n if delays:\n delays = range(1, delays + 1)\n else:\n delays = [0]\n delays = np.abs(np.array([int(i) for i in delays]))\n inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]\n outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]\n\n inputs = [fuzzy_get(df.columns, i) for i in inputs]\n outputs = [fuzzy_get(df.columns, o) for o in outputs]\n\n N_inp = len(inputs)\n N_out = len(outputs)\n\n inp_outs = inputs + outputs\n if verbosity > 0:\n print(\"inputs: {}\\noutputs: {}\\ndelays: {}\\n\".format(inputs, outputs, delays))\n means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))\n if normalize:\n means, stds = df[inp_outs].mean(), df[inp_outs].std()\n\n if normalize and verbosity > 0:\n print(\"Input mean values (used to normalize input 
biases): {}\".format(means[:N_inp]))\n print(\"Output mean values (used to normalize output biases): {}\".format(means[N_inp:]))\n ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)\n if verbosity > 0:\n print(\"Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs\".format(\n len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))\n # FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time\n if delays == np.array([0]) and not normalize:\n if verbosity > 0:\n print(\"No tapped delay lines (delays) were requested, so using undelayed features for the dataset.\")\n assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])\n ds.setField('input', df[inputs].values)\n ds.setField('target', df[outputs].values)\n ds.linkFields(['input', 'target'])\n # for inp, outp in zip(df[inputs].values, df[outputs].values):\n # ds.appendLinked(inp, outp)\n assert(len(ds['input']) == len(ds['target']))\n else:\n for i, out_vec in enumerate(df[outputs].values):\n if verbosity > 0 and i % 100 == 0:\n print(\"{}%\".format(i / .01 / len(df)))\n elif verbosity > 1:\n print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))\n if i < max(delays):\n continue\n inp_vec = []\n for delay in delays:\n inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])\n ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])\n if verbosity > 0:\n print(\"Dataset now has {} samples\".format(len(ds)))\n if normalize:\n return ds, means, stds\n else:\n return ds\n"
] | """Maniuplate, analyze and plot `pybrain` `Network` and `DataSet` objects
TODO:
Incorporate into pybrain fork so pug doesn't have to depend on pybrain
"""
from __future__ import print_function
import os
import warnings
import pandas as pd
from scipy import ndarray, reshape # array, amin, amax,
np = pd.np
from matplotlib import pyplot as plt
import pybrain.datasets
import pybrain.structure
import pybrain.supervised
import pybrain.tools
pb = pybrain
# from pybrain.supervised.trainers import Trainer
from pybrain.tools.customxml import NetworkReader
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.connections.connection import Connection
from pug.nlp.util import tuplify, fuzzy_get
from pug.ann.data import weather
#import pug.nlp.util as nlp
# print(os.path.realpath(__file__))
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(DATA_PATH, '..', 'data')
LAYER_TYPES = set([getattr(pybrain.structure, s) for s in
dir(pybrain.structure) if s.endswith('Layer')])
FAST = False
try:
from arac.pybrainbridge import _FeedForwardNetwork as FeedForwardNetwork
FAST = True
except:
from pybrain.structure import FeedForwardNetwork
print("No fast (ARAC, a FC++ library) FeedForwardNetwork was found, "
"so using the slower pybrain python implementation of FFNN.")
def normalize_layer_type(layer_type):
    """Resolve a pybrain layer class, class name, or sequence of either into layer class(es).

    Accepts:
        - a layer class already in LAYER_TYPES (returned unchanged)
        - a string such as 'Sigmoid' or 'SigmoidLayer' (looked up on pb.structure)
        - any iterable of the above (resolved recursively, returned as a list)
    """
    try:
        # already a layer class? (membership test raises TypeError for unhashable args)
        if layer_type in LAYER_TYPES:
            return layer_type
    except TypeError:
        pass
    try:
        # exact class name, e.g. 'SigmoidLayer'
        return getattr(pb.structure, layer_type.strip())
    except AttributeError:
        try:
            # abbreviated name, e.g. 'Sigmoid'
            return getattr(pb.structure, layer_type.strip() + 'Layer')
        except AttributeError:
            pass
    # fall through: treat layer_type as an iterable of layer types
    # NOTE(review): an *unresolvable string* also lands here and would be iterated
    # character by character -- presumably unintended; verify callers only pass valid names
    return [normalize_layer_type(lt) for lt in layer_type]
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
    """Build a feed-forward neural net with the indicated input, hidden, and output dimensions

    Arguments:
        N_input (int): number of input nodes (default: 1)
        N_hidden (int or sequence of int): size of each hidden layer (default: 2)
        N_output (int): number of output nodes (default: 1)
        hidden_layer_type (str, class, or sequence): pybrain layer type(s) for the
            hidden layer(s); must have the same length as N_hidden after normalization
        verbosity (int): > 0 prints layer info and the final connection list

    Returns:
        FeedForwardNetwork with N_input + sum(N_hidden) + N_output nodes (plus a bias unit)
    """
    N_input = N_input or 1
    N_output = N_output or 1
    N_hidden = N_hidden or tuple()
    # scalar hidden-layer spec -> one hidden layer of that size
    if isinstance(N_hidden, (int, float, basestring)):
        N_hidden = (int(N_hidden),)
    hidden_layer_type = hidden_layer_type or tuple()
    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
    if verbosity > 0:
        print(N_hidden, ' layers of type ', hidden_layer_type)
    # one layer type is required per hidden layer
    assert(len(N_hidden) == len(hidden_layer_type))
    nn = pb.structure.FeedForwardNetwork()
    # layers: first hidden layer is named 'hidden', later ones 'hidden-1', 'hidden-2', ...
    nn.addInputModule(pb.structure.BiasUnit(name='bias'))
    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):
        Nhid = int(Nhid)
        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))
    # connections: bias and input feed the first hidden layer (or output if no hidden layers)
    nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))
    nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))
    # chain consecutive hidden layers together
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):
        Nhid = int(Nhid)
        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],
                                                     nn['hidden-{}'.format(i + 1)]))
    # NOTE(review): when N_hidden is empty, i becomes -1 and 'hidden--1' is looked up
    # here -- presumably callers always request at least one hidden layer; confirm
    i = len(N_hidden) - 1
    nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))
    nn.sortModules()
    if FAST:
        try:
            # ARAC (C++) backend is much faster when available
            nn.convertToFastNetwork()
        except:
            if verbosity > 0:
                print('Unable to convert slow PyBrain NN to a fast ARAC network...')
    if verbosity > 0:
        print(nn.connections)
    return nn
def ann_from_ds(ds=None, N_input=3, N_hidden=0, N_output=1, verbosity=1):
    """Construct a FeedForwardNetwork sized to match a pybrain DataSet's dimensions.

    Dataset attributes (indim/outdim/paramdim) override the keyword defaults when present.
    """
    n_in = getattr(ds, 'indim', N_input)
    n_out = getattr(ds, 'outdim', N_output)
    if not N_hidden:
        N_hidden = getattr(ds, 'paramdim', N_hidden + n_in + n_out) - n_in - n_out
    # cap hidden units at roughly 1 per 5 samples per input/output dim, floor at n_out
    cap = len(ds) / float(n_in) / float(n_out) / 5.
    n_hid = max(round(min(N_hidden, cap)), n_out)
    return build_ann(N_input=n_in, N_hidden=n_hid, N_output=n_out, verbosity=verbosity)
def prepend_dataset_with_weather(samples, location='Fresno, CA', weather_columns=None, use_cache=True, verbosity=0):
    """ Prepend weather values (e.g. "Max TempF") to each samples[0..N]['input'] vector

    samples[0..N]['target'] must be indexed by date timestamp (its first index value is used).
    If you use_cache for the current year, you may not get the most recent data.

    Arguments:
        samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame}
        location (str): city/state whose daily weather history should be retrieved
        weather_columns (list of str or int): labels (or column indices) of the weather
            features to prepend to each input vector
        use_cache (bool): whether previously downloaded weather data may be reused
        verbosity (int): < 0 silences warnings, > 1 prints progress information

    Returns:
        list of dict: the same `samples` list, mutated in place with extended inputs
    """
    if verbosity > 1:
        print('Prepending weather data for {} to dataset samples'.format(weather_columns))
    if not weather_columns:
        return samples
    timestamps = pd.DatetimeIndex([s['target'].index[0] for s in samples])
    years = range(timestamps.min().date().year, timestamps.max().date().year + 1)
    weather_df = weather.daily(location=location, years=years, use_cache=use_cache)
    # FIXME: weather_df.resample('D') fails
    weather_df.index = [d.date() for d in weather_df.index]
    if verbosity > 1:
        print('Retrieved weather for years {}:'.format(years))
        print(weather_df)
    # resolve integer column indices into column labels
    weather_columns = [label if label in weather_df.columns else weather_df.columns[int(label)]
                       for label in (weather_columns or [])]
    for sampnum, sample in enumerate(samples):
        timestamp = timestamps[sampnum]
        try:
            weather_day = weather_df.loc[timestamp.date()]
        except Exception:
            from traceback import print_exc
            print_exc()
            weather_day = {}
            if verbosity >= 0:
                warnings.warn('Unable to find weather for the date {}'.format(timestamp.date()))
        # missing features are filled with None so downstream code can detect them
        sample['input'] = [weather_day.get(label, None) for label in weather_columns] + list(sample['input'])
        # BUG FIX: `NaN` was previously bound only inside the except clause above, so this
        # check raised NameError on the happy path; additionally `NaN in list` could never
        # match the None placeholders.  pd.isnull catches both None and NaN feature values.
        if verbosity > 0 and pd.isnull(sample['input'][:len(weather_columns)]).any():
            warnings.warn('Unable to find weather features {} in the weather for date {}'.format(
                [label for i, label in enumerate(weather_columns) if pd.isnull(sample['input'][i])], timestamp))
    return samples
def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):
    """Compose a pybrain.dataset from a pandas DataFrame

    Arguments:
        delays (list of int): sample delays to use for the input tapped delay line
            Positive and negative values are treated the same as sample counts into the past.
            default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
        inputs (list of int or list of str): column indices or labels for the inputs
        outputs (list of int or list of str): column indices or labels for the outputs
        normalize (bool): whether to divide each input to be normally distributed about 0 with std 1

    Returns:
        3-tuple: tuple(dataset, list of means, list of stds) when normalize is True,
            otherwise just the dataset;
            means and stds allow normalization of new inputs and denormalization of the outputs

    TODO:
        Detect categorical variables with low dimensionality and split into separate bits
            Vowpel Wabbit hashes strings into an int?
        Detect ordinal variables and convert to continuous int sequence
        SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """
    # an int delay count N means "use delays 1..N"; 0 means "no delay line"
    if isinstance(delays, int):
        if delays:
            delays = range(1, delays + 1)
        else:
            delays = [0]
    # negative delays are treated as positive look-backs
    delays = np.abs(np.array([int(i) for i in delays]))
    # numeric specs are positional column indices; everything else is a (fuzzy) label
    inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]
    outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]
    inputs = [fuzzy_get(df.columns, i) for i in inputs]
    outputs = [fuzzy_get(df.columns, o) for o in outputs]
    N_inp = len(inputs)
    N_out = len(outputs)
    inp_outs = inputs + outputs
    if verbosity > 0:
        print("inputs: {}\noutputs: {}\ndelays: {}\n".format(inputs, outputs, delays))
    # identity normalization (mean 0, std 1) unless normalize is requested
    means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))
    if normalize:
        means, stds = df[inp_outs].mean(), df[inp_outs].std()
    if normalize and verbosity > 0:
        print("Input mean values (used to normalize input biases): {}".format(means[:N_inp]))
        print("Output mean values (used to normalize output biases): {}".format(means[N_inp:]))
    ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)
    if verbosity > 0:
        print("Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs".format(
            len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))
    # FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time
    # fast path: no delay line and no normalization lets us bulk-assign the fields
    # (single-element array comparison evaluates to a scalar boolean here)
    if delays == np.array([0]) and not normalize:
        if verbosity > 0:
            print("No tapped delay lines (delays) were requested, so using undelayed features for the dataset.")
        assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])
        ds.setField('input', df[inputs].values)
        ds.setField('target', df[outputs].values)
        ds.linkFields(['input', 'target'])
        # for inp, outp in zip(df[inputs].values, df[outputs].values):
        #     ds.appendLinked(inp, outp)
        assert(len(ds['input']) == len(ds['target']))
    else:
        for i, out_vec in enumerate(df[outputs].values):
            if verbosity > 0 and i % 100 == 0:
                # i / .01 / len == 100 * i / len, i.e. percent complete
                print("{}%".format(i / .01 / len(df)))
            elif verbosity > 1:
                print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))
            # skip rows that don't have enough history for the longest delay
            if i < max(delays):
                continue
            # concatenate one normalized input snapshot per delay tap
            inp_vec = []
            for delay in delays:
                inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])
            ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])
    if verbosity > 0:
        print("Dataset now has {} samples".format(len(ds)))
    if normalize:
        return ds, means, stds
    else:
        return ds
# def dataset_from_feature_names(df, delays=(1, 2,), quantiles=(), input_columns=(0,), target_columns=(-1,),
# weather_columns=(), features=(), verbosity=1):
# """FIXME: Untested. Transform Datafram columns and append to input_columns as additional features
# Arguments:
# features (seq of str): names of feautures to be appended to the input vector (feature set)
# """
# raise NotImplementedError("Better to implement this as a separate feature transformation function")
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + len(quantiles)
# + len(weather_columns)
# )
# N = max(list(delays) + list(quantiles))
# last_date = first_date = 0
# try:
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(len(N) + extras))
# print('Starting to augment {0}x{1} sample inputs by adding adding {2} additional features'.format(
# len(df), len(input_columns), extras))
# if verbosity > 0:
# print('The first sample input was {0}'.format(
# df[input_columns].iloc[0]))
# print('The first sample target was {0}'.format(
# df[target_columns].iloc[0]))
# print('The last sample input was {0}'.format(
# df[input_columns].iloc[-1]))
# print('The last sample target was {0}'.format(
# df[target_columns].iloc[-1]))
# first_date = df.index.iloc[0].date().toordinal()
# last_date = df.index.iloc[-1].date().toordinal()
# except:
# if verbosity > -1:
# from traceback import format_exc
# warnings.warn(format_exc())
# if verbosity > 1:
# import ipdb
# ipdb.set_trace()
# date_range = (last_date - first_date) + 1 or 1
# # FIXME: scale each feature/column/dimension independently using pug.ann.util.dataset_from_dataframe
# # but mean and std become vectors of the same dimension as the feature/input vector.
# # scikit-learn has transformations that do this more reasonably
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# # convert the list of dicts ((input, output) supervised dataset pairs) into a pybrains.Dataset
# ds = pybrain.datasets.SupervisedDataSet(len(N) + extras, 1)
# sorted_features = sorted(features)
# for sampnum, (input_vector, target_vector) in enumerate(
# zip(df[input_columns].values, df[target_columns].values)):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# # inputs = list(sample['input'])
# # the date we're trying to predict the threshold for
# timestamp = target_vector.index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 2:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale / date_range
# ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\n'.format(
# feature_name, sorted_features)
# msg += 'For sample {} and date {}\n'.format(sampnum, timestamp)
# msg += 'Input vector positions {}:\nInput vector: {}'.format(
# ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(target_vector.values))
def inputs_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Return only the input vectors of a dataset built from `df`, ready for NN activation.

    Same semantics as `dataset_from_dataframe` except that `outputs` defaults to None
    and only the 'input' field of the resulting DataSet is returned (no DataSet instance).
    """
    dataset = input_dataset_from_dataframe(
        df=df, delays=delays, inputs=inputs, outputs=outputs,
        normalize=normalize, verbosity=verbosity)
    return dataset['input']
def build_trainer(nn, ds, verbosity=1):
    """Build an RPROP- (resilient backprop) batch-learning trainer for `nn` on dataset `ds`."""
    trainer_cls = pb.supervised.trainers.rprop.RPropMinusTrainer
    return trainer_cls(nn, dataset=ds, batchlearning=True, verbose=bool(verbosity))
def weight_matrices(nn):
    """ Extract list of weight matrices from a Network, Layer (module), Trainer, Connection or other pybrain object

    Recursively unwraps whatever container it is handed:
        - ndarray: returned as-is (already a weight matrix)
        - objects with `.connections` or `.module`: unwrapped and recursed
        - ParameterContainer / Connection: params reshaped into an (outdim, indim) matrix
        - str: treated as a saved-network filename and loaded with NetworkReader
        - dict-like or iterable: each element converted recursively
    """
    if isinstance(nn, ndarray):
        return nn
    # prefer the connection dict (Networks) before treating nn as a ParameterContainer
    try:
        return weight_matrices(nn.connections)
    except:
        pass
    # Trainers (and similar wrappers) expose the network as `.module`
    try:
        return weight_matrices(nn.module)
    except:
        pass
    # Network objects are ParameterContainer's too, but won't reshape into a single matrix,
    # so this must come after try nn.connections
    if isinstance(nn, (ParameterContainer, Connection)):
        return reshape(nn.params, (nn.outdim, nn.indim))
    if isinstance(nn, basestring):
        try:
            # interpret the string as a path to a saved pybrain network XML file
            fn = nn
            nn = NetworkReader(fn, newfile=False)
            return weight_matrices(nn.readFrom(fn))
        except:
            pass
    # FIXME: what does NetworkReader output? (Module? Layer?) need to handle it's type here
    # dict-like (e.g. a Network.connections mapping): convert each value
    try:
        return [weight_matrices(v) for (k, v) in nn.iteritems()]
    except:
        try:
            # flatten the per-module connection lists before recursing
            connections = nn.module.connections.values()
            nn = []
            for conlist in connections:
                nn += conlist
            return weight_matrices(nn)
        except:
            # plain iterable of pybrain objects
            return [weight_matrices(v) for v in nn]
# # FIXME: resolve all these NLP dependencies and get this working
# def dataset_from_time_series(df, N_inp=None, features=('moy',), verbosity=1):
# """Build a pybrains.dataset from the time series contained in a dataframe"""
# N_inp = N_inp or len(df.columns)
# features = features or []
# # Add features to input vector in reverse alphabetical order by feature name,
# # so woy will be added first, and date will be added last.
# # The order that the feature vectors should appear in the input vector to remain consistent
# # and neural net architecture can have structure that anticipates this.
# sorted_features = nlp.sort_strings(features, ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), case_sensitive=False)
# if verbosity > 0:
# print('dataset_from_thresh(features={0})'.format(features))
# samples, mean, std, thresh = simple_dataset_from_thresh(thresh, N=N, max_window=max_window,
# normalize=normalize, ignore_below=ignore_below)
# name = getattr(thresh, 'name', None)
# if name:
# name = normalize_building_name(name)
# if name:
# series = get_series(name)
# else:
# if isinstance(series, basestring):
# name = normalize_building_name(series.strip()) or thresh.name or 'Unknown'
# series = get_series(name)
# elif isinstance(series, pd.DataFrame):
# name = normalize_building_name(series.columns[0]) or thresh.name or 'Unknown'
# series = series[name]
# elif isinstance(series, pd.Series):
# name = normalize_building_name(series.name) or thresh.name or 'Unknown'
# else:
# name = None
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# # TODO: pre-process features list of strings in a separate function
# morn = 0
# # if the building name isn't known, you can't retrieve the morning load values for it
# if name:
# for s in features:
# if s.startswith('morn'):
# try:
# morn = int(s[4:])
# except:
# if verbosity > 0:
# warnings.warn('Unable to determine morning length from feature named "{0}" so using default '
# '(8 am = 8 * 4 = 32)')
# morn = 32 # default to 9 am morning ending
# break
# if verbosity > 0:
# print('In dataset_from_thresh() using {0} morning load values for Building {1}'
# ' because series arg is of type {2}'.format(morn, name, type(series)))
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + morn)
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(N + extras))
# ds = pb.datasets.SupervisedDataSet(N + extras, 1)
# first_date = samples[0]['target'].index[0].date().toordinal()
# last_date = samples[-1]['target'].index[0].date().toordinal()
# date_range = (last_date - first_date) or 1
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# if verbosity > 0:
# print('Adding features for building {3}, {0}, and a morning time series of len {2}, '
# 'to each of the {1} vectors (samples)'.format(features, len(samples), morn, name))
# for sampnum, sample in enumerate(samples):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# inputs = list(sample['input'].values)
# # the date we're trying to predict the rhreshold for
# timestamp = sample['target'].index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 1:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale /
# date_range ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\nFor sample {2}'
# ' and date {3}\nInput vector positions {4}:\nInput vector: {5}'.format(
# feature_name, sorted_features, sampnum, timestamp, ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(sample['target'].values))
# return ds, mean, std, thresh
def dataset_nan_locs(ds):
"""
from http://stackoverflow.com/a/14033137/623735
# gets the indices of the rows with nan values in a dataframe
pd.isnull(df).any(1).nonzero()[0]
"""
ans = []
for sampnum, sample in enumerate(ds):
if pd.isnull(sample).any():
ans += [{
'sample': sampnum,
'input': pd.isnull(sample[0]).nonzero()[0],
'output': pd.isnull(sample[1]).nonzero()[0],
}]
return ans
def table_nan_locs(table):
    """Return (row, col) index pairs for every NaN/null cell in a 2-D table.

    `table` may be a sequence of rows (each row a sequence of values) or a flat
    sequence of scalars; null scalars are reported as column 0.
    (underlying idiom from http://stackoverflow.com/a/14033137/623735)

    Returns:
        list of 2-tuple: (rownum, colnum) for each null cell
    """
    ans = []
    for rownum, row in enumerate(table):
        try:
            nulls = pd.isnull(row)
            if nulls.any():
                # BUG FIX: enumerate the boolean mask instead of calling `.nonzero()`
                # on it -- `Series.nonzero()` was removed in modern pandas, so this
                # keeps the function working whether `nulls` is an ndarray or a Series
                try:
                    colnums = [c for c, isnull in enumerate(nulls) if isnull]
                except TypeError:
                    # 0-d numpy bool: `row` was a null numpy scalar
                    colnums = [0]
                ans += [(rownum, colnum) for colnum in colnums]
        except AttributeError:  # table is really just a sequence of scalars
            if pd.isnull(row):
                ans += [(rownum, 0)]
    return ans
def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):
    """Plot denormalized Output/Target curves for `network` activated on `ds`.

    Identical to `trainer_results` except that `network` and `ds` are provided separately.

    Args:
        network (Network): pybrain Network (or Trainer wrapping one) to simulate
        ds (DataSet): dataset to activate the network on (default: network.ds)
        mean (float): mean used to denormalize outputs (output * std + mean)
        std (float): standard deviation used to denormalize outputs
        title (str): plot title; also used to build the saved image filename
        show (bool): display the figure window (non-blocking where supported)
        save (bool or str): if truthy, save a PNG; if a directory path, save into it

    Returns:
        3-tuple: (network, mean, std) so callers can reuse the denormalization info
    """
    df = sim_network(network=network, ds=ds, mean=mean, std=std)
    df.plot()
    plt.xlabel('Date')
    plt.ylabel('Threshold (kW)')
    plt.title(title)
    if show:
        try:
            # ipython notebook overrides plt.show and doesn't have a block kwarg
            plt.show(block=False)
        except TypeError:
            plt.show()
    if save:
        # e.g. "ann_performance_for_My_Title.png"
        filename = 'ann_performance_for_{0}.png'.format(title).replace(' ', '_')
        if isinstance(save, basestring) and os.path.isdir(save):
            filename = os.path.join(save, filename)
        plt.savefig(filename)
        if not show:
            # clear the figure so later plots don't draw on top of this one
            plt.clf()
    return network, mean, std
def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):
    """Plot the Network inside a pybrain Trainer against its own SupervisedDataSet.

    DataSet target and output values are denormalized before plotting via
    `output * std + mean` (the inverse of `(output - mean) / std`).

    Args:
        trainer (Trainer): pybrain Trainer holding a valid Network and DataSet
        mean (float): mean of the denormalized dataset (default: 0); plot scale only
        std (float): standard deviation of the denormalized dataset (default: 1)
        title (str): title to display on the plot.
        show (bool): whether to display the figure
        save (bool or str): whether (or where) to save the plot image

    Returns:
        3-tuple: (network, mean, std) -- the trainer's module plus denormalization info
    """
    net = trainer.module
    dataset = trainer.ds
    return plot_network_results(network=net, ds=dataset, mean=mean, std=std,
                                title=title, show=show, save=save)
def sim_trainer(trainer, mean=0, std=1):
    """Simulate a Trainer by activating its network on its DataSet.

    Returns:
        DataFrame: columns ['Output', 'Target'], denormalized with `mean` and `std`
    """
    return sim_network(network=trainer.module, ds=trainer.ds,
                       mean=mean, std=std)
def sim_network(network, ds=None, index=None, mean=0, std=1):
    """Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])

    The DataSet's target and output values are denormalized before populating the dataframe columns:
        denormalized_output = normalized_output * std + mean
    which inverses the normalization that produced the normalized output in the first place:
        normalized_output = (denormalized_output - mean) / std

    Args:
        network (Network): a pybrain Network (or a Trainer wrapping one) to activate with `ds`
        ds (DataSet): a pybrain DataSet to activate the Network on (default: network.ds)
        index (sequence): index for the returned DataFrame (default: sample numbers)
        mean (float): mean of the denormalized dataset (default: 0)
        std (float): std (standard deviation) of the denormalized dataset (default: 1)

    Returns:
        DataFrame: DataFrame with columns "Output" and "Target" suitable for df.plot-ting
    """
    # just in case network is a trainer or has a Module-derived instance as one of it's attribute
    # isinstance(network.module, (networks.Network, modules.Module))
    if hasattr(network, 'module') and hasattr(network.module, 'activate'):
        # may want to also check: isinstance(network.module, (networks.Network, modules.Module))
        network = network.module
    # NOTE(review): relies on DataSet truthiness (`ds or network.ds`) -- confirm empty
    # datasets evaluate falsy for the pybrain version in use
    ds = ds or network.ds
    if not ds:
        raise RuntimeError("Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, "
                           " to plot the outputs. A dataset can be provided as part of a network instance or "
                           "as a separate kwarg if `network` is used to provide the `pybrain.Network`"
                           " instance directly.")
    # activate() returns the full output vector; only its first element is recorded
    results_generator = ((network.activate(ds['input'][i])[0] * std + mean, ds['target'][i][0] * std + mean)
                         for i in xrange(len(ds['input'])))
    return pd.DataFrame(results_generator, columns=['Output', 'Target'], index=index or range(len(ds['input'])))
|
hobson/pug-ann | pug/ann/util.py | inputs_from_dataframe | python | def inputs_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
ds = input_dataset_from_dataframe(df=df, delays=delays, inputs=inputs, outputs=outputs,
normalize=normalize, verbosity=verbosity)
return ds['input'] | Build a sequence of vectors suitable for "activation" by a neural net
Identical to `dataset_from_dataframe`, except that only the input vectors are
returned (not a full DataSet instance) and default values for 2 arguments are changed:
outputs: None
And only the input vectors are return | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/util.py#L358-L369 | [
"def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):\n \"\"\" Build a dataset with an empty output/target vector\n\n Identical to `dataset_from_dataframe`, except that default values for 2 arguments:\n outputs: None\n \"\"\"\n return dataset_from_dataframe(df=df, delays=delays, inputs=inputs, outputs=outputs,\n normalize=normalize, verbosity=verbosity)\n"
] | """Maniuplate, analyze and plot `pybrain` `Network` and `DataSet` objects
TODO:
Incorporate into pybrain fork so pug doesn't have to depend on pybrain
"""
from __future__ import print_function
import os
import warnings
import pandas as pd
from scipy import ndarray, reshape # array, amin, amax,
np = pd.np
from matplotlib import pyplot as plt
import pybrain.datasets
import pybrain.structure
import pybrain.supervised
import pybrain.tools
pb = pybrain
# from pybrain.supervised.trainers import Trainer
from pybrain.tools.customxml import NetworkReader
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.connections.connection import Connection
from pug.nlp.util import tuplify, fuzzy_get
from pug.ann.data import weather
#import pug.nlp.util as nlp
# print(os.path.realpath(__file__))
# Data directory bundled with the package (../data relative to this module)
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(DATA_PATH, '..', 'data')
# All layer classes exported by pybrain.structure (class names ending in 'Layer')
LAYER_TYPES = set([getattr(pybrain.structure, s) for s in
                   dir(pybrain.structure) if s.endswith('Layer')])
# FAST is True when the compiled ARAC network implementation is importable
FAST = False
try:
    from arac.pybrainbridge import _FeedForwardNetwork as FeedForwardNetwork
    FAST = True
except:
    from pybrain.structure import FeedForwardNetwork
    print("No fast (ARAC, a FC++ library) FeedForwardNetwork was found, "
          "so using the slower pybrain python implementation of FFNN.")
def normalize_layer_type(layer_type):
    """Resolve `layer_type` to pybrain layer class(es).

    Accepts a pybrain layer class (returned unchanged), a string such as
    'Linear' or 'LinearLayer' (looked up on `pb.structure`, appending
    'Layer' if necessary), or a sequence of either, in which case a list of
    resolved classes is returned (one per element).
    """
    try:
        # Already a pybrain layer class: nothing to do.
        if layer_type in LAYER_TYPES:
            return layer_type
    except TypeError:
        # Unhashable argument (e.g. a list): fall through to the sequence case.
        pass
    try:
        # A string naming the class exactly, e.g. 'LinearLayer'
        return getattr(pb.structure, layer_type.strip())
    except AttributeError:
        try:
            # A short string name, e.g. 'Linear' -> 'LinearLayer'
            return getattr(pb.structure, layer_type.strip() + 'Layer')
        except AttributeError:
            # Not a string (no .strip) or an unknown name: assume a sequence.
            pass
    return [normalize_layer_type(lt) for lt in layer_type]
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
    """Build a feed-forward neural net with the indicated input, hidden, and output dimensions

    Arguments:
        N_input (int): number of input nodes (default: 1)
        N_hidden (int, str or sequence of int): width of each hidden layer
        N_output (int): number of output nodes (default: 1)
        hidden_layer_type (str, class or sequence): pybrain layer type for each
            hidden layer (resolved via `normalize_layer_type`); one entry per
            hidden layer is required
        verbosity (int): > 0 prints the layer plan and the resulting connection list

    Returns:
        FeedForwardNetwork with N_input + sum(N_hidden) + N_output nodes plus one bias unit
    """
    N_input = N_input or 1
    N_output = N_output or 1
    N_hidden = N_hidden or tuple()
    # A scalar hidden width means a single hidden layer
    if isinstance(N_hidden, (int, float, basestring)):
        N_hidden = (int(N_hidden),)
    hidden_layer_type = hidden_layer_type or tuple()
    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
    if verbosity > 0:
        print(N_hidden, ' layers of type ', hidden_layer_type)
    # One layer type is required for every hidden layer width
    assert(len(N_hidden) == len(hidden_layer_type))
    nn = pb.structure.FeedForwardNetwork()
    # layers
    nn.addInputModule(pb.structure.BiasUnit(name='bias'))
    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
    # First hidden layer is named 'hidden'; subsequent ones 'hidden-1', 'hidden-2', ...
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):
        Nhid = int(Nhid)
        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))
    # connections
    nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))
    nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))
    # Chain each hidden layer to the next one
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):
        Nhid = int(Nhid)
        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],
                                                     nn['hidden-{}'.format(i + 1)]))
    # Last hidden layer feeds the output layer.
    # NOTE(review): with zero hidden layers i == -1, so this looks up 'hidden--1'
    # (likely a KeyError) -- confirm whether the no-hidden-layer path is ever used.
    i = len(N_hidden) - 1
    nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))
    nn.sortModules()
    if FAST:
        try:
            # ARAC conversion can fail for unsupported layer types; fall back to the slow net
            nn.convertToFastNetwork()
        except:
            if verbosity > 0:
                print('Unable to convert slow PyBrain NN to a fast ARAC network...')
    if verbosity > 0:
        print(nn.connections)
    return nn
def ann_from_ds(ds=None, N_input=3, N_hidden=0, N_output=1, verbosity=1):
    """Build a FeedForwardNetwork sized to fit a pybrain DataSet.

    Input/output widths come from `ds.indim` / `ds.outdim` when present,
    otherwise the keyword defaults are used. The hidden width defaults to
    `ds.paramdim - N_input - N_output`, is capped at
    len(ds) / (5 * N_input * N_output) (rounded), and floored at N_output.

    Args:
        ds (DataSet): dataset used to size the network (optional)
        N_input (int): fallback input dimension
        N_hidden (int): requested hidden width; 0 means derive it from `ds`
        N_output (int): fallback output dimension
        verbosity (int): passed through to `build_ann`

    Returns:
        FeedForwardNetwork built by `build_ann`
    """
    N_input = getattr(ds, 'indim', N_input)
    N_output = getattr(ds, 'outdim', N_output)
    N_hidden = N_hidden or getattr(ds, 'paramdim', N_hidden + N_input + N_output) - N_input - N_output
    N_hidden = max(round(min(N_hidden, len(ds) / float(N_input) / float(N_output) / 5.)), N_output)
    return build_ann(N_input=N_input, N_hidden=N_hidden, N_output=N_output, verbosity=verbosity)
def prepend_dataset_with_weather(samples, location='Fresno, CA', weather_columns=None, use_cache=True, verbosity=0):
    """ Prepend the weather values specified (e.g. 'Max TemperatureF') to each samples[i]['input'] vector

    Each samples[i]['target'] must have an index whose first entry is the sample's date timestamp.
    If you use_cache for the current year, you may not get the most recent data.

    Arguments:
        samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame}
        location (str): location string understood by `weather.daily`
        weather_columns (list of str or int): weather column labels (or indices) to prepend
        use_cache (bool): reuse previously downloaded weather data if available
        verbosity (int): < 0 silences warnings; > 0 warns on missing features; > 1 prints progress

    Returns:
        list of dict: the same `samples` list with each 'input' extended in place
    """
    if verbosity > 1:
        print('Prepending weather data for {} to dataset samples'.format(weather_columns))
    if not weather_columns:
        return samples
    timestamps = pd.DatetimeIndex([s['target'].index[0] for s in samples])
    years = range(timestamps.min().date().year, timestamps.max().date().year + 1)
    weather_df = weather.daily(location=location, years=years, use_cache=use_cache)
    # FIXME: weather_df.resample('D') fails
    weather_df.index = [d.date() for d in weather_df.index]
    if verbosity > 1:
        print('Retrieved weather for years {}:'.format(years))
        print(weather_df)
    # Resolve integer column positions to real column labels
    weather_columns = [label if label in weather_df.columns else weather_df.columns[int(label)]
                       for label in (weather_columns or [])]
    for sampnum, sample in enumerate(samples):
        timestamp = timestamps[sampnum]
        try:
            weather_day = weather_df.loc[timestamp.date()]
        except Exception:  # was a bare `except:`; narrowed so KeyboardInterrupt still propagates
            from traceback import print_exc
            print_exc()
            weather_day = {}
            if verbosity >= 0:
                warnings.warn('Unable to find weather for the date {}'.format(timestamp.date()))
        sample['input'] = [weather_day.get(label, None) for label in weather_columns] + list(sample['input'])
        # BUG FIX: `NaN` used to be bound only inside the except clause (NameError on the
        # happy path) and `NaN in list` can never match because NaN != NaN; detect missing
        # values (None or NaN) with pd.isnull() instead.
        if verbosity > 0 and any(pd.isnull(v) for v in sample['input']):
            warnings.warn('Unable to find weather features {} in the weather for date {}'.format(
                [label for i, label in enumerate(weather_columns) if pd.isnull(sample['input'][i])], timestamp))
    return samples
def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):
    """Compose a pybrain.dataset from a pandas DataFrame

    Arguments:
        delays (int or list of int): sample delays to use for the input tapped delay line.
            Positive and negative values are treated the same as sample counts into the past.
            default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
            An int N is expanded to range(1, N + 1); 0 (or [0]) means "no delay line".
        inputs (list of int or list of str): column indices or labels for the inputs
        outputs (list of int or list of str): column indices or labels for the outputs
        normalize (bool): whether to shift/scale each column to be normally distributed about 0 with std 1
        verbosity (int): > 0 prints progress and dimension information

    Returns:
        SupervisedDataSet when `normalize` is False, otherwise the 3-tuple
        (dataset, means, stds) so new inputs can be normalized and outputs
        denormalized with the same statistics.

    TODO:
        Detect categorical variables with low dimensionality and split into separate bits
        Vowpel Wabbit hashes strings into an int?
        Detect ordinal variables and convert to continuous int sequence
        SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """
    # Expand an integer delay spec into an explicit list of past-sample offsets
    if isinstance(delays, int):
        if delays:
            delays = range(1, delays + 1)
        else:
            delays = [0]
    delays = np.abs(np.array([int(i) for i in delays]))
    # Resolve numeric column positions to labels, then fuzzy-match labels to actual column names
    inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]
    outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]
    inputs = [fuzzy_get(df.columns, i) for i in inputs]
    outputs = [fuzzy_get(df.columns, o) for o in outputs]
    N_inp = len(inputs)
    N_out = len(outputs)
    inp_outs = inputs + outputs
    if verbosity > 0:
        print("inputs: {}\noutputs: {}\ndelays: {}\n".format(inputs, outputs, delays))
    means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))
    if normalize:
        means, stds = df[inp_outs].mean(), df[inp_outs].std()
    if normalize and verbosity > 0:
        print("Input mean values (used to normalize input biases): {}".format(means[:N_inp]))
        print("Output mean values (used to normalize output biases): {}".format(means[N_inp:]))
    ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)
    if verbosity > 0:
        print("Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs".format(
            len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))
    # FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time
    # BUG FIX: `delays == np.array([0])` produced a boolean *array* whose truth value is
    # ambiguous (ValueError) whenever len(delays) > 1; use np.array_equal instead.
    if np.array_equal(delays, [0]) and not normalize:
        if verbosity > 0:
            print("No tapped delay lines (delays) were requested, so using undelayed features for the dataset.")
        assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])
        ds.setField('input', df[inputs].values)
        ds.setField('target', df[outputs].values)
        ds.linkFields(['input', 'target'])
        assert(len(ds['input']) == len(ds['target']))
    else:
        for i, out_vec in enumerate(df[outputs].values):
            if verbosity > 0 and i % 100 == 0:
                print("{}%".format(i / .01 / len(df)))
            elif verbosity > 1:
                print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))
            # Skip the first max(delays) rows: they have no complete history
            if i < max(delays):
                continue
            # Tapped delay line: concatenate the (normalized) input rows at each delay
            inp_vec = []
            for delay in delays:
                inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])
            ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])
    if verbosity > 0:
        print("Dataset now has {} samples".format(len(ds)))
    if normalize:
        return ds, means, stds
    else:
        return ds
# def dataset_from_feature_names(df, delays=(1, 2,), quantiles=(), input_columns=(0,), target_columns=(-1,),
# weather_columns=(), features=(), verbosity=1):
# """FIXME: Untested. Transform Datafram columns and append to input_columns as additional features
# Arguments:
# features (seq of str): names of feautures to be appended to the input vector (feature set)
# """
# raise NotImplementedError("Better to implement this as a separate feature transformation function")
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + len(quantiles)
# + len(weather_columns)
# )
# N = max(list(delays) + list(quantiles))
# last_date = first_date = 0
# try:
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(len(N) + extras))
# print('Starting to augment {0}x{1} sample inputs by adding adding {2} additional features'.format(
# len(df), len(input_columns), extras))
# if verbosity > 0:
# print('The first sample input was {0}'.format(
# df[input_columns].iloc[0]))
# print('The first sample target was {0}'.format(
# df[target_columns].iloc[0]))
# print('The last sample input was {0}'.format(
# df[input_columns].iloc[-1]))
# print('The last sample target was {0}'.format(
# df[target_columns].iloc[-1]))
# first_date = df.index.iloc[0].date().toordinal()
# last_date = df.index.iloc[-1].date().toordinal()
# except:
# if verbosity > -1:
# from traceback import format_exc
# warnings.warn(format_exc())
# if verbosity > 1:
# import ipdb
# ipdb.set_trace()
# date_range = (last_date - first_date) + 1 or 1
# # FIXME: scale each feature/column/dimension independently using pug.ann.util.dataset_from_dataframe
# # but mean and std become vectors of the same dimension as the feature/input vector.
# # scikit-learn has transformations that do this more reasonably
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# # convert the list of dicts ((input, output) supervised dataset pairs) into a pybrains.Dataset
# ds = pybrain.datasets.SupervisedDataSet(len(N) + extras, 1)
# sorted_features = sorted(features)
# for sampnum, (input_vector, target_vector) in enumerate(
# zip(df[input_columns].values, df[target_columns].values)):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# # inputs = list(sample['input'])
# # the date we're trying to predict the threshold for
# timestamp = target_vector.index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 2:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale / date_range
# ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\n'.format(
# feature_name, sorted_features)
# msg += 'For sample {} and date {}\n'.format(sampnum, timestamp)
# msg += 'Input vector positions {}:\nInput vector: {}'.format(
# ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(target_vector.values))
def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """ Build a dataset whose output/target vectors are empty

    Thin wrapper around `dataset_from_dataframe` with `outputs` defaulting
    to None (and `normalize` defaulting to True).
    """
    return dataset_from_dataframe(
        df=df,
        delays=delays,
        inputs=inputs,
        outputs=outputs,
        normalize=normalize,
        verbosity=verbosity,
    )
def build_trainer(nn, ds, verbosity=1):
    """Construct an RProp- trainer for network `nn` using batch learning on dataset `ds`."""
    trainer_cls = pb.supervised.trainers.rprop.RPropMinusTrainer
    return trainer_cls(nn, dataset=ds, batchlearning=True, verbose=bool(verbosity))
def weight_matrices(nn):
    """ Extract list of weight matrices from a Network, Layer (module), Trainer, Connection or other pybrain object

    Duck-types its argument through a cascade of fallbacks, in order:
    ndarray passthrough -> `.connections` attribute -> `.module` attribute ->
    ParameterContainer/Connection params reshaped to (outdim, indim) ->
    filename string readable by NetworkReader -> dict or plain iterable of any
    of the above (recursing on each element).
    """
    if isinstance(nn, ndarray):
        return nn
    try:
        # Network-like: recurse into its connection table
        return weight_matrices(nn.connections)
    except:
        pass
    try:
        # Trainer-like: unwrap the wrapped module/network
        return weight_matrices(nn.module)
    except:
        pass
    # Network objects are ParameterContainer's too, but won't reshape into a single matrix,
    # so this must come after try nn.connections
    if isinstance(nn, (ParameterContainer, Connection)):
        return reshape(nn.params, (nn.outdim, nn.indim))
    if isinstance(nn, basestring):
        try:
            # Treat a string as the path of a saved-network XML file
            fn = nn
            nn = NetworkReader(fn, newfile=False)
            return weight_matrices(nn.readFrom(fn))
        except:
            pass
    # FIXME: what does NetworkReader output? (Module? Layer?) need to handle it's type here
    try:
        # Mapping of connections: recurse on each value
        return [weight_matrices(v) for (k, v) in nn.iteritems()]
    except:
        try:
            # Module with a .connections dict of lists: flatten, then recurse
            connections = nn.module.connections.values()
            nn = []
            for conlist in connections:
                nn += conlist
            return weight_matrices(nn)
        except:
            # Last resort: treat nn as a plain iterable
            return [weight_matrices(v) for v in nn]
# # FIXME: resolve all these NLP dependencies and get this working
# def dataset_from_time_series(df, N_inp=None, features=('moy',), verbosity=1):
# """Build a pybrains.dataset from the time series contained in a dataframe"""
# N_inp = N_inp or len(df.columns)
# features = features or []
# # Add features to input vector in reverse alphabetical order by feature name,
# # so woy will be added first, and date will be added last.
# # The order that the feature vectors should appear in the input vector to remain consistent
# # and neural net architecture can have structure that anticipates this.
# sorted_features = nlp.sort_strings(features, ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), case_sensitive=False)
# if verbosity > 0:
# print('dataset_from_thresh(features={0})'.format(features))
# samples, mean, std, thresh = simple_dataset_from_thresh(thresh, N=N, max_window=max_window,
# normalize=normalize, ignore_below=ignore_below)
# name = getattr(thresh, 'name', None)
# if name:
# name = normalize_building_name(name)
# if name:
# series = get_series(name)
# else:
# if isinstance(series, basestring):
# name = normalize_building_name(series.strip()) or thresh.name or 'Unknown'
# series = get_series(name)
# elif isinstance(series, pd.DataFrame):
# name = normalize_building_name(series.columns[0]) or thresh.name or 'Unknown'
# series = series[name]
# elif isinstance(series, pd.Series):
# name = normalize_building_name(series.name) or thresh.name or 'Unknown'
# else:
# name = None
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# # TODO: pre-process features list of strings in a separate function
# morn = 0
# # if the building name isn't known, you can't retrieve the morning load values for it
# if name:
# for s in features:
# if s.startswith('morn'):
# try:
# morn = int(s[4:])
# except:
# if verbosity > 0:
# warnings.warn('Unable to determine morning length from feature named "{0}" so using default '
# '(8 am = 8 * 4 = 32)')
# morn = 32 # default to 9 am morning ending
# break
# if verbosity > 0:
# print('In dataset_from_thresh() using {0} morning load values for Building {1}'
# ' because series arg is of type {2}'.format(morn, name, type(series)))
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + morn)
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(N + extras))
# ds = pb.datasets.SupervisedDataSet(N + extras, 1)
# first_date = samples[0]['target'].index[0].date().toordinal()
# last_date = samples[-1]['target'].index[0].date().toordinal()
# date_range = (last_date - first_date) or 1
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# if verbosity > 0:
# print('Adding features for building {3}, {0}, and a morning time series of len {2}, '
# 'to each of the {1} vectors (samples)'.format(features, len(samples), morn, name))
# for sampnum, sample in enumerate(samples):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# inputs = list(sample['input'].values)
# # the date we're trying to predict the rhreshold for
# timestamp = sample['target'].index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 1:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale /
# date_range ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\nFor sample {2}'
# ' and date {3}\nInput vector positions {4}:\nInput vector: {5}'.format(
# feature_name, sorted_features, sampnum, timestamp, ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(sample['target'].values))
# return ds, mean, std, thresh
def dataset_nan_locs(ds):
"""
from http://stackoverflow.com/a/14033137/623735
# gets the indices of the rows with nan values in a dataframe
pd.isnull(df).any(1).nonzero()[0]
"""
ans = []
for sampnum, sample in enumerate(ds):
if pd.isnull(sample).any():
ans += [{
'sample': sampnum,
'input': pd.isnull(sample[0]).nonzero()[0],
'output': pd.isnull(sample[1]).nonzero()[0],
}]
return ans
def table_nan_locs(table):
    """Return (row, col) positions of every NaN/null cell in `table`.

    Handles a 2-D table (iterable of rows) as well as a flat sequence of
    scalars, in which case the column index is always 0.

    Based on http://stackoverflow.com/a/14033137/623735
    """
    locations = []
    for row_idx, row in enumerate(table):
        try:
            null_mask = pd.isnull(row)
            if null_mask.any():
                locations.extend((row_idx, col_idx) for col_idx in null_mask.nonzero()[0])
        except AttributeError:  # `row` is a scalar, so the mask is a plain bool
            if pd.isnull(row):
                locations.append((row_idx, 0))
    return locations
def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):
    """Identical to plot_trainer except `network` and `ds` must be provided separately

    Simulates `network` on `ds` via `sim_network`, plots the Output and Target
    series, then optionally shows the figure and/or saves it as a PNG.

    Args:
        network (Network): pybrain Network to activate on `ds`
        ds (DataSet): dataset to simulate (may instead be attached to `network`)
        mean (float): mean used to denormalize the plotted values (default: 0)
        std (float): standard deviation used to denormalize the plotted values (default: 1)
        title (str): plot title; also used to derive the saved filename
        show (bool): whether to display the figure
        save (bool or str): truthy to save a PNG; a directory path string sets the save location

    Returns:
        3-tuple: (network, mean, std)
    """
    df = sim_network(network=network, ds=ds, mean=mean, std=std)
    df.plot()
    plt.xlabel('Date')
    plt.ylabel('Threshold (kW)')
    plt.title(title)
    if show:
        try:
            # ipython notebook overrides plt.show and doesn't have a block kwarg
            plt.show(block=False)
        except TypeError:
            plt.show()
    if save:
        # Derive a filesystem-safe filename from the title
        filename = 'ann_performance_for_{0}.png'.format(title).replace(' ', '_')
        if isinstance(save, basestring) and os.path.isdir(save):
            filename = os.path.join(save, filename)
        plt.savefig(filename)
    if not show:
        # Clear the figure so subsequent plots start fresh when nothing was displayed
        plt.clf()
    return network, mean, std
def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):
    """Plot the performance of the Network and SupervisedDataSet held by a pybrain Trainer

    DataSet target and output values are denormalized before plotting with
    `output * std + mean`, which inverts the normalization
    `(output - mean) / std`.

    Args:
        trainer (Trainer): a pybrain Trainer instance containing a valid Network and DataSet
        mean (float): mean of the denormalized dataset (default: 0); only affects the plot scale
        std (float): standard deviation of the denormalized dataset (default: 1)
        title (str): title to display on the plot
        show (bool): whether to display the figure
        save (bool or str): whether (or the directory in which) to save the plot image

    Returns:
        3-tuple: (network, mean, std), the network along with denormalization info
    """
    net = trainer.module
    data = trainer.ds
    return plot_network_results(network=net, ds=data, mean=mean, std=std,
                                title=title, show=show, save=save)
def sim_trainer(trainer, mean=0, std=1):
    """Activate a trainer's DataSet through its network.

    Returns a DataFrame with columns ['Output', 'Target'], denormalized
    via `value * std + mean`.
    """
    return sim_network(trainer.module, ds=trainer.ds, mean=mean, std=std)
def sim_network(network, ds=None, index=None, mean=0, std=1):
    """Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])

    The DataSet's target and output values are denormalized before populating the dataframe columns:
        denormalized_output = normalized_output * std + mean
    which inverts the normalization that produced the normalized output in the first place:
        normalized_output = (denormalized_output - mean) / std

    Args:
        network (Network): a pybrain Network (or a Trainer wrapping one) to activate on `ds`
        ds (DataSet): a pybrain DataSet to activate the Network on to produce an output sequence
        index (sequence): optional index for the returned DataFrame (default: 0..N-1)
        mean (float): mean of the denormalized dataset (default: 0)
        std (float): std (standard deviation) of the denormalized dataset (default: 1)

    Returns:
        DataFrame: DataFrame with columns "Output" and "Target" suitable for df.plot-ting

    Raises:
        RuntimeError: if no DataSet is provided and none is attached to the network
    """
    # Unwrap a Trainer (or anything else carrying an activatable `.module`) to get the Network
    if hasattr(network, 'module') and hasattr(network.module, 'activate'):
        network = network.module
    ds = ds or network.ds
    if not ds:
        raise RuntimeError("Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, "
                           " to plot the outputs. A dataset can be provided as part of a network instance or "
                           "as a separate kwarg if `network` is used to provide the `pybrain.Network`"
                           " instance directly.")
    # `range` instead of `xrange` for Python 2/3 compatibility (equivalent behavior here)
    results_generator = ((network.activate(ds['input'][i])[0] * std + mean, ds['target'][i][0] * std + mean)
                         for i in range(len(ds['input'])))
    # BUG FIX: `index or range(...)` raised ValueError for a multi-element pandas Index
    # (ambiguous truth value); test for None explicitly instead.
    if index is None:
        index = range(len(ds['input']))
    return pd.DataFrame(results_generator, columns=['Output', 'Target'], index=index)
|
hobson/pug-ann | pug/ann/util.py | build_trainer | python | def build_trainer(nn, ds, verbosity=1):
return pb.supervised.trainers.rprop.RPropMinusTrainer(nn, dataset=ds, batchlearning=True, verbose=bool(verbosity)) | Configure neural net trainer from a pybrain dataset | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/util.py#L372-L374 | null | """Maniuplate, analyze and plot `pybrain` `Network` and `DataSet` objects
TODO:
Incorporate into pybrain fork so pug doesn't have to depend on pybrain
"""
from __future__ import print_function
import os
import warnings
import pandas as pd
from scipy import ndarray, reshape # array, amin, amax,
np = pd.np
from matplotlib import pyplot as plt
import pybrain.datasets
import pybrain.structure
import pybrain.supervised
import pybrain.tools
pb = pybrain
# from pybrain.supervised.trainers import Trainer
from pybrain.tools.customxml import NetworkReader
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.connections.connection import Connection
from pug.nlp.util import tuplify, fuzzy_get
from pug.ann.data import weather
#import pug.nlp.util as nlp
# print(os.path.realpath(__file__))
# Data directory bundled with the package (../data relative to this module)
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(DATA_PATH, '..', 'data')
# All layer classes exported by pybrain.structure (class names ending in 'Layer')
LAYER_TYPES = set([getattr(pybrain.structure, s) for s in
                   dir(pybrain.structure) if s.endswith('Layer')])
# FAST is True when the compiled ARAC network implementation is importable
FAST = False
try:
    from arac.pybrainbridge import _FeedForwardNetwork as FeedForwardNetwork
    FAST = True
except:
    from pybrain.structure import FeedForwardNetwork
    print("No fast (ARAC, a FC++ library) FeedForwardNetwork was found, "
          "so using the slower pybrain python implementation of FFNN.")
def normalize_layer_type(layer_type):
    """Resolve `layer_type` to pybrain layer class(es).

    Accepts a pybrain layer class (returned unchanged), a string such as
    'Linear' or 'LinearLayer' (looked up on `pb.structure`, appending
    'Layer' if necessary), or a sequence of either, in which case a list of
    resolved classes is returned (one per element).
    """
    try:
        # Already a pybrain layer class: nothing to do.
        if layer_type in LAYER_TYPES:
            return layer_type
    except TypeError:
        # Unhashable argument (e.g. a list): fall through to the sequence case.
        pass
    try:
        # A string naming the class exactly, e.g. 'LinearLayer'
        return getattr(pb.structure, layer_type.strip())
    except AttributeError:
        try:
            # A short string name, e.g. 'Linear' -> 'LinearLayer'
            return getattr(pb.structure, layer_type.strip() + 'Layer')
        except AttributeError:
            # Not a string (no .strip) or an unknown name: assume a sequence.
            pass
    return [normalize_layer_type(lt) for lt in layer_type]
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
    """Build a feed-forward neural net with the indicated input, hidden, and output dimensions

    Arguments:
        N_input (int): number of input nodes (default: 1)
        N_hidden (int, str or sequence of int): width of each hidden layer
        N_output (int): number of output nodes (default: 1)
        hidden_layer_type (str, class or sequence): pybrain layer type for each
            hidden layer (resolved via `normalize_layer_type`); one entry per
            hidden layer is required
        verbosity (int): > 0 prints the layer plan and the resulting connection list

    Returns:
        FeedForwardNetwork with N_input + sum(N_hidden) + N_output nodes plus one bias unit
    """
    N_input = N_input or 1
    N_output = N_output or 1
    N_hidden = N_hidden or tuple()
    # A scalar hidden width means a single hidden layer
    if isinstance(N_hidden, (int, float, basestring)):
        N_hidden = (int(N_hidden),)
    hidden_layer_type = hidden_layer_type or tuple()
    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
    if verbosity > 0:
        print(N_hidden, ' layers of type ', hidden_layer_type)
    # One layer type is required for every hidden layer width
    assert(len(N_hidden) == len(hidden_layer_type))
    nn = pb.structure.FeedForwardNetwork()
    # layers
    nn.addInputModule(pb.structure.BiasUnit(name='bias'))
    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
    # First hidden layer is named 'hidden'; subsequent ones 'hidden-1', 'hidden-2', ...
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):
        Nhid = int(Nhid)
        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))
    # connections
    nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))
    nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))
    # Chain each hidden layer to the next one
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):
        Nhid = int(Nhid)
        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],
                                                     nn['hidden-{}'.format(i + 1)]))
    # Last hidden layer feeds the output layer.
    # NOTE(review): with zero hidden layers i == -1, so this looks up 'hidden--1'
    # (likely a KeyError) -- confirm whether the no-hidden-layer path is ever used.
    i = len(N_hidden) - 1
    nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))
    nn.sortModules()
    if FAST:
        try:
            # ARAC conversion can fail for unsupported layer types; fall back to the slow net
            nn.convertToFastNetwork()
        except:
            if verbosity > 0:
                print('Unable to convert slow PyBrain NN to a fast ARAC network...')
    if verbosity > 0:
        print(nn.connections)
    return nn
def ann_from_ds(ds=None, N_input=3, N_hidden=0, N_output=1, verbosity=1):
    """Build a FeedForwardNetwork sized to fit a pybrain DataSet.

    Input/output widths come from `ds.indim` / `ds.outdim` when present,
    otherwise the keyword defaults are used. The hidden width defaults to
    `ds.paramdim - N_input - N_output`, is capped at
    len(ds) / (5 * N_input * N_output) (rounded), and floored at N_output.

    Args:
        ds (DataSet): dataset used to size the network (optional)
        N_input (int): fallback input dimension
        N_hidden (int): requested hidden width; 0 means derive it from `ds`
        N_output (int): fallback output dimension
        verbosity (int): passed through to `build_ann`

    Returns:
        FeedForwardNetwork built by `build_ann`
    """
    N_input = getattr(ds, 'indim', N_input)
    N_output = getattr(ds, 'outdim', N_output)
    N_hidden = N_hidden or getattr(ds, 'paramdim', N_hidden + N_input + N_output) - N_input - N_output
    N_hidden = max(round(min(N_hidden, len(ds) / float(N_input) / float(N_output) / 5.)), N_output)
    return build_ann(N_input=N_input, N_hidden=N_hidden, N_output=N_output, verbosity=verbosity)
def prepend_dataset_with_weather(samples, location='Fresno, CA', weather_columns=None, use_cache=True, verbosity=0):
    """ Prepend the weather values specified (e.g. 'Max TemperatureF') to each samples[i]['input'] vector

    Each samples[i]['target'] must have an index whose first entry is the sample's date timestamp.
    If you use_cache for the current year, you may not get the most recent data.

    Arguments:
        samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame}
        location (str): location string understood by `weather.daily`
        weather_columns (list of str or int): weather column labels (or indices) to prepend
        use_cache (bool): reuse previously downloaded weather data if available
        verbosity (int): < 0 silences warnings; > 0 warns on missing features; > 1 prints progress

    Returns:
        list of dict: the same `samples` list with each 'input' extended in place
    """
    if verbosity > 1:
        print('Prepending weather data for {} to dataset samples'.format(weather_columns))
    if not weather_columns:
        return samples
    timestamps = pd.DatetimeIndex([s['target'].index[0] for s in samples])
    years = range(timestamps.min().date().year, timestamps.max().date().year + 1)
    weather_df = weather.daily(location=location, years=years, use_cache=use_cache)
    # FIXME: weather_df.resample('D') fails
    weather_df.index = [d.date() for d in weather_df.index]
    if verbosity > 1:
        print('Retrieved weather for years {}:'.format(years))
        print(weather_df)
    # Resolve integer column positions to real column labels
    weather_columns = [label if label in weather_df.columns else weather_df.columns[int(label)]
                       for label in (weather_columns or [])]
    for sampnum, sample in enumerate(samples):
        timestamp = timestamps[sampnum]
        try:
            weather_day = weather_df.loc[timestamp.date()]
        except Exception:  # was a bare `except:`; narrowed so KeyboardInterrupt still propagates
            from traceback import print_exc
            print_exc()
            weather_day = {}
            if verbosity >= 0:
                warnings.warn('Unable to find weather for the date {}'.format(timestamp.date()))
        sample['input'] = [weather_day.get(label, None) for label in weather_columns] + list(sample['input'])
        # BUG FIX: `NaN` used to be bound only inside the except clause (NameError on the
        # happy path) and `NaN in list` can never match because NaN != NaN; detect missing
        # values (None or NaN) with pd.isnull() instead.
        if verbosity > 0 and any(pd.isnull(v) for v in sample['input']):
            warnings.warn('Unable to find weather features {} in the weather for date {}'.format(
                [label for i, label in enumerate(weather_columns) if pd.isnull(sample['input'][i])], timestamp))
    return samples
def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):
    """Compose a pybrain.dataset from a pandas DataFrame

    Arguments:
        delays (list of int): sample delays to use for the input tapped delay line
            Positive and negative values are treated the same as sample counts into the past.
            default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
        inputs (list of int or list of str): column indices or labels for the inputs
        outputs (list of int or list of str): column indices or labels for the outputs
        normalize (bool): whether to divide each input to be normally distributed about 0 with std 1

    Returns:
        3-tuple: tuple(dataset, list of means, list of stds) when `normalize` is True,
            otherwise just the dataset.
            means and stds allow normalization of new inputs and denormalization of the outputs

    TODO:
        Detect categorical variables with low dimensionality and split into separate bits
        Vowpel Wabbit hashes strings into an int?
        Detect ordinal variables and convert to continuous int sequence
        SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """
    # An integer N means "use delays 1..N"; 0 means "no tapped delay line"
    if isinstance(delays, int):
        if delays:
            delays = range(1, delays + 1)
        else:
            delays = [0]
    delays = np.abs(np.array([int(i) for i in delays]))
    # Resolve positional column indices to labels, then fuzzy-match the labels
    inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]
    outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]
    inputs = [fuzzy_get(df.columns, i) for i in inputs]
    outputs = [fuzzy_get(df.columns, o) for o in outputs]
    N_inp = len(inputs)
    N_out = len(outputs)
    inp_outs = inputs + outputs
    if verbosity > 0:
        print("inputs: {}\noutputs: {}\ndelays: {}\n".format(inputs, outputs, delays))
    means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))
    if normalize:
        means, stds = df[inp_outs].mean(), df[inp_outs].std()
    if normalize and verbosity > 0:
        print("Input mean values (used to normalize input biases): {}".format(means[:N_inp]))
        print("Output mean values (used to normalize output biases): {}".format(means[N_inp:]))
    ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)
    if verbosity > 0:
        print("Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs".format(
            len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))
    # FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time
    # BUGFIX: `delays == np.array([0])` is an element-wise comparison, so truth-testing
    # it raised "ValueError: truth value of an array ... is ambiguous" for any
    # multi-element `delays` (including the default). `np.all` expresses the intent.
    if np.all(delays == 0) and not normalize:
        if verbosity > 0:
            print("No tapped delay lines (delays) were requested, so using undelayed features for the dataset.")
        assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])
        ds.setField('input', df[inputs].values)
        ds.setField('target', df[outputs].values)
        ds.linkFields(['input', 'target'])
        # for inp, outp in zip(df[inputs].values, df[outputs].values):
        #     ds.appendLinked(inp, outp)
        assert(len(ds['input']) == len(ds['target']))
    else:
        for i, out_vec in enumerate(df[outputs].values):
            if verbosity > 0 and i % 100 == 0:
                print("{}%".format(i / .01 / len(df)))
            elif verbosity > 1:
                print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))
            # Skip the first max(delays) rows: they have no complete history
            if i < max(delays):
                continue
            inp_vec = []
            for delay in delays:
                inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])
            ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])
    if verbosity > 0:
        print("Dataset now has {} samples".format(len(ds)))
    if normalize:
        return ds, means, stds
    else:
        return ds
# def dataset_from_feature_names(df, delays=(1, 2,), quantiles=(), input_columns=(0,), target_columns=(-1,),
# weather_columns=(), features=(), verbosity=1):
# """FIXME: Untested. Transform Datafram columns and append to input_columns as additional features
# Arguments:
# features (seq of str): names of feautures to be appended to the input vector (feature set)
# """
# raise NotImplementedError("Better to implement this as a separate feature transformation function")
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + len(quantiles)
# + len(weather_columns)
# )
# N = max(list(delays) + list(quantiles))
# last_date = first_date = 0
# try:
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(len(N) + extras))
# print('Starting to augment {0}x{1} sample inputs by adding adding {2} additional features'.format(
# len(df), len(input_columns), extras))
# if verbosity > 0:
# print('The first sample input was {0}'.format(
# df[input_columns].iloc[0]))
# print('The first sample target was {0}'.format(
# df[target_columns].iloc[0]))
# print('The last sample input was {0}'.format(
# df[input_columns].iloc[-1]))
# print('The last sample target was {0}'.format(
# df[target_columns].iloc[-1]))
# first_date = df.index.iloc[0].date().toordinal()
# last_date = df.index.iloc[-1].date().toordinal()
# except:
# if verbosity > -1:
# from traceback import format_exc
# warnings.warn(format_exc())
# if verbosity > 1:
# import ipdb
# ipdb.set_trace()
# date_range = (last_date - first_date) + 1 or 1
# # FIXME: scale each feature/column/dimension independently using pug.ann.util.dataset_from_dataframe
# # but mean and std become vectors of the same dimension as the feature/input vector.
# # scikit-learn has transformations that do this more reasonably
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# # convert the list of dicts ((input, output) supervised dataset pairs) into a pybrains.Dataset
# ds = pybrain.datasets.SupervisedDataSet(len(N) + extras, 1)
# sorted_features = sorted(features)
# for sampnum, (input_vector, target_vector) in enumerate(
# zip(df[input_columns].values, df[target_columns].values)):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# # inputs = list(sample['input'])
# # the date we're trying to predict the threshold for
# timestamp = target_vector.index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 2:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale / date_range
# ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\n'.format(
# feature_name, sorted_features)
# msg += 'For sample {} and date {}\n'.format(sampnum, timestamp)
# msg += 'Input vector positions {}:\nInput vector: {}'.format(
# ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(target_vector.values))
def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """ Build a dataset whose output/target vectors are empty

    Identical to `dataset_from_dataframe` except that `outputs` defaults to
    None, so no target columns are extracted.
    """
    options = dict(delays=delays, inputs=inputs, outputs=outputs,
                   normalize=normalize, verbosity=verbosity)
    return dataset_from_dataframe(df=df, **options)
def inputs_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """ Build a sequence of vectors suitable for "activation" by a neural net

    Behaves like `dataset_from_dataframe` except that `outputs` defaults to
    None and only the 'input' field of the resulting DataSet is returned.
    """
    dataset = input_dataset_from_dataframe(df=df, delays=delays, inputs=inputs,
                                           outputs=outputs, normalize=normalize,
                                           verbosity=verbosity)
    return dataset['input']
def weight_matrices(nn):
    """ Extract list of weight matrices from a Network, Layer (module), Trainer, Connection or other pybrain object

    Recursively unwraps whatever container it is given (Trainer, Network,
    mapping of connections, string path to a saved network, plain sequence)
    until it reaches ParameterContainer/Connection objects, whose flat
    `params` vectors are reshaped into (outdim x indim) matrices.
    """
    # Already a numpy array of weights: nothing to extract
    if isinstance(nn, ndarray):
        return nn
    # A network-like object: recurse into its `.connections`
    try:
        return weight_matrices(nn.connections)
    except:
        pass
    # A Trainer (or similar wrapper) carries the network in `.module`
    try:
        return weight_matrices(nn.module)
    except:
        pass
    # Network objects are ParameterContainer's too, but won't reshape into a single matrix,
    # so this must come after try nn.connections
    if isinstance(nn, (ParameterContainer, Connection)):
        return reshape(nn.params, (nn.outdim, nn.indim))
    # A string is assumed to name a saved-network file to read back in
    if isinstance(nn, basestring):
        try:
            fn = nn
            nn = NetworkReader(fn, newfile=False)
            return weight_matrices(nn.readFrom(fn))
        except:
            pass
    # FIXME: what does NetworkReader output? (Module? Layer?) need to handle it's type here
    # A mapping of name -> module/connection (py2 `iteritems`): extract from each value
    try:
        return [weight_matrices(v) for (k, v) in nn.iteritems()]
    except:
        try:
            # Flatten the per-module lists of connections into one sequence
            connections = nn.module.connections.values()
            nn = []
            for conlist in connections:
                nn += conlist
            return weight_matrices(nn)
        except:
            # Last resort: treat `nn` as a plain sequence of pybrain objects
            return [weight_matrices(v) for v in nn]
# # FIXME: resolve all these NLP dependencies and get this working
# def dataset_from_time_series(df, N_inp=None, features=('moy',), verbosity=1):
# """Build a pybrains.dataset from the time series contained in a dataframe"""
# N_inp = N_inp or len(df.columns)
# features = features or []
# # Add features to input vector in reverse alphabetical order by feature name,
# # so woy will be added first, and date will be added last.
# # The order that the feature vectors should appear in the input vector to remain consistent
# # and neural net architecture can have structure that anticipates this.
# sorted_features = nlp.sort_strings(features, ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), case_sensitive=False)
# if verbosity > 0:
# print('dataset_from_thresh(features={0})'.format(features))
# samples, mean, std, thresh = simple_dataset_from_thresh(thresh, N=N, max_window=max_window,
# normalize=normalize, ignore_below=ignore_below)
# name = getattr(thresh, 'name', None)
# if name:
# name = normalize_building_name(name)
# if name:
# series = get_series(name)
# else:
# if isinstance(series, basestring):
# name = normalize_building_name(series.strip()) or thresh.name or 'Unknown'
# series = get_series(name)
# elif isinstance(series, pd.DataFrame):
# name = normalize_building_name(series.columns[0]) or thresh.name or 'Unknown'
# series = series[name]
# elif isinstance(series, pd.Series):
# name = normalize_building_name(series.name) or thresh.name or 'Unknown'
# else:
# name = None
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# # TODO: pre-process features list of strings in a separate function
# morn = 0
# # if the building name isn't known, you can't retrieve the morning load values for it
# if name:
# for s in features:
# if s.startswith('morn'):
# try:
# morn = int(s[4:])
# except:
# if verbosity > 0:
# warnings.warn('Unable to determine morning length from feature named "{0}" so using default '
# '(8 am = 8 * 4 = 32)')
# morn = 32 # default to 9 am morning ending
# break
# if verbosity > 0:
# print('In dataset_from_thresh() using {0} morning load values for Building {1}'
# ' because series arg is of type {2}'.format(morn, name, type(series)))
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + morn)
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(N + extras))
# ds = pb.datasets.SupervisedDataSet(N + extras, 1)
# first_date = samples[0]['target'].index[0].date().toordinal()
# last_date = samples[-1]['target'].index[0].date().toordinal()
# date_range = (last_date - first_date) or 1
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# if verbosity > 0:
# print('Adding features for building {3}, {0}, and a morning time series of len {2}, '
# 'to each of the {1} vectors (samples)'.format(features, len(samples), morn, name))
# for sampnum, sample in enumerate(samples):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# inputs = list(sample['input'].values)
# # the date we're trying to predict the rhreshold for
# timestamp = sample['target'].index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 1:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale /
# date_range ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\nFor sample {2}'
# ' and date {3}\nInput vector positions {4}:\nInput vector: {5}'.format(
# feature_name, sorted_features, sampnum, timestamp, ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(sample['target'].values))
# return ds, mean, std, thresh
def dataset_nan_locs(ds):
"""
from http://stackoverflow.com/a/14033137/623735
# gets the indices of the rows with nan values in a dataframe
pd.isnull(df).any(1).nonzero()[0]
"""
ans = []
for sampnum, sample in enumerate(ds):
if pd.isnull(sample).any():
ans += [{
'sample': sampnum,
'input': pd.isnull(sample[0]).nonzero()[0],
'output': pd.isnull(sample[1]).nonzero()[0],
}]
return ans
def table_nan_locs(table):
    """Return (row, column) index pairs for every NaN/null cell in `table`.

    `table` may be a sequence of rows (each row a sequence of values) or a
    flat sequence of scalars; a null scalar is reported at column 0.

    (see http://stackoverflow.com/a/14033137/623735 for the underlying idiom:
    `pd.isnull(df).any(1).nonzero()[0]`)
    """
    locations = []
    for row_idx, row in enumerate(table):
        nulls = pd.isnull(row)
        try:
            if nulls.any():
                locations.extend((row_idx, col) for col in nulls.nonzero()[0])
        except AttributeError:
            # `row` was a scalar, so pd.isnull returned a plain bool
            if nulls:
                locations.append((row_idx, 0))
    return locations
def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):
    """Identical to plot_trainer except `network` and `ds` must be provided separately

    Activates `network` on `ds` (via `sim_network`), plots the denormalized
    Output/Target curves, and optionally shows and/or saves the figure.

    Args:
        network (Network): pybrain network to activate
        ds (DataSet): dataset providing the 'input' and 'target' sequences
        mean (float): mean used to denormalize the plotted values (default: 0)
        std (float): standard deviation used to denormalize the values (default: 1)
        title (str): plot title; also used to build the saved PNG filename
        show (bool): whether to display the figure
        save (bool or str): truthy saves a PNG; a directory path saves it there

    Returns:
        3-tuple: (network, mean, std)
    """
    df = sim_network(network=network, ds=ds, mean=mean, std=std)
    df.plot()
    plt.xlabel('Date')
    plt.ylabel('Threshold (kW)')
    plt.title(title)
    if show:
        try:
            # ipython notebook overrides plt.show and doesn't have a block kwarg
            plt.show(block=False)
        except TypeError:
            plt.show()
    if save:
        # e.g. title 'My Net' -> 'ann_performance_for_My_Net.png'
        filename = 'ann_performance_for_{0}.png'.format(title).replace(' ', '_')
        if isinstance(save, basestring) and os.path.isdir(save):
            filename = os.path.join(save, filename)
        plt.savefig(filename)
        if not show:
            # the figure was never displayed, so clear it to free the canvas
            plt.clf()
    return network, mean, std
def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):
    """Plot the performance of the Network and SupervisedDataSet in a pybrain Trainer

    DataSet target and output values are denormalized before plotting with:

        output * std + mean

    Which inverses the normalization

        (output - mean) / std

    Args:
        trainer (Trainer): a pybrain Trainer instance containing a valid Network and DataSet
        mean (float): mean of the denormalized dataset (default: 0)
            Only affects the scale of the plot
        std (float): std (standard deviation) of the denormalized dataset (default: 1)
        title (str): title to display on the plot.
        show (bool): whether to display the figure (see `plot_network_results`)
        save (bool or str): whether (or where) to save the plot as a PNG

    Returns:
        3-tuple: (network, mean, std) -- note the *network* (trainer.module),
            not the trainer, is returned (it is whatever `plot_network_results` returns)
    """
    return plot_network_results(network=trainer.module, ds=trainer.ds, mean=mean, std=std, title=title,
                                show=show, save=save)
def sim_trainer(trainer, mean=0, std=1):
    """Simulate a trainer by activating its DataSet and returning DataFrame(columns=['Output','Target'])

    Thin wrapper around `sim_network` that pulls the Network (`trainer.module`)
    and the DataSet (`trainer.ds`) out of a pybrain Trainer.

    Args:
        trainer (Trainer): pybrain Trainer holding both the Network and the DataSet
        mean (float): mean used to denormalize outputs/targets (default: 0)
        std (float): standard deviation used to denormalize outputs/targets (default: 1)

    Returns:
        DataFrame: columns 'Output' and 'Target', denormalized as `value * std + mean`
    """
    return sim_network(network=trainer.module, ds=trainer.ds, mean=mean, std=std)
def sim_network(network, ds=None, index=None, mean=0, std=1):
    """Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])

    The DataSet's target and output values are denormalized before populating the dataframe columns:

        denormalized_output = normalized_output * std + mean

    Which inverses the normalization that produced the normalized output in the first place:

        normalized_output = (denormalized_output - mean) / std

    Args:
        network (Network or Trainer): a pybrain Network, or any wrapper (e.g. a Trainer)
            that carries one in its `.module` attribute
        ds (DataSet): a pybrain DataSet to activate the Network on to produce an output
            sequence; falls back to `network.ds` when not provided
        index (sequence): optional index for the returned DataFrame (default: 0..N-1)
        mean (float): mean of the denormalized dataset (default: 0)
        std (float): std (standard deviation) of the denormalized dataset (default: 1)

    Raises:
        RuntimeError: if no DataSet was provided and the network carries none

    Returns:
        DataFrame: DataFrame with columns "Output" and "Target" suitable for df.plot-ting
    """
    # just in case network is a trainer or has a Module-derived instance as one of it's attributes
    if hasattr(network, 'module') and hasattr(network.module, 'activate'):
        network = network.module
    # BUGFIX: use getattr so a Network without a `ds` attribute reaches the
    # informative RuntimeError below instead of raising a bare AttributeError
    ds = ds or getattr(network, 'ds', None)
    if not ds:
        raise RuntimeError("Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, "
                           " to plot the outputs. A dataset can be provided as part of a network instance or "
                           "as a separate kwarg if `network` is used to provide the `pybrain.Network`"
                           " instance directly.")
    # `range` (not the py2-only `xrange`) keeps this working on both Python 2 and 3
    results_generator = ((network.activate(ds['input'][i])[0] * std + mean, ds['target'][i][0] * std + mean)
                         for i in range(len(ds['input'])))
    return pd.DataFrame(results_generator, columns=['Output', 'Target'], index=index or range(len(ds['input'])))
|
hobson/pug-ann | pug/ann/util.py | weight_matrices | python | def weight_matrices(nn):
if isinstance(nn, ndarray):
return nn
try:
return weight_matrices(nn.connections)
except:
pass
try:
return weight_matrices(nn.module)
except:
pass
# Network objects are ParameterContainer's too, but won't reshape into a single matrix,
# so this must come after try nn.connections
if isinstance(nn, (ParameterContainer, Connection)):
return reshape(nn.params, (nn.outdim, nn.indim))
if isinstance(nn, basestring):
try:
fn = nn
nn = NetworkReader(fn, newfile=False)
return weight_matrices(nn.readFrom(fn))
except:
pass
# FIXME: what does NetworkReader output? (Module? Layer?) need to handle it's type here
try:
return [weight_matrices(v) for (k, v) in nn.iteritems()]
except:
try:
connections = nn.module.connections.values()
nn = []
for conlist in connections:
nn += conlist
return weight_matrices(nn)
except:
return [weight_matrices(v) for v in nn] | Extract list of weight matrices from a Network, Layer (module), Trainer, Connection or other pybrain object | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/util.py#L377-L417 | [
"def weight_matrices(nn):\n \"\"\" Extract list of weight matrices from a Network, Layer (module), Trainer, Connection or other pybrain object\"\"\"\n\n if isinstance(nn, ndarray):\n return nn\n\n try:\n return weight_matrices(nn.connections)\n except:\n pass\n\n try:\n return weight_matrices(nn.module)\n except:\n pass\n\n # Network objects are ParameterContainer's too, but won't reshape into a single matrix,\n # so this must come after try nn.connections\n if isinstance(nn, (ParameterContainer, Connection)):\n return reshape(nn.params, (nn.outdim, nn.indim))\n\n if isinstance(nn, basestring):\n try:\n fn = nn\n nn = NetworkReader(fn, newfile=False)\n return weight_matrices(nn.readFrom(fn))\n except:\n pass\n # FIXME: what does NetworkReader output? (Module? Layer?) need to handle it's type here\n\n try:\n return [weight_matrices(v) for (k, v) in nn.iteritems()]\n except:\n try:\n connections = nn.module.connections.values()\n nn = []\n for conlist in connections:\n nn += conlist\n return weight_matrices(nn)\n except:\n return [weight_matrices(v) for v in nn]\n"
] | """Manipulate, analyze and plot `pybrain` `Network` and `DataSet` objects
TODO:
Incorporate into pybrain fork so pug doesn't have to depend on pybrain
"""
from __future__ import print_function
import os
import warnings
import pandas as pd
from scipy import ndarray, reshape # array, amin, amax,
np = pd.np
from matplotlib import pyplot as plt
import pybrain.datasets
import pybrain.structure
import pybrain.supervised
import pybrain.tools
pb = pybrain
# from pybrain.supervised.trainers import Trainer
from pybrain.tools.customxml import NetworkReader
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.connections.connection import Connection
from pug.nlp.util import tuplify, fuzzy_get
from pug.ann.data import weather
#import pug.nlp.util as nlp
# print(os.path.realpath(__file__))
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(DATA_PATH, '..', 'data')
LAYER_TYPES = set([getattr(pybrain.structure, s) for s in
dir(pybrain.structure) if s.endswith('Layer')])
FAST = False
try:
from arac.pybrainbridge import _FeedForwardNetwork as FeedForwardNetwork
FAST = True
except:
from pybrain.structure import FeedForwardNetwork
print("No fast (ARAC, a FC++ library) FeedForwardNetwork was found, "
"so using the slower pybrain python implementation of FFNN.")
def normalize_layer_type(layer_type):
    """Resolve `layer_type` into pybrain layer class(es).

    Accepts a pybrain layer class (returned unchanged), a string naming one
    (with or without the 'Layer' suffix), or an iterable of either, which is
    normalized element-wise into a list.
    """
    # Already one of the known pybrain layer classes?
    try:
        if layer_type in LAYER_TYPES:
            return layer_type
    except TypeError:  # unhashable, e.g. a list of layer specs
        pass
    # A string naming the class, with or without the 'Layer' suffix
    for suffix in ('', 'Layer'):
        try:
            return getattr(pb.structure, layer_type.strip() + suffix)
        except AttributeError:
            continue
    # Otherwise assume an iterable of layer specs and normalize each element
    return [normalize_layer_type(item) for item in layer_type]
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
    """Build a feed-forward neural net with the indicated input, hidden, and output dimensions

    Arguments:
        N_input (int): width of the input layer (default: 1)
        N_hidden (int or sequence of int): width of each hidden layer; a scalar builds
            a single hidden layer, a falsy value builds none
        N_output (int): width of the output layer (default: 1)
        hidden_layer_type (str, class or sequence): pybrain layer type(s) for the hidden
            layer(s); must match the length of N_hidden after normalization
        verbosity (int): > 0 prints progress/diagnostics

    Returns:
        FeedForwardNetwork with N_input + sum(N_hidden) + N_output nodes, plus a bias unit
    """
    N_input = N_input or 1
    N_output = N_output or 1
    # Normalize N_hidden and hidden_layer_type into parallel tuples
    N_hidden = N_hidden or tuple()
    if isinstance(N_hidden, (int, float, basestring)):
        N_hidden = (int(N_hidden),)
    hidden_layer_type = hidden_layer_type or tuple()
    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
    if verbosity > 0:
        print(N_hidden, ' layers of type ', hidden_layer_type)
    assert(len(N_hidden) == len(hidden_layer_type))
    # NOTE(review): the module-level FeedForwardNetwork (possibly the fast ARAC one)
    # is not used here; the plain pb.structure class is instantiated -- confirm intent.
    nn = pb.structure.FeedForwardNetwork()
    # layers: first hidden layer is named 'hidden', later ones 'hidden-1', 'hidden-2', ...
    nn.addInputModule(pb.structure.BiasUnit(name='bias'))
    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):
        Nhid = int(Nhid)
        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))
    # connections: bias and input feed the first hidden layer (or output if no hidden layers)
    nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))
    nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))
    # chain consecutive hidden layers together
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):
        Nhid = int(Nhid)
        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],
                                                     nn['hidden-{}'.format(i + 1)]))
    # connect the last hidden layer to the output
    # NOTE(review): with an empty N_hidden this computes i = -1 and looks up
    # nn['hidden--1'] -- likely broken for the zero-hidden-layer case; confirm.
    i = len(N_hidden) - 1
    nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))
    nn.sortModules()
    if FAST:
        try:
            nn.convertToFastNetwork()
        except:
            if verbosity > 0:
                print('Unable to convert slow PyBrain NN to a fast ARAC network...')
    if verbosity > 0:
        print(nn.connections)
    return nn
def ann_from_ds(ds=None, N_input=3, N_hidden=0, N_output=1, verbosity=1):
    """Construct a feed-forward net whose layer sizes fit the given pybrain DataSet.

    Input/output widths default to the dataset's `indim`/`outdim`.  When
    `N_hidden` is falsy it is derived from the dataset's `paramdim`, then
    capped by a heuristic based on the number of samples (but never below
    `N_output`).
    """
    N_input = getattr(ds, 'indim', N_input)
    N_output = getattr(ds, 'outdim', N_output)
    if not N_hidden:
        N_hidden = getattr(ds, 'paramdim', N_hidden + N_input + N_output) - N_input - N_output
    sample_cap = len(ds) / float(N_input) / float(N_output) / 5.
    N_hidden = max(round(min(N_hidden, sample_cap)), N_output)
    return build_ann(N_input=N_input, N_hidden=N_hidden, N_output=N_output, verbosity=verbosity)
def prepend_dataset_with_weather(samples, location='Fresno, CA', weather_columns=None, use_cache=True, verbosity=0):
    """ Prepend weather the values specified (e.g. Max TempF) to the samples[0..N]['input'] vectors

    samples[0..N]['target'] should have an index with the date timestamp
    If you use_cache for the current year, you may not get the most recent data.

    Arguments:
        samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame}
    """
    if verbosity > 1:
        print('Prepending weather data for {} to dataset samples'.format(weather_columns))
    if not weather_columns:
        return samples
    # One timestamp per sample: the first index entry of its target DataFrame
    timestamps = pd.DatetimeIndex([s['target'].index[0] for s in samples])
    years = range(timestamps.min().date().year, timestamps.max().date().year + 1)
    weather_df = weather.daily(location=location, years=years, use_cache=use_cache)
    # FIXME: weather_df.resample('D') fails
    weather_df.index = [d.date() for d in weather_df.index]
    if verbosity > 1:
        print('Retrieved weather for years {}:'.format(years))
        print(weather_df)
    # Accept integer column positions as well as labels
    weather_columns = [label if label in weather_df.columns else weather_df.columns[int(label)]
                       for label in (weather_columns or [])]
    for sampnum, sample in enumerate(samples):
        timestamp = timestamps[sampnum]
        try:
            weather_day = weather_df.loc[timestamp.date()]
        except:
            from traceback import print_exc
            print_exc()
            weather_day = {}
            if verbosity >= 0:
                warnings.warn('Unable to find weather for the date {}'.format(timestamp.date()))
        NaN = float('NaN')
        # NOTE(review): missing features default to None (not NaN) below, so the
        # `NaN in sample['input']` check can never fire -- confirm intent.
        sample['input'] = [weather_day.get(label, None) for label in weather_columns] + list(sample['input'])
        if verbosity > 0 and NaN in sample['input']:
            warnings.warn('Unable to find weather features {} in the weather for date {}'.format(
                [label for i, label in enumerate(weather_columns) if sample['input'][i] == NaN], timestamp))
    return samples
def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):
    """Compose a pybrain.dataset from a pandas DataFrame

    Arguments:
        delays (list of int): sample delays to use for the input tapped delay line
            Positive and negative values are treated the same as sample counts into the past.
            default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
        inputs (list of int or list of str): column indices or labels for the inputs
        outputs (list of int or list of str): column indices or labels for the outputs
        normalize (bool): whether to divide each input to be normally distributed about 0 with std 1

    Returns:
        3-tuple: tuple(dataset, list of means, list of stds)
            means and stds allow normalization of new inputs and denormalization of the outputs

    TODO:
        Detect categorical variables with low dimensionality and split into separate bits
        Vowpel Wabbit hashes strings into an int?
        Detect ordinal variables and convert to continuous int sequence
        SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """
    # An integer N means "use delays 1..N"; 0 means "no tapped delay line"
    if isinstance(delays, int):
        if delays:
            delays = range(1, delays + 1)
        else:
            delays = [0]
    delays = np.abs(np.array([int(i) for i in delays]))
    # Resolve positional column indices to labels, then fuzzy-match the labels
    inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]
    outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]
    inputs = [fuzzy_get(df.columns, i) for i in inputs]
    outputs = [fuzzy_get(df.columns, o) for o in outputs]
    N_inp = len(inputs)
    N_out = len(outputs)
    inp_outs = inputs + outputs
    if verbosity > 0:
        print("inputs: {}\noutputs: {}\ndelays: {}\n".format(inputs, outputs, delays))
    means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))
    if normalize:
        means, stds = df[inp_outs].mean(), df[inp_outs].std()
    if normalize and verbosity > 0:
        print("Input mean values (used to normalize input biases): {}".format(means[:N_inp]))
        print("Output mean values (used to normalize output biases): {}".format(means[N_inp:]))
    ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)
    if verbosity > 0:
        print("Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs".format(
            len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))
    # FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time
    # NOTE(review): `delays == np.array([0])` is an element-wise comparison; truth-testing
    # it raises ValueError for any multi-element `delays` (including the default) -- this
    # condition should probably be `np.all(delays == 0)`.
    if delays == np.array([0]) and not normalize:
        if verbosity > 0:
            print("No tapped delay lines (delays) were requested, so using undelayed features for the dataset.")
        assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])
        ds.setField('input', df[inputs].values)
        ds.setField('target', df[outputs].values)
        ds.linkFields(['input', 'target'])
        # for inp, outp in zip(df[inputs].values, df[outputs].values):
        #     ds.appendLinked(inp, outp)
        assert(len(ds['input']) == len(ds['target']))
    else:
        for i, out_vec in enumerate(df[outputs].values):
            if verbosity > 0 and i % 100 == 0:
                print("{}%".format(i / .01 / len(df)))
            elif verbosity > 1:
                print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))
            # Skip the first max(delays) rows: they have no complete history
            if i < max(delays):
                continue
            inp_vec = []
            for delay in delays:
                inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])
            ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])
    if verbosity > 0:
        print("Dataset now has {} samples".format(len(ds)))
    if normalize:
        return ds, means, stds
    else:
        return ds
# def dataset_from_feature_names(df, delays=(1, 2,), quantiles=(), input_columns=(0,), target_columns=(-1,),
# weather_columns=(), features=(), verbosity=1):
# """FIXME: Untested. Transform Datafram columns and append to input_columns as additional features
# Arguments:
# features (seq of str): names of feautures to be appended to the input vector (feature set)
# """
# raise NotImplementedError("Better to implement this as a separate feature transformation function")
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + len(quantiles)
# + len(weather_columns)
# )
# N = max(list(delays) + list(quantiles))
# last_date = first_date = 0
# try:
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(len(N) + extras))
# print('Starting to augment {0}x{1} sample inputs by adding adding {2} additional features'.format(
# len(df), len(input_columns), extras))
# if verbosity > 0:
# print('The first sample input was {0}'.format(
# df[input_columns].iloc[0]))
# print('The first sample target was {0}'.format(
# df[target_columns].iloc[0]))
# print('The last sample input was {0}'.format(
# df[input_columns].iloc[-1]))
# print('The last sample target was {0}'.format(
# df[target_columns].iloc[-1]))
# first_date = df.index.iloc[0].date().toordinal()
# last_date = df.index.iloc[-1].date().toordinal()
# except:
# if verbosity > -1:
# from traceback import format_exc
# warnings.warn(format_exc())
# if verbosity > 1:
# import ipdb
# ipdb.set_trace()
# date_range = (last_date - first_date) + 1 or 1
# # FIXME: scale each feature/column/dimension independently using pug.ann.util.dataset_from_dataframe
# # but mean and std become vectors of the same dimension as the feature/input vector.
# # scikit-learn has transformations that do this more reasonably
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# # convert the list of dicts ((input, output) supervised dataset pairs) into a pybrains.Dataset
# ds = pybrain.datasets.SupervisedDataSet(len(N) + extras, 1)
# sorted_features = sorted(features)
# for sampnum, (input_vector, target_vector) in enumerate(
# zip(df[input_columns].values, df[target_columns].values)):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# # inputs = list(sample['input'])
# # the date we're trying to predict the threshold for
# timestamp = target_vector.index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 2:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale / date_range
# ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\n'.format(
# feature_name, sorted_features)
# msg += 'For sample {} and date {}\n'.format(sampnum, timestamp)
# msg += 'Input vector positions {}:\nInput vector: {}'.format(
# ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(target_vector.values))
def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build a supervised dataset whose output/target vectors are left empty.

    Thin wrapper around `dataset_from_dataframe` that only changes the default
    for `outputs` to None (no target columns).
    """
    kwargs = dict(df=df, delays=delays, inputs=inputs, outputs=outputs,
                  normalize=normalize, verbosity=verbosity)
    return dataset_from_dataframe(**kwargs)
def inputs_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Return only the sequence of input vectors for neural-net activation.

    Behaves like `dataset_from_dataframe` with `outputs=None`, but instead of
    the full DataSet instance only its 'input' field is returned.
    """
    dataset = input_dataset_from_dataframe(
        df=df, delays=delays, inputs=inputs, outputs=outputs,
        normalize=normalize, verbosity=verbosity)
    return dataset['input']
def build_trainer(nn, ds, verbosity=1):
    """Create an RProp- (batch-learning) trainer for the given net and pybrain dataset."""
    verbose = bool(verbosity)
    return pb.supervised.trainers.rprop.RPropMinusTrainer(
        nn, dataset=ds, batchlearning=True, verbose=verbose)
# # FIXME: resolve all these NLP dependencies and get this working
# def dataset_from_time_series(df, N_inp=None, features=('moy',), verbosity=1):
# """Build a pybrains.dataset from the time series contained in a dataframe"""
# N_inp = N_inp or len(df.columns)
# features = features or []
# # Add features to input vector in reverse alphabetical order by feature name,
# # so woy will be added first, and date will be added last.
# # The order that the feature vectors should appear in the input vector to remain consistent
# # and neural net architecture can have structure that anticipates this.
# sorted_features = nlp.sort_strings(features, ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), case_sensitive=False)
# if verbosity > 0:
# print('dataset_from_thresh(features={0})'.format(features))
# samples, mean, std, thresh = simple_dataset_from_thresh(thresh, N=N, max_window=max_window,
# normalize=normalize, ignore_below=ignore_below)
# name = getattr(thresh, 'name', None)
# if name:
# name = normalize_building_name(name)
# if name:
# series = get_series(name)
# else:
# if isinstance(series, basestring):
# name = normalize_building_name(series.strip()) or thresh.name or 'Unknown'
# series = get_series(name)
# elif isinstance(series, pd.DataFrame):
# name = normalize_building_name(series.columns[0]) or thresh.name or 'Unknown'
# series = series[name]
# elif isinstance(series, pd.Series):
# name = normalize_building_name(series.name) or thresh.name or 'Unknown'
# else:
# name = None
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# # TODO: pre-process features list of strings in a separate function
# morn = 0
# # if the building name isn't known, you can't retrieve the morning load values for it
# if name:
# for s in features:
# if s.startswith('morn'):
# try:
# morn = int(s[4:])
# except:
# if verbosity > 0:
# warnings.warn('Unable to determine morning length from feature named "{0}" so using default '
# '(8 am = 8 * 4 = 32)')
# morn = 32 # default to 9 am morning ending
# break
# if verbosity > 0:
# print('In dataset_from_thresh() using {0} morning load values for Building {1}'
# ' because series arg is of type {2}'.format(morn, name, type(series)))
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + morn)
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(N + extras))
# ds = pb.datasets.SupervisedDataSet(N + extras, 1)
# first_date = samples[0]['target'].index[0].date().toordinal()
# last_date = samples[-1]['target'].index[0].date().toordinal()
# date_range = (last_date - first_date) or 1
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# if verbosity > 0:
# print('Adding features for building {3}, {0}, and a morning time series of len {2}, '
# 'to each of the {1} vectors (samples)'.format(features, len(samples), morn, name))
# for sampnum, sample in enumerate(samples):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# inputs = list(sample['input'].values)
# # the date we're trying to predict the rhreshold for
# timestamp = sample['target'].index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 1:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale /
# date_range ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\nFor sample {2}'
# ' and date {3}\nInput vector positions {4}:\nInput vector: {5}'.format(
# feature_name, sorted_features, sampnum, timestamp, ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(sample['target'].values))
# return ds, mean, std, thresh
def dataset_nan_locs(ds):
"""
from http://stackoverflow.com/a/14033137/623735
# gets the indices of the rows with nan values in a dataframe
pd.isnull(df).any(1).nonzero()[0]
"""
ans = []
for sampnum, sample in enumerate(ds):
if pd.isnull(sample).any():
ans += [{
'sample': sampnum,
'input': pd.isnull(sample[0]).nonzero()[0],
'output': pd.isnull(sample[1]).nonzero()[0],
}]
return ans
def table_nan_locs(table):
    """Return (row, column) coordinates of every NaN/null cell in `table`.

    Based on http://stackoverflow.com/a/14033137/623735
    (`pd.isnull(df).any(1).nonzero()[0]` finds rows containing NaNs).
    If `table` is a flat sequence of scalars, a null scalar at row r is
    reported as (r, 0).
    """
    locations = []
    for r, row in enumerate(table):
        try:
            mask = pd.isnull(row)
            if mask.any():
                locations.extend((r, c) for c in mask.nonzero()[0])
        except AttributeError:  # `row` is a scalar, so the null-check is a plain bool
            if pd.isnull(row):
                locations.append((r, 0))
    return locations
def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):
    """Plot simulated Output vs Target for `network` activated on dataset `ds`.

    Same as `trainer_results` except the network and dataset are supplied
    separately.  Returns (network, mean, std) so denormalization info travels
    with the network.
    """
    sim_df = sim_network(network=network, ds=ds, mean=mean, std=std)
    sim_df.plot()
    plt.xlabel('Date')
    plt.ylabel('Threshold (kW)')
    plt.title(title)
    if show:
        try:
            plt.show(block=False)
        except TypeError:
            # ipython notebook overrides plt.show and lacks the `block` kwarg
            plt.show()
    if save:
        fig_path = 'ann_performance_for_{0}.png'.format(title).replace(' ', '_')
        if isinstance(save, basestring) and os.path.isdir(save):
            fig_path = os.path.join(save, fig_path)
        plt.savefig(fig_path)
    if not show:
        plt.clf()
    return network, mean, std
def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):
    """Plot the performance of a pybrain Trainer's Network on its DataSet.

    Target and output values are denormalized before plotting with
    `output * std + mean`, which undoes the normalization
    `(output - mean) / std`.

    Args:
        trainer (Trainer): pybrain Trainer holding a valid Network and DataSet
        mean (float): mean of the denormalized dataset (default: 0);
            only affects the scale of the plot
        std (float): standard deviation of the denormalized dataset (default: 1)
        title (str): title to display on the plot

    Returns:
        3-tuple: (network, mean, std) as returned by `plot_network_results`
    """
    return plot_network_results(network=trainer.module, ds=trainer.ds,
                                mean=mean, std=std, title=title,
                                show=show, save=save)
def sim_trainer(trainer, mean=0, std=1):
    """Activate a trainer's network on its own DataSet.

    Returns a DataFrame with columns ['Output', 'Target'] (see `sim_network`).
    """
    net, dataset = trainer.module, trainer.ds
    return sim_network(network=net, ds=dataset, mean=mean, std=std)
def sim_network(network, ds=None, index=None, mean=0, std=1):
    """Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])

    The DataSet's target and output values are denormalized before populating the dataframe columns:
        denormalized_output = normalized_output * std + mean
    which inverts the normalization that produced the normalized output in the first place:
        normalized_output = (denormalized_output - mean) / std

    Args:
        network (Network or Trainer): a pybrain Network instance (or any object whose
            `.module` attribute has an `activate` method) to activate on the DataSet `ds`
        ds (DataSet): a pybrain DataSet to activate the Network on to produce an output sequence.
            Falls back to `network.ds` when not provided (or falsy).
        index (sequence): index for the returned DataFrame.
            NOTE(review): an *empty* sequence is falsy and is silently replaced by
            the default `range(len(ds['input']))` -- confirm that is intended.
        mean (float): mean of the denormalized dataset (default: 0)
        std (float): std (standard deviation) of the denormalized dataset (default: 1)
    Returns:
        DataFrame: DataFrame with columns "Output" and "Target" suitable for df.plot-ting
    """
    # just in case network is a trainer or has a Module-derived instance as one of its attributes
    # isinstance(network.module, (networks.Network, modules.Module))
    if hasattr(network, 'module') and hasattr(network.module, 'activate'):
        # may want to also check: isinstance(network.module, (networks.Network, modules.Module))
        network = network.module
    ds = ds or network.ds
    if not ds:
        raise RuntimeError("Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, "
                           " to plot the outputs. A dataset can be provided as part of a network instance or "
                           "as a separate kwarg if `network` is used to provide the `pybrain.Network`"
                           " instance directly.")
    # Lazily activate the net on each input vector, denormalizing both the
    # network output and the dataset target back to the original scale.
    results_generator = ((network.activate(ds['input'][i])[0] * std + mean, ds['target'][i][0] * std + mean)
                         for i in xrange(len(ds['input'])))
    return pd.DataFrame(results_generator, columns=['Output', 'Target'], index=index or range(len(ds['input'])))
|
hobson/pug-ann | pug/ann/util.py | dataset_nan_locs | python | def dataset_nan_locs(ds):
ans = []
for sampnum, sample in enumerate(ds):
if pd.isnull(sample).any():
ans += [{
'sample': sampnum,
'input': pd.isnull(sample[0]).nonzero()[0],
'output': pd.isnull(sample[1]).nonzero()[0],
}]
return ans | from http://stackoverflow.com/a/14033137/623735
# gets the indices of the rows with nan values in a dataframe
pd.isnull(df).any(1).nonzero()[0] | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/util.py#L535-L549 | null | """Maniuplate, analyze and plot `pybrain` `Network` and `DataSet` objects
TODO:
Incorporate into pybrain fork so pug doesn't have to depend on pybrain
"""
from __future__ import print_function
import os
import warnings
import pandas as pd
from scipy import ndarray, reshape # array, amin, amax,
np = pd.np
from matplotlib import pyplot as plt
import pybrain.datasets
import pybrain.structure
import pybrain.supervised
import pybrain.tools
pb = pybrain
# from pybrain.supervised.trainers import Trainer
from pybrain.tools.customxml import NetworkReader
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.connections.connection import Connection
from pug.nlp.util import tuplify, fuzzy_get
from pug.ann.data import weather
#import pug.nlp.util as nlp
# print(os.path.realpath(__file__))
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(DATA_PATH, '..', 'data')
LAYER_TYPES = set([getattr(pybrain.structure, s) for s in
dir(pybrain.structure) if s.endswith('Layer')])
FAST = False
try:
from arac.pybrainbridge import _FeedForwardNetwork as FeedForwardNetwork
FAST = True
except:
from pybrain.structure import FeedForwardNetwork
print("No fast (ARAC, a FC++ library) FeedForwardNetwork was found, "
"so using the slower pybrain python implementation of FFNN.")
def normalize_layer_type(layer_type):
    """Resolve `layer_type` to a pybrain layer class (or list of classes).

    Accepts a pybrain layer class already in LAYER_TYPES (returned as-is),
    a string naming one (with or without the 'Layer' suffix, e.g. 'Linear'
    or 'LinearLayer'), or an iterable of the above (normalized elementwise).

    NOTE(review): an unrecognized *string* falls through to the final list
    comprehension and gets normalized character-by-character instead of
    raising -- confirm callers never pass invalid layer names.
    """
    try:
        # Already a layer class?  (Unhashable inputs such as lists raise TypeError here.)
        if layer_type in LAYER_TYPES:
            return layer_type
    except TypeError:
        pass
    try:
        # A string naming the class exactly, e.g. 'LinearLayer'
        # (non-strings lack .strip() and raise AttributeError)
        return getattr(pb.structure, layer_type.strip())
    except AttributeError:
        try:
            # A string without the conventional suffix, e.g. 'Linear'
            return getattr(pb.structure, layer_type.strip() + 'Layer')
        except AttributeError:
            pass
    # Assume an iterable of layer types/names and normalize each element
    return [normalize_layer_type(lt) for lt in layer_type]
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
    """Build a pybrain FeedForwardNetwork with the indicated input, hidden, and output dimensions

    Arguments:
        N_input (int): width of the input layer (falsy -> 1)
        N_hidden (int or sequence of int): width of each hidden layer;
            a scalar builds a single hidden layer, a sequence builds one layer per entry
        N_output (int): width of the output layer (falsy -> 1)
        hidden_layer_type (str or layer class or sequence): pybrain layer type(s)
            for the hidden layer(s); must parallel N_hidden in length
        verbosity (int): > 0 prints progress and the resulting connections

    Returns:
        FeedForwardNetwork with bias + N_input + hidden layer(s) + N_output nodes,
        converted to a fast ARAC network when available

    NOTE(review): when N_hidden normalizes to an empty tuple (e.g. N_hidden=0),
    the final FullConnection below looks up nn['hidden--1'] (i == -1), which
    looks like it would fail -- confirm callers always request >= 1 hidden layer.
    """
    N_input = N_input or 1
    N_output = N_output or 1
    N_hidden = N_hidden or tuple()
    # Normalize a scalar layer width into a 1-tuple of layer widths
    if isinstance(N_hidden, (int, float, basestring)):
        N_hidden = (int(N_hidden),)
    hidden_layer_type = hidden_layer_type or tuple()
    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
    if verbosity > 0:
        print(N_hidden, ' layers of type ', hidden_layer_type)
    assert(len(N_hidden) == len(hidden_layer_type))
    nn = pb.structure.FeedForwardNetwork()
    # layers: bias + input + hidden, hidden-1, ... + output
    nn.addInputModule(pb.structure.BiasUnit(name='bias'))
    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):
        Nhid = int(Nhid)
        # first hidden layer is named 'hidden', subsequent ones 'hidden-1', 'hidden-2', ...
        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))
    # connections: bias and input feed the first hidden layer (or output if no hidden layers)
    nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))
    nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))
    # chain consecutive hidden layers together
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):
        Nhid = int(Nhid)
        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],
                                                     nn['hidden-{}'.format(i + 1)]))
    # connect the last hidden layer to the output layer
    i = len(N_hidden) - 1
    nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))
    nn.sortModules()
    if FAST:
        try:
            nn.convertToFastNetwork()
        except:
            if verbosity > 0:
                print('Unable to convert slow PyBrain NN to a fast ARAC network...')
    if verbosity > 0:
        print(nn.connections)
    return nn
def ann_from_ds(ds=None, N_input=3, N_hidden=0, N_output=1, verbosity=1):
    """Build a feed-forward net sized to fit a pybrain DataSet.

    Input/output widths default to the dataset's `indim`/`outdim` attributes.
    The hidden-layer width is derived from `paramdim` when not given, then
    capped so there are roughly 5 samples per input*output dimension, and
    floored at N_output.
    """
    N_input = getattr(ds, 'indim', N_input)
    N_output = getattr(ds, 'outdim', N_output)
    if not N_hidden:
        N_hidden = getattr(ds, 'paramdim', N_hidden + N_input + N_output) - N_input - N_output
    samples_cap = len(ds) / float(N_input) / float(N_output) / 5.
    N_hidden = max(round(min(N_hidden, samples_cap)), N_output)
    return build_ann(N_input=N_input, N_hidden=N_hidden, N_output=N_output, verbosity=verbosity)
def prepend_dataset_with_weather(samples, location='Fresno, CA', weather_columns=None, use_cache=True, verbosity=0):
    """ Prepend the weather values specified (e.g. 'Max TempF') to each samples[i]['input'] vector

    samples[0..N]['target'] must be indexed by date timestamp (its first index
    value is used to look up that day's weather).
    If you use_cache for the current year, you may not get the most recent data.

    Arguments:
        samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame}
        location (str): weather-station location understood by `weather.daily`
        weather_columns (list of str or int): labels (or positional indices) of
            weather-table columns to prepend; falsy means no-op
        use_cache (bool): whether to use locally cached weather data
        verbosity (int): < 0 silences even the missing-weather warnings

    Returns:
        list of dict: the same `samples` list, mutated in place, with the selected
        weather values prepended to each 'input' vector (None when unavailable)
    """
    if verbosity > 1:
        print('Prepending weather data for {} to dataset samples'.format(weather_columns))
    if not weather_columns:
        return samples
    timestamps = pd.DatetimeIndex([s['target'].index[0] for s in samples])
    years = range(timestamps.min().date().year, timestamps.max().date().year + 1)
    weather_df = weather.daily(location=location, years=years, use_cache=use_cache)
    # FIXME: weather_df.resample('D') fails
    weather_df.index = [d.date() for d in weather_df.index]
    if verbosity > 1:
        print('Retrieved weather for years {}:'.format(years))
        print(weather_df)
    # Allow columns to be selected by position as well as by label
    weather_columns = [label if label in weather_df.columns else weather_df.columns[int(label)]
                       for label in (weather_columns or [])]
    for sampnum, sample in enumerate(samples):
        timestamp = timestamps[sampnum]
        try:
            weather_day = weather_df.loc[timestamp.date()]
        except Exception:  # was a bare `except:`; don't swallow KeyboardInterrupt/SystemExit
            from traceback import print_exc
            print_exc()
            weather_day = {}
            if verbosity >= 0:
                warnings.warn('Unable to find weather for the date {}'.format(timestamp.date()))
        # Missing columns are prepended as None so vector lengths stay consistent
        prepended = [weather_day.get(label, None) for label in weather_columns]
        sample['input'] = prepended + list(sample['input'])
        # BUGFIX: the old check `NaN in sample['input']` could never fire because a
        # freshly-created NaN compares unequal to everything (and missing labels
        # insert None, not NaN).  Detect missing/null values explicitly instead.
        if verbosity > 0 and any(pd.isnull(v) for v in prepended):
            warnings.warn('Unable to find weather features {} in the weather for date {}'.format(
                [label for label, v in zip(weather_columns, prepended) if pd.isnull(v)], timestamp))
    return samples
def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):
    """Compose a pybrain.dataset from a pandas DataFrame
    Arguments:
        df (DataFrame): source table; columns are selected by `inputs`/`outputs`
        delays (list of int): sample delays to use for the input tapped delay line
            Positive and negative values are treated the same as sample counts into the past.
            default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
        inputs (list of int or list of str): column indices or labels for the inputs
        outputs (list of int or list of str): column indices or labels for the outputs
        normalize (bool): whether to divide each input to be normally distributed about 0 with std 1
        verbosity (int): > 0 prints progress; > 1 prints each sample's target
    Returns:
        3-tuple: tuple(dataset, list of means, list of stds) when normalize=True
            means and stds allow normalization of new inputs and denormalization of the outputs
        NOTE(review): when normalize=False only the dataset is returned (no tuple).
    TODO:
        Detect categorical variables with low dimensionality and split into separate bits
            Vowpel Wabbit hashes strings into an int?
        Detect ordinal variables and convert to continuous int sequence
        SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """
    # A scalar `delays` of N means "use delays 1..N"; 0 means "no delay line"
    if isinstance(delays, int):
        if delays:
            delays = range(1, delays + 1)
        else:
            delays = [0]
    delays = np.abs(np.array([int(i) for i in delays]))
    # Resolve numeric column positions to labels, then fuzzy-match labels
    inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]
    outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]
    inputs = [fuzzy_get(df.columns, i) for i in inputs]
    outputs = [fuzzy_get(df.columns, o) for o in outputs]
    N_inp = len(inputs)
    N_out = len(outputs)
    inp_outs = inputs + outputs
    if verbosity > 0:
        print("inputs: {}\noutputs: {}\ndelays: {}\n".format(inputs, outputs, delays))
    means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))
    if normalize:
        means, stds = df[inp_outs].mean(), df[inp_outs].std()
    if normalize and verbosity > 0:
        print("Input mean values (used to normalize input biases): {}".format(means[:N_inp]))
        print("Output mean values (used to normalize output biases): {}".format(means[N_inp:]))
    ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)
    if verbosity > 0:
        print("Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs".format(
            len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))
    # FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time
    # NOTE(review): `delays == np.array([0])` is an elementwise comparison; it is
    # truthy only for the single-element case, which is what the fast path needs.
    if delays == np.array([0]) and not normalize:
        if verbosity > 0:
            print("No tapped delay lines (delays) were requested, so using undelayed features for the dataset.")
        assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])
        # Fast path: bulk-load the whole matrices instead of appending sample-by-sample
        ds.setField('input', df[inputs].values)
        ds.setField('target', df[outputs].values)
        ds.linkFields(['input', 'target'])
        # for inp, outp in zip(df[inputs].values, df[outputs].values):
        #     ds.appendLinked(inp, outp)
        assert(len(ds['input']) == len(ds['target']))
    else:
        for i, out_vec in enumerate(df[outputs].values):
            if verbosity > 0 and i % 100 == 0:
                print("{}%".format(i / .01 / len(df)))
            elif verbosity > 1:
                print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))
            # skip rows that don't have a full history for the longest delay
            if i < max(delays):
                continue
            # concatenate the (normalized) delayed input vectors into one tapped-delay-line vector
            inp_vec = []
            for delay in delays:
                inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])
            ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])
    if verbosity > 0:
        print("Dataset now has {} samples".format(len(ds)))
    if normalize:
        return ds, means, stds
    else:
        return ds
# def dataset_from_feature_names(df, delays=(1, 2,), quantiles=(), input_columns=(0,), target_columns=(-1,),
# weather_columns=(), features=(), verbosity=1):
# """FIXME: Untested. Transform Datafram columns and append to input_columns as additional features
# Arguments:
# features (seq of str): names of feautures to be appended to the input vector (feature set)
# """
# raise NotImplementedError("Better to implement this as a separate feature transformation function")
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + len(quantiles)
# + len(weather_columns)
# )
# N = max(list(delays) + list(quantiles))
# last_date = first_date = 0
# try:
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(len(N) + extras))
# print('Starting to augment {0}x{1} sample inputs by adding adding {2} additional features'.format(
# len(df), len(input_columns), extras))
# if verbosity > 0:
# print('The first sample input was {0}'.format(
# df[input_columns].iloc[0]))
# print('The first sample target was {0}'.format(
# df[target_columns].iloc[0]))
# print('The last sample input was {0}'.format(
# df[input_columns].iloc[-1]))
# print('The last sample target was {0}'.format(
# df[target_columns].iloc[-1]))
# first_date = df.index.iloc[0].date().toordinal()
# last_date = df.index.iloc[-1].date().toordinal()
# except:
# if verbosity > -1:
# from traceback import format_exc
# warnings.warn(format_exc())
# if verbosity > 1:
# import ipdb
# ipdb.set_trace()
# date_range = (last_date - first_date) + 1 or 1
# # FIXME: scale each feature/column/dimension independently using pug.ann.util.dataset_from_dataframe
# # but mean and std become vectors of the same dimension as the feature/input vector.
# # scikit-learn has transformations that do this more reasonably
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# # convert the list of dicts ((input, output) supervised dataset pairs) into a pybrains.Dataset
# ds = pybrain.datasets.SupervisedDataSet(len(N) + extras, 1)
# sorted_features = sorted(features)
# for sampnum, (input_vector, target_vector) in enumerate(
# zip(df[input_columns].values, df[target_columns].values)):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# # inputs = list(sample['input'])
# # the date we're trying to predict the threshold for
# timestamp = target_vector.index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 2:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale / date_range
# ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\n'.format(
# feature_name, sorted_features)
# msg += 'For sample {} and date {}\n'.format(sampnum, timestamp)
# msg += 'Input vector positions {}:\nInput vector: {}'.format(
# ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(target_vector.values))
def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build a supervised dataset whose output/target vectors are left empty.

    Thin wrapper around `dataset_from_dataframe` that only changes the default
    for `outputs` to None (no target columns).
    """
    kwargs = dict(df=df, delays=delays, inputs=inputs, outputs=outputs,
                  normalize=normalize, verbosity=verbosity)
    return dataset_from_dataframe(**kwargs)
def inputs_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Return only the sequence of input vectors for neural-net activation.

    Behaves like `dataset_from_dataframe` with `outputs=None`, but instead of
    the full DataSet instance only its 'input' field is returned.
    """
    dataset = input_dataset_from_dataframe(
        df=df, delays=delays, inputs=inputs, outputs=outputs,
        normalize=normalize, verbosity=verbosity)
    return dataset['input']
def build_trainer(nn, ds, verbosity=1):
    """Create an RProp- (batch-learning) trainer for the given net and pybrain dataset."""
    verbose = bool(verbosity)
    return pb.supervised.trainers.rprop.RPropMinusTrainer(
        nn, dataset=ds, batchlearning=True, verbose=verbose)
def weight_matrices(nn):
    """ Extract list of weight matrices from a Network, Layer (module), Trainer, Connection or other pybrain object

    Recursively unwraps whatever container it is handed (Trainer -> .module,
    Network -> .connections, dict -> values, filename -> NetworkReader, ...)
    until it reaches ParameterContainer/Connection objects, each of which is
    reshaped into an (outdim x indim) numpy weight matrix.

    NOTE(review): the bare `except:` clauses implement duck-typed dispatch by
    trial and error; they will also swallow unrelated errors (including
    KeyboardInterrupt), so failures only surface as oddly-nested results.
    """
    if isinstance(nn, ndarray):
        # already a weight matrix
        return nn
    try:
        # Network-like: recurse into its connections
        return weight_matrices(nn.connections)
    except:
        pass
    try:
        # Trainer-like: recurse into the wrapped module
        return weight_matrices(nn.module)
    except:
        pass
    # Network objects are ParameterContainer's too, but won't reshape into a single matrix,
    # so this must come after try nn.connections
    if isinstance(nn, (ParameterContainer, Connection)):
        return reshape(nn.params, (nn.outdim, nn.indim))
    if isinstance(nn, basestring):
        # a path to a saved network: load it and recurse
        try:
            fn = nn
            nn = NetworkReader(fn, newfile=False)
            return weight_matrices(nn.readFrom(fn))
        except:
            pass
    # FIXME: what does NetworkReader output? (Module? Layer?) need to handle it's type here
    try:
        # dict-like (e.g. a connections mapping): recurse into its values (Python 2 iteritems)
        return [weight_matrices(v) for (k, v) in nn.iteritems()]
    except:
        try:
            # flatten a module's lists of connections into one list and recurse
            connections = nn.module.connections.values()
            nn = []
            for conlist in connections:
                nn += conlist
            return weight_matrices(nn)
        except:
            # plain iterable: recurse elementwise
            return [weight_matrices(v) for v in nn]
# # FIXME: resolve all these NLP dependencies and get this working
# def dataset_from_time_series(df, N_inp=None, features=('moy',), verbosity=1):
# """Build a pybrains.dataset from the time series contained in a dataframe"""
# N_inp = N_inp or len(df.columns)
# features = features or []
# # Add features to input vector in reverse alphabetical order by feature name,
# # so woy will be added first, and date will be added last.
# # The order that the feature vectors should appear in the input vector to remain consistent
# # and neural net architecture can have structure that anticipates this.
# sorted_features = nlp.sort_strings(features, ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), case_sensitive=False)
# if verbosity > 0:
# print('dataset_from_thresh(features={0})'.format(features))
# samples, mean, std, thresh = simple_dataset_from_thresh(thresh, N=N, max_window=max_window,
# normalize=normalize, ignore_below=ignore_below)
# name = getattr(thresh, 'name', None)
# if name:
# name = normalize_building_name(name)
# if name:
# series = get_series(name)
# else:
# if isinstance(series, basestring):
# name = normalize_building_name(series.strip()) or thresh.name or 'Unknown'
# series = get_series(name)
# elif isinstance(series, pd.DataFrame):
# name = normalize_building_name(series.columns[0]) or thresh.name or 'Unknown'
# series = series[name]
# elif isinstance(series, pd.Series):
# name = normalize_building_name(series.name) or thresh.name or 'Unknown'
# else:
# name = None
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# # TODO: pre-process features list of strings in a separate function
# morn = 0
# # if the building name isn't known, you can't retrieve the morning load values for it
# if name:
# for s in features:
# if s.startswith('morn'):
# try:
# morn = int(s[4:])
# except:
# if verbosity > 0:
# warnings.warn('Unable to determine morning length from feature named "{0}" so using default '
# '(8 am = 8 * 4 = 32)')
# morn = 32 # default to 9 am morning ending
# break
# if verbosity > 0:
# print('In dataset_from_thresh() using {0} morning load values for Building {1}'
# ' because series arg is of type {2}'.format(morn, name, type(series)))
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + morn)
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(N + extras))
# ds = pb.datasets.SupervisedDataSet(N + extras, 1)
# first_date = samples[0]['target'].index[0].date().toordinal()
# last_date = samples[-1]['target'].index[0].date().toordinal()
# date_range = (last_date - first_date) or 1
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# if verbosity > 0:
# print('Adding features for building {3}, {0}, and a morning time series of len {2}, '
# 'to each of the {1} vectors (samples)'.format(features, len(samples), morn, name))
# for sampnum, sample in enumerate(samples):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# inputs = list(sample['input'].values)
# # the date we're trying to predict the rhreshold for
# timestamp = sample['target'].index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 1:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale /
# date_range ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\nFor sample {2}'
# ' and date {3}\nInput vector positions {4}:\nInput vector: {5}'.format(
# feature_name, sorted_features, sampnum, timestamp, ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(sample['target'].values))
# return ds, mean, std, thresh
def table_nan_locs(table):
    """Return (row, column) coordinates of every NaN/null cell in `table`.

    Based on http://stackoverflow.com/a/14033137/623735
    (`pd.isnull(df).any(1).nonzero()[0]` finds rows containing NaNs).
    If `table` is a flat sequence of scalars, a null scalar at row r is
    reported as (r, 0).
    """
    locations = []
    for r, row in enumerate(table):
        try:
            mask = pd.isnull(row)
            if mask.any():
                locations.extend((r, c) for c in mask.nonzero()[0])
        except AttributeError:  # `row` is a scalar, so the null-check is a plain bool
            if pd.isnull(row):
                locations.append((r, 0))
    return locations
def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):
    """Plot simulated Output vs Target for `network` activated on dataset `ds`.

    Same as `trainer_results` except the network and dataset are supplied
    separately.  Returns (network, mean, std) so denormalization info travels
    with the network.
    """
    sim_df = sim_network(network=network, ds=ds, mean=mean, std=std)
    sim_df.plot()
    plt.xlabel('Date')
    plt.ylabel('Threshold (kW)')
    plt.title(title)
    if show:
        try:
            plt.show(block=False)
        except TypeError:
            # ipython notebook overrides plt.show and lacks the `block` kwarg
            plt.show()
    if save:
        fig_path = 'ann_performance_for_{0}.png'.format(title).replace(' ', '_')
        if isinstance(save, basestring) and os.path.isdir(save):
            fig_path = os.path.join(save, fig_path)
        plt.savefig(fig_path)
    if not show:
        plt.clf()
    return network, mean, std
def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):
    """Plot how well a Trainer's Network reproduces the targets in its DataSet.

    Target and output values are denormalized before plotting with
    ``output * std + mean``, undoing the ``(output - mean) / std`` normalization.

    Args:
        trainer (Trainer): a pybrain Trainer instance containing a valid Network and DataSet
        mean (float): mean of the denormalized dataset (default: 0); only affects plot scale
        std (float): standard deviation of the denormalized dataset (default: 1)
        title (str): title to display on the plot
        show (bool): whether to display the figure
        save (bool or str): whether (or into which directory) to save a PNG of the figure

    Returns:
        3-tuple: (network, mean, std) as returned by `plot_network_results`
    """
    return plot_network_results(network=trainer.module, ds=trainer.ds, mean=mean,
                                std=std, title=title, show=show, save=save)
def sim_trainer(trainer, mean=0, std=1):
    """Activate a Trainer's Network on its own DataSet.

    Returns a DataFrame(columns=['Output', 'Target']) of denormalized values.
    """
    return sim_network(trainer.module, ds=trainer.ds, mean=mean, std=std)
def sim_network(network, ds=None, index=None, mean=0, std=1):
    """Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])

    The DataSet's target and output values are denormalized before populating the dataframe columns:

        denormalized_output = normalized_output * std + mean

    which inverses the normalization that produced the normalized output in the first place:

        normalized_output = (denormalized_output - mean) / std

    Args:
        network (Network or Trainer): a pybrain Network (or a Trainer wrapping one) to activate
        ds (DataSet): a pybrain DataSet to activate the Network on to produce an output sequence;
            falls back to `network.ds` when not given
        index (sequence): optional index for the returned DataFrame
            (default: range of sample numbers)
        mean (float): mean of the denormalized dataset (default: 0)
        std (float): std (standard deviation) of the denormalized dataset (default: 1)

    Returns:
        DataFrame: DataFrame with columns "Output" and "Target" suitable for df.plot-ting

    Raises:
        RuntimeError: if no DataSet is provided here nor attached to the network
    """
    # just in case network is a trainer or has a Module-derived instance as one of it's attribute
    # isinstance(network.module, (networks.Network, modules.Module))
    if hasattr(network, 'module') and hasattr(network.module, 'activate'):
        # may want to also check: isinstance(network.module, (networks.Network, modules.Module))
        network = network.module
    ds = ds or network.ds
    if not ds:
        raise RuntimeError("Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, "
                           " to plot the outputs. A dataset can be provided as part of a network instance or "
                           "as a separate kwarg if `network` is used to provide the `pybrain.Network`"
                           " instance directly.")
    # Activate the net on each input vector, denormalizing output and target lazily.
    # NOTE(review): only element [0] of each output/target vector is kept -- assumes
    # a 1-dimensional output layer; confirm for multi-output nets.
    results_generator = ((network.activate(ds['input'][i])[0] * std + mean, ds['target'][i][0] * std + mean)
                         for i in xrange(len(ds['input'])))
    # NOTE(review): `index or range(...)` raises for ndarray indexes (ambiguous truth
    # value) and silently discards empty-list indexes -- confirm callers pass lists.
    return pd.DataFrame(results_generator, columns=['Output', 'Target'], index=index or range(len(ds['input'])))
|
hobson/pug-ann | pug/ann/util.py | table_nan_locs | python | def table_nan_locs(table):
ans = []
for rownum, row in enumerate(table):
try:
if pd.isnull(row).any():
colnums = pd.isnull(row).nonzero()[0]
ans += [(rownum, colnum) for colnum in colnums]
except AttributeError: # table is really just a sequence of scalars
if pd.isnull(row):
ans += [(rownum, 0)]
return ans | from http://stackoverflow.com/a/14033137/623735
# gets the indices of the rows with nan values in a dataframe
pd.isnull(df).any(1).nonzero()[0] | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/util.py#L552-L567 | null | """Maniuplate, analyze and plot `pybrain` `Network` and `DataSet` objects
TODO:
Incorporate into pybrain fork so pug doesn't have to depend on pybrain
"""
from __future__ import print_function
import os
import warnings
import pandas as pd
from scipy import ndarray, reshape # array, amin, amax,
np = pd.np
from matplotlib import pyplot as plt
import pybrain.datasets
import pybrain.structure
import pybrain.supervised
import pybrain.tools
pb = pybrain
# from pybrain.supervised.trainers import Trainer
from pybrain.tools.customxml import NetworkReader
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.connections.connection import Connection
from pug.nlp.util import tuplify, fuzzy_get
from pug.ann.data import weather
#import pug.nlp.util as nlp
# print(os.path.realpath(__file__))
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(DATA_PATH, '..', 'data')
LAYER_TYPES = set([getattr(pybrain.structure, s) for s in
dir(pybrain.structure) if s.endswith('Layer')])
FAST = False
try:
from arac.pybrainbridge import _FeedForwardNetwork as FeedForwardNetwork
FAST = True
except:
from pybrain.structure import FeedForwardNetwork
print("No fast (ARAC, a FC++ library) FeedForwardNetwork was found, "
"so using the slower pybrain python implementation of FFNN.")
def normalize_layer_type(layer_type):
    """Resolve a layer class, class name, or sequence thereof into pybrain layer class(es).

    Accepts an actual pybrain layer class (returned unchanged), a string such as
    'Linear' or 'LinearLayer' (looked up in ``pb.structure``), or any sequence of
    either, which is normalized element-wise into a list.
    """
    # Already a known layer class? (`in` raises TypeError for unhashable arguments)
    try:
        if layer_type in LAYER_TYPES:
            return layer_type
    except TypeError:
        pass
    # A string naming the class, with or without the 'Layer' suffix.
    # Non-strings fail the `.strip()` attribute lookup and fall through below.
    for suffix in ('', 'Layer'):
        try:
            return getattr(pb.structure, layer_type.strip() + suffix)
        except AttributeError:
            pass
    # Otherwise assume a sequence of layer types and normalize each element.
    return [normalize_layer_type(lt) for lt in layer_type]
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
    """Build a neural net with the indicated input, hidden, and output dimensions

    Args:
        N_input (int): number of input nodes (default: 1)
        N_hidden (int or seq of int): width of each hidden layer; 0/None/empty means
            no hidden layer (bias and input connect directly to the output)
        N_output (int): number of output nodes (default: 1)
        hidden_layer_type (str, class, or seq): pybrain layer type(s) for the hidden
            layer(s); must match `N_hidden` in length after normalization
        verbosity (int): >0 prints progress and the resulting connection list

    Returns:
        FeedForwardNetwork with N_input + sum(N_hidden) + N_output nodes (plus a bias unit)
    """
    N_input = N_input or 1
    N_output = N_output or 1
    N_hidden = N_hidden or tuple()
    # A scalar hidden-layer spec means a single hidden layer of that width
    if isinstance(N_hidden, (int, float, basestring)):
        N_hidden = (int(N_hidden),)
    hidden_layer_type = hidden_layer_type or tuple()
    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
    if verbosity > 0:
        print(N_hidden, ' layers of type ', hidden_layer_type)
    # exactly one layer type is required per hidden layer
    assert(len(N_hidden) == len(hidden_layer_type))

    nn = pb.structure.FeedForwardNetwork()

    # layers: bias + input, the requested hidden layers, and a linear output layer
    nn.addInputModule(pb.structure.BiasUnit(name='bias'))
    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):
        Nhid = int(Nhid)
        # first hidden layer is named 'hidden', later ones 'hidden-1', 'hidden-2', ...
        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))

    # connections: bias and input feed the first hidden layer (or the output directly
    # when no hidden layers were requested)
    nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))
    nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))
    # chain consecutive hidden layers together
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):
        Nhid = int(Nhid)
        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],
                                                     nn['hidden-{}'.format(i + 1)]))
    # BUGFIX: only wire the last hidden layer to the output when hidden layers exist;
    # previously an empty N_hidden produced i == -1 and a lookup of the nonexistent
    # layer 'hidden--1' (the no-hidden case is already wired directly above).
    if N_hidden:
        i = len(N_hidden) - 1
        nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))
    nn.sortModules()
    if FAST:
        try:
            nn.convertToFastNetwork()
        except:
            if verbosity > 0:
                print('Unable to convert slow PyBrain NN to a fast ARAC network...')
    if verbosity > 0:
        print(nn.connections)
    return nn
def ann_from_ds(ds=None, N_input=3, N_hidden=0, N_output=1, verbosity=1):
    """Build a network sized to fit the supplied dataset's dimensions.

    Input/output widths come from the dataset when available; the hidden-layer
    width is derived from the dataset's parameter dimension and then capped by
    the sample count so the net is not over-parameterized.
    """
    N_input = getattr(ds, 'indim', N_input)
    N_output = getattr(ds, 'outdim', N_output)
    if not N_hidden:
        N_hidden = getattr(ds, 'paramdim', N_hidden + N_input + N_output) - N_input - N_output
    # roughly 5 samples per weight, but never fewer hidden nodes than outputs
    samples_cap = len(ds) / float(N_input) / float(N_output) / 5.
    N_hidden = max(round(min(N_hidden, samples_cap)), N_output)
    return build_ann(N_input=N_input, N_hidden=N_hidden, N_output=N_output, verbosity=verbosity)
def prepend_dataset_with_weather(samples, location='Fresno, CA', weather_columns=None, use_cache=True, verbosity=0):
    """Prepend the specified weather values (e.g. "Max TemperatureF") to each samples[i]['input'] vector.

    samples[0..N]['target'] should have an index whose first entry is the date timestamp.
    If you use_cache for the current year, you may not get the most recent data.

    Arguments:
        samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame}
        location (str): city passed to the weather service (default: 'Fresno, CA')
        weather_columns (list of str or int): labels (or positional indices) of weather
            columns to prepend; falsy makes this a no-op
        use_cache (bool): whether previously downloaded weather data may be reused
        verbosity (int): >=0 warns on missing dates, >0 also warns on null features,
            >1 prints progress

    Returns:
        list of dict: the same `samples` list, mutated in place with weather values prepended
    """
    if verbosity > 1:
        print('Prepending weather data for {} to dataset samples'.format(weather_columns))
    if not weather_columns:
        return samples
    # One timestamp per sample: the first index entry of each sample's target frame
    timestamps = pd.DatetimeIndex([s['target'].index[0] for s in samples])
    years = range(timestamps.min().date().year, timestamps.max().date().year + 1)
    weather_df = weather.daily(location=location, years=years, use_cache=use_cache)
    # FIXME: weather_df.resample('D') fails
    # Re-key the weather table by plain dates so it can be looked up by sample date below
    weather_df.index = [d.date() for d in weather_df.index]
    if verbosity > 1:
        print('Retrieved weather for years {}:'.format(years))
        print(weather_df)
    # Resolve positional (integer) column references into actual column labels
    weather_columns = [label if label in weather_df.columns else weather_df.columns[int(label)]
                       for label in (weather_columns or [])]
    for sampnum, sample in enumerate(samples):
        timestamp = timestamps[sampnum]
        try:
            weather_day = weather_df.loc[timestamp.date()]
        except:
            from traceback import print_exc
            print_exc()
            # Fall back to an empty record so .get() below yields None for every column
            weather_day = {}
            if verbosity >= 0:
                warnings.warn('Unable to find weather for the date {}'.format(timestamp.date()))
        NaN = float('NaN')
        sample['input'] = [weather_day.get(label, None) for label in weather_columns] + list(sample['input'])
        # NOTE(review): `NaN in sample['input']` relies on identity/equality and is False for a
        # fresh float('NaN'); missing values are inserted as None above, so this warning
        # probably never fires -- confirm intent.
        if verbosity > 0 and NaN in sample['input']:
            warnings.warn('Unable to find weather features {} in the weather for date {}'.format(
                [label for i, label in enumerate(weather_columns) if sample['input'][i] == NaN], timestamp))
    return samples
def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):
    """Compose a pybrain.dataset from a pandas DataFrame

    Arguments:
        df (DataFrame): source table; column references are resolved fuzzily
        delays (int or list of int): sample delays to use for the input tapped delay line
            Positive and negative values are treated the same as sample counts into the past.
            default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
            An int N expands to range(1, N + 1); 0/falsy means no delay line ([0]).
        inputs (list of int or list of str): column indices or labels for the inputs
        outputs (list of int or list of str): column indices or labels for the outputs
        normalize (bool): whether to shift/scale each column to be normally distributed
            about 0 with std 1
        verbosity (int): >0 prints progress

    Returns:
        SupervisedDataSet, or, when `normalize` is set, the 3-tuple
        (dataset, means, stds) so new inputs can be normalized and outputs denormalized.

    TODO:
        Detect categorical variables with low dimensionality and split into separate bits
        Vowpel Wabbit hashes strings into an int?
        Detect ordinal variables and convert to continuous int sequence
        SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """
    if isinstance(delays, int):
        if delays:
            delays = range(1, delays + 1)
        else:
            delays = [0]
    delays = np.abs(np.array([int(i) for i in delays]))
    # Resolve positional column references to labels, then labels fuzzily to real columns
    inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]
    outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]
    inputs = [fuzzy_get(df.columns, i) for i in inputs]
    outputs = [fuzzy_get(df.columns, o) for o in outputs]
    N_inp = len(inputs)
    N_out = len(outputs)
    inp_outs = inputs + outputs
    if verbosity > 0:
        print("inputs: {}\noutputs: {}\ndelays: {}\n".format(inputs, outputs, delays))
    means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))
    if normalize:
        means, stds = df[inp_outs].mean(), df[inp_outs].std()
    if normalize and verbosity > 0:
        print("Input mean values (used to normalize input biases): {}".format(means[:N_inp]))
        print("Output mean values (used to normalize output biases): {}".format(means[N_inp:]))
    ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)
    if verbosity > 0:
        print("Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs".format(
            len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))
    # FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time
    # BUGFIX: the original `delays == np.array([0])` comparison is elementwise and raises
    # "truth value ... is ambiguous" whenever len(delays) > 1 (the default case);
    # np.array_equal gives the intended scalar comparison.
    if np.array_equal(delays, [0]) and not normalize:
        if verbosity > 0:
            print("No tapped delay lines (delays) were requested, so using undelayed features for the dataset.")
        assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])
        ds.setField('input', df[inputs].values)
        ds.setField('target', df[outputs].values)
        ds.linkFields(['input', 'target'])
        # for inp, outp in zip(df[inputs].values, df[outputs].values):
        #     ds.appendLinked(inp, outp)
        assert(len(ds['input']) == len(ds['target']))
    else:
        for i, out_vec in enumerate(df[outputs].values):
            if verbosity > 0 and i % 100 == 0:
                print("{}%".format(i / .01 / len(df)))
            elif verbosity > 1:
                print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))
            # Skip the first max(delays) rows: they lack a complete delayed history
            if i < max(delays):
                continue
            inp_vec = []
            for delay in delays:
                inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])
            ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])
    if verbosity > 0:
        print("Dataset now has {} samples".format(len(ds)))
    if normalize:
        return ds, means, stds
    else:
        return ds
# def dataset_from_feature_names(df, delays=(1, 2,), quantiles=(), input_columns=(0,), target_columns=(-1,),
# weather_columns=(), features=(), verbosity=1):
# """FIXME: Untested. Transform Datafram columns and append to input_columns as additional features
# Arguments:
# features (seq of str): names of feautures to be appended to the input vector (feature set)
# """
# raise NotImplementedError("Better to implement this as a separate feature transformation function")
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + len(quantiles)
# + len(weather_columns)
# )
# N = max(list(delays) + list(quantiles))
# last_date = first_date = 0
# try:
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(len(N) + extras))
# print('Starting to augment {0}x{1} sample inputs by adding adding {2} additional features'.format(
# len(df), len(input_columns), extras))
# if verbosity > 0:
# print('The first sample input was {0}'.format(
# df[input_columns].iloc[0]))
# print('The first sample target was {0}'.format(
# df[target_columns].iloc[0]))
# print('The last sample input was {0}'.format(
# df[input_columns].iloc[-1]))
# print('The last sample target was {0}'.format(
# df[target_columns].iloc[-1]))
# first_date = df.index.iloc[0].date().toordinal()
# last_date = df.index.iloc[-1].date().toordinal()
# except:
# if verbosity > -1:
# from traceback import format_exc
# warnings.warn(format_exc())
# if verbosity > 1:
# import ipdb
# ipdb.set_trace()
# date_range = (last_date - first_date) + 1 or 1
# # FIXME: scale each feature/column/dimension independently using pug.ann.util.dataset_from_dataframe
# # but mean and std become vectors of the same dimension as the feature/input vector.
# # scikit-learn has transformations that do this more reasonably
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# # convert the list of dicts ((input, output) supervised dataset pairs) into a pybrains.Dataset
# ds = pybrain.datasets.SupervisedDataSet(len(N) + extras, 1)
# sorted_features = sorted(features)
# for sampnum, (input_vector, target_vector) in enumerate(
# zip(df[input_columns].values, df[target_columns].values)):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# # inputs = list(sample['input'])
# # the date we're trying to predict the threshold for
# timestamp = target_vector.index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 2:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale / date_range
# ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\n'.format(
# feature_name, sorted_features)
# msg += 'For sample {} and date {}\n'.format(sampnum, timestamp)
# msg += 'Input vector positions {}:\nInput vector: {}'.format(
# ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(target_vector.values))
def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build a dataset whose output/target vector is empty.

    Identical to `dataset_from_dataframe`, except that the default for
    `outputs` is None (and `normalize` defaults to True).
    """
    return dataset_from_dataframe(df, delays, inputs, outputs, normalize, verbosity)
def inputs_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build the sequence of input vectors suitable for "activation" by a neural net.

    Identical to `input_dataset_from_dataframe`, but returns only the input
    vectors rather than the full DataSet instance.

    NOTE(review): when `normalize` is truthy (the default), `input_dataset_from_dataframe`
    returns a (ds, means, stds) tuple, so the ['input'] lookup below will raise --
    confirm callers pass normalize=False.
    """
    dataset = input_dataset_from_dataframe(df=df, delays=delays, inputs=inputs, outputs=outputs,
                                           normalize=normalize, verbosity=verbosity)
    return dataset['input']
def build_trainer(nn, ds, verbosity=1):
    """Configure an RProp- (resilient backprop) batch trainer for network `nn` on dataset `ds`."""
    return pb.supervised.trainers.rprop.RPropMinusTrainer(
        nn, dataset=ds, batchlearning=True, verbose=bool(verbosity))
def weight_matrices(nn):
    """ Extract list of weight matrices from a Network, Layer (module), Trainer, Connection or other pybrain object

    Recursively reduces the argument to ndarray weight matrices shaped (outdim, indim).
    Accepts a Network, a Trainer (unwrapped via `.module`), a Connection or other
    ParameterContainer, a dict of connection lists, a filename of a saved network XML,
    or any (nested) sequence of these; returns a correspondingly nested list.
    The bare excepts implement deliberate duck-typed fallthrough -- order matters.
    """
    # Base case: already a weight matrix.
    if isinstance(nn, ndarray):
        return nn
    # A Network exposes `.connections` (dict of module -> connection list).
    try:
        return weight_matrices(nn.connections)
    except:
        pass
    # A Trainer wraps its Network in `.module`.
    try:
        return weight_matrices(nn.module)
    except:
        pass
    # Network objects are ParameterContainer's too, but won't reshape into a single matrix,
    # so this must come after try nn.connections
    if isinstance(nn, (ParameterContainer, Connection)):
        return reshape(nn.params, (nn.outdim, nn.indim))
    # A string is treated as the path of a saved-network XML file.
    if isinstance(nn, basestring):
        try:
            fn = nn
            nn = NetworkReader(fn, newfile=False)
            return weight_matrices(nn.readFrom(fn))
        except:
            pass
    # FIXME: what does NetworkReader output? (Module? Layer?) need to handle it's type here
    # A dict (e.g. Network.connections): recurse into each value.
    # NOTE(review): `iteritems` is Python-2 only, consistent with the rest of this module.
    try:
        return [weight_matrices(v) for (k, v) in nn.iteritems()]
    except:
        try:
            # Flatten a trainer-like object's connection lists into one list and recurse.
            connections = nn.module.connections.values()
            nn = []
            for conlist in connections:
                nn += conlist
            return weight_matrices(nn)
        except:
            # Last resort: treat `nn` as a plain sequence.
            return [weight_matrices(v) for v in nn]
# # FIXME: resolve all these NLP dependencies and get this working
# def dataset_from_time_series(df, N_inp=None, features=('moy',), verbosity=1):
# """Build a pybrains.dataset from the time series contained in a dataframe"""
# N_inp = N_inp or len(df.columns)
# features = features or []
# # Add features to input vector in reverse alphabetical order by feature name,
# # so woy will be added first, and date will be added last.
# # The order that the feature vectors should appear in the input vector to remain consistent
# # and neural net architecture can have structure that anticipates this.
# sorted_features = nlp.sort_strings(features, ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), case_sensitive=False)
# if verbosity > 0:
# print('dataset_from_thresh(features={0})'.format(features))
# samples, mean, std, thresh = simple_dataset_from_thresh(thresh, N=N, max_window=max_window,
# normalize=normalize, ignore_below=ignore_below)
# name = getattr(thresh, 'name', None)
# if name:
# name = normalize_building_name(name)
# if name:
# series = get_series(name)
# else:
# if isinstance(series, basestring):
# name = normalize_building_name(series.strip()) or thresh.name or 'Unknown'
# series = get_series(name)
# elif isinstance(series, pd.DataFrame):
# name = normalize_building_name(series.columns[0]) or thresh.name or 'Unknown'
# series = series[name]
# elif isinstance(series, pd.Series):
# name = normalize_building_name(series.name) or thresh.name or 'Unknown'
# else:
# name = None
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# # TODO: pre-process features list of strings in a separate function
# morn = 0
# # if the building name isn't known, you can't retrieve the morning load values for it
# if name:
# for s in features:
# if s.startswith('morn'):
# try:
# morn = int(s[4:])
# except:
# if verbosity > 0:
# warnings.warn('Unable to determine morning length from feature named "{0}" so using default '
# '(8 am = 8 * 4 = 32)')
# morn = 32 # default to 9 am morning ending
# break
# if verbosity > 0:
# print('In dataset_from_thresh() using {0} morning load values for Building {1}'
# ' because series arg is of type {2}'.format(morn, name, type(series)))
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + morn)
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(N + extras))
# ds = pb.datasets.SupervisedDataSet(N + extras, 1)
# first_date = samples[0]['target'].index[0].date().toordinal()
# last_date = samples[-1]['target'].index[0].date().toordinal()
# date_range = (last_date - first_date) or 1
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# if verbosity > 0:
# print('Adding features for building {3}, {0}, and a morning time series of len {2}, '
# 'to each of the {1} vectors (samples)'.format(features, len(samples), morn, name))
# for sampnum, sample in enumerate(samples):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# inputs = list(sample['input'].values)
# # the date we're trying to predict the rhreshold for
# timestamp = sample['target'].index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 1:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale /
# date_range ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\nFor sample {2}'
# ' and date {3}\nInput vector positions {4}:\nInput vector: {5}'.format(
# feature_name, sorted_features, sampnum, timestamp, ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(sample['target'].values))
# return ds, mean, std, thresh
def dataset_nan_locs(ds):
"""
from http://stackoverflow.com/a/14033137/623735
# gets the indices of the rows with nan values in a dataframe
pd.isnull(df).any(1).nonzero()[0]
"""
ans = []
for sampnum, sample in enumerate(ds):
if pd.isnull(sample).any():
ans += [{
'sample': sampnum,
'input': pd.isnull(sample[0]).nonzero()[0],
'output': pd.isnull(sample[1]).nonzero()[0],
}]
return ans
def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):
    """Identical to plot_trainer except `network` and `ds` must be provided separately"""
    outputs_and_targets = sim_network(network=network, ds=ds, mean=mean, std=std)
    outputs_and_targets.plot()
    plt.xlabel('Date')
    plt.ylabel('Threshold (kW)')
    plt.title(title)
    if show:
        # ipython notebook overrides plt.show and doesn't have a block kwarg
        try:
            plt.show(block=False)
        except TypeError:
            plt.show()
    if save:
        filename = ('ann_performance_for_%s.png' % title).replace(' ', '_')
        # `save` may be a directory path to write into instead of a plain truthy flag
        if isinstance(save, basestring) and os.path.isdir(save):
            filename = os.path.join(save, filename)
        plt.savefig(filename)
    if not show:
        # clear the figure if it was never displayed
        plt.clf()
    return network, mean, std
def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):
    """Plot how well a Trainer's Network reproduces the targets in its DataSet.

    Target and output values are denormalized before plotting with
    ``output * std + mean``, undoing the ``(output - mean) / std`` normalization.

    Args:
        trainer (Trainer): a pybrain Trainer instance containing a valid Network and DataSet
        mean (float): mean of the denormalized dataset (default: 0); only affects plot scale
        std (float): standard deviation of the denormalized dataset (default: 1)
        title (str): title to display on the plot
        show (bool): whether to display the figure
        save (bool or str): whether (or into which directory) to save a PNG of the figure

    Returns:
        3-tuple: (network, mean, std) as returned by `plot_network_results`
    """
    return plot_network_results(network=trainer.module, ds=trainer.ds, mean=mean,
                                std=std, title=title, show=show, save=save)
def sim_trainer(trainer, mean=0, std=1):
    """Activate a Trainer's Network on its own DataSet.

    Returns a DataFrame(columns=['Output', 'Target']) of denormalized values.
    """
    return sim_network(trainer.module, ds=trainer.ds, mean=mean, std=std)
def sim_network(network, ds=None, index=None, mean=0, std=1):
    """Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])

    The DataSet's target and output values are denormalized before populating the dataframe columns:

        denormalized_output = normalized_output * std + mean

    which inverses the normalization that produced the normalized output in the first place:

        normalized_output = (denormalized_output - mean) / std

    Args:
        network (Network or Trainer): a pybrain Network (or a Trainer wrapping one) to activate
        ds (DataSet): a pybrain DataSet to activate the Network on to produce an output sequence;
            falls back to `network.ds` when not given
        index (sequence): optional index for the returned DataFrame
            (default: range of sample numbers)
        mean (float): mean of the denormalized dataset (default: 0)
        std (float): std (standard deviation) of the denormalized dataset (default: 1)

    Returns:
        DataFrame: DataFrame with columns "Output" and "Target" suitable for df.plot-ting

    Raises:
        RuntimeError: if no DataSet is provided here nor attached to the network
    """
    # just in case network is a trainer or has a Module-derived instance as one of it's attribute
    # isinstance(network.module, (networks.Network, modules.Module))
    if hasattr(network, 'module') and hasattr(network.module, 'activate'):
        # may want to also check: isinstance(network.module, (networks.Network, modules.Module))
        network = network.module
    ds = ds or network.ds
    if not ds:
        raise RuntimeError("Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, "
                           " to plot the outputs. A dataset can be provided as part of a network instance or "
                           "as a separate kwarg if `network` is used to provide the `pybrain.Network`"
                           " instance directly.")
    # Activate the net on each input vector, denormalizing output and target lazily.
    # NOTE(review): only element [0] of each output/target vector is kept -- assumes
    # a 1-dimensional output layer; confirm for multi-output nets.
    results_generator = ((network.activate(ds['input'][i])[0] * std + mean, ds['target'][i][0] * std + mean)
                         for i in xrange(len(ds['input'])))
    # NOTE(review): `index or range(...)` raises for ndarray indexes (ambiguous truth
    # value) and silently discards empty-list indexes -- confirm callers pass lists.
    return pd.DataFrame(results_generator, columns=['Output', 'Target'], index=index or range(len(ds['input'])))
|
hobson/pug-ann | pug/ann/util.py | plot_network_results | python | def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):
df = sim_network(network=network, ds=ds, mean=mean, std=std)
df.plot()
plt.xlabel('Date')
plt.ylabel('Threshold (kW)')
plt.title(title)
if show:
try:
# ipython notebook overrides plt.show and doesn't have a block kwarg
plt.show(block=False)
except TypeError:
plt.show()
if save:
filename = 'ann_performance_for_{0}.png'.format(title).replace(' ', '_')
if isinstance(save, basestring) and os.path.isdir(save):
filename = os.path.join(save, filename)
plt.savefig(filename)
if not show:
plt.clf()
return network, mean, std | Identical to plot_trainer except `network` and `ds` must be provided separately | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/util.py#L570-L592 | [
"def sim_network(network, ds=None, index=None, mean=0, std=1):\n \"\"\"Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])\n\n The DataSet's target and output values are denormalized before populating the dataframe columns:\n\n denormalized_output = normalized_output * std + mean\n\n Which inverses the normalization that produced the normalized output in the first place: \\\n\n normalized_output = (denormalzied_output - mean) / std\n\n Args:\n network (Network): a pybrain Network instance to activate with the provided DataSet, `ds`\n ds (DataSet): a pybrain DataSet to activate the Network on to produce an output sequence\n mean (float): mean of the denormalized dataset (default: 0)\n Output is scaled\n std (float): std (standard deviation) of the denormalized dataset (default: 1)\n title (str): title to display on the plot.\n\n Returns:\n DataFrame: DataFrame with columns \"Output\" and \"Target\" suitable for df.plot-ting\n \"\"\"\n # just in case network is a trainer or has a Module-derived instance as one of it's attribute\n # isinstance(network.module, (networks.Network, modules.Module))\n if hasattr(network, 'module') and hasattr(network.module, 'activate'):\n # may want to also check: isinstance(network.module, (networks.Network, modules.Module))\n network = network.module\n ds = ds or network.ds\n if not ds:\n raise RuntimeError(\"Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, \"\n \" to plot the outputs. A dataset can be provided as part of a network instance or \"\n \"as a separate kwarg if `network` is used to provide the `pybrain.Network`\"\n \" instance directly.\")\n results_generator = ((network.activate(ds['input'][i])[0] * std + mean, ds['target'][i][0] * std + mean)\n for i in xrange(len(ds['input'])))\n\n return pd.DataFrame(results_generator, columns=['Output', 'Target'], index=index or range(len(ds['input'])))\n"
] | """Maniuplate, analyze and plot `pybrain` `Network` and `DataSet` objects
TODO:
Incorporate into pybrain fork so pug doesn't have to depend on pybrain
"""
from __future__ import print_function
import os
import warnings
import pandas as pd
from scipy import ndarray, reshape # array, amin, amax,
np = pd.np
from matplotlib import pyplot as plt
import pybrain.datasets
import pybrain.structure
import pybrain.supervised
import pybrain.tools
pb = pybrain
# from pybrain.supervised.trainers import Trainer
from pybrain.tools.customxml import NetworkReader
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.connections.connection import Connection
from pug.nlp.util import tuplify, fuzzy_get
from pug.ann.data import weather
#import pug.nlp.util as nlp
# print(os.path.realpath(__file__))
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(DATA_PATH, '..', 'data')
LAYER_TYPES = set([getattr(pybrain.structure, s) for s in
dir(pybrain.structure) if s.endswith('Layer')])
FAST = False
try:
from arac.pybrainbridge import _FeedForwardNetwork as FeedForwardNetwork
FAST = True
except:
from pybrain.structure import FeedForwardNetwork
print("No fast (ARAC, a FC++ library) FeedForwardNetwork was found, "
"so using the slower pybrain python implementation of FFNN.")
def normalize_layer_type(layer_type):
    """Resolve a layer spec (class, name string, or sequence of either) to pybrain layer class(es).

    A known layer class is returned unchanged; a string is looked up in
    ``pb.structure`` (first verbatim, then with a trailing ``'Layer'``);
    anything else is treated as a sequence of specs and normalized element-wise.
    """
    try:
        if layer_type in LAYER_TYPES:
            return layer_type
    except TypeError:
        # unhashable spec (e.g. a list) -- fall through to the lookups below
        pass
    for suffix in ('', 'Layer'):
        try:
            return getattr(pb.structure, layer_type.strip() + suffix)
        except AttributeError:
            # either no such attribute in pb.structure, or layer_type has no .strip()
            continue
    # not a class or a resolvable name: recurse over it as a sequence of specs
    return [normalize_layer_type(lt) for lt in layer_type]
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
    """Build a feedforward neural net with the indicated input, hidden, and output dimensions

    Arguments:
        N_input (int): number of input nodes (default: 1)
        N_hidden (int or sequence of int): width of each hidden layer; falsy means no hidden layer
        N_output (int): number of output nodes (default: 1)
        hidden_layer_type (str or type or sequence): pybrain layer type(s), one per hidden layer
        verbosity (int): print progress information when > 0

    Returns:
        FeedForwardNetwork with N_input + sum(N_hidden) + N_output nodes
    """
    N_input = N_input or 1
    N_output = N_output or 1

    N_hidden = N_hidden or tuple()
    if isinstance(N_hidden, (int, float, basestring)):
        N_hidden = (int(N_hidden),)

    hidden_layer_type = hidden_layer_type or tuple()
    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
    if verbosity > 0:
        print(N_hidden, ' layers of type ', hidden_layer_type)
    # one layer type per hidden layer
    assert(len(N_hidden) == len(hidden_layer_type))

    nn = pb.structure.FeedForwardNetwork()

    # layers
    nn.addInputModule(pb.structure.BiasUnit(name='bias'))
    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):
        Nhid = int(Nhid)
        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))

    # connections: bias and input feed the first hidden layer, or the output directly
    nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))
    nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):
        Nhid = int(Nhid)
        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],
                                                     nn['hidden-{}'.format(i + 1)]))
    # FIX: only wire the last hidden layer to the output when a hidden layer exists;
    # previously N_hidden=() produced i = -1 and a lookup of the nonexistent 'hidden--1'
    if N_hidden:
        i = len(N_hidden) - 1
        nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))

    nn.sortModules()

    if FAST:
        try:
            nn.convertToFastNetwork()
        except:
            if verbosity > 0:
                print('Unable to convert slow PyBrain NN to a fast ARAC network...')
    if verbosity > 0:
        print(nn.connections)
    return nn
def ann_from_ds(ds=None, N_input=3, N_hidden=0, N_output=1, verbosity=1):
    """Construct a FeedForwardNetwork sized to fit the supplied pybrain DataSet.

    Input/output widths default to the dataset's ``indim``/``outdim`` when
    available, and the hidden-layer width is capped so there are roughly at
    least 5 samples per input-output weight (but never below ``N_output``).
    """
    n_in = getattr(ds, 'indim', N_input)
    n_out = getattr(ds, 'outdim', N_output)
    if not N_hidden:
        N_hidden = getattr(ds, 'paramdim', N_hidden + n_in + n_out) - n_in - n_out
    samples_cap = len(ds) / float(n_in) / float(n_out) / 5.
    n_hid = max(round(min(N_hidden, samples_cap)), n_out)
    return build_ann(N_input=n_in, N_hidden=n_hid, N_output=n_out, verbosity=verbosity)
def prepend_dataset_with_weather(samples, location='Fresno, CA', weather_columns=None, use_cache=True, verbosity=0):
    """ Prepend weather the values specified (e.g. Max TempF) to the samples[0..N]['input'] vectors

    samples[0..N]['target'] should have an index with the date timestamp
    If you use_cache for the current year, you may not get the most recent data.

    Arguments:
        samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame}
        location (str): location passed through to `weather.daily` (default: 'Fresno, CA')
        weather_columns (list of str or int): labels (or column indices) of the weather features
        use_cache (bool): whether to reuse locally cached weather data
        verbosity (int): < 0 silences warnings; > 0 enables extra printing

    Returns:
        list of dict: `samples`, each sample['input'] prepended (in place) with the weather values
    """
    if verbosity > 1:
        print('Prepending weather data for {} to dataset samples'.format(weather_columns))
    if not weather_columns:
        return samples
    timestamps = pd.DatetimeIndex([s['target'].index[0] for s in samples])
    years = range(timestamps.min().date().year, timestamps.max().date().year + 1)
    weather_df = weather.daily(location=location, years=years, use_cache=use_cache)
    # FIXME: weather_df.resample('D') fails
    weather_df.index = [d.date() for d in weather_df.index]
    if verbosity > 1:
        print('Retrieved weather for years {}:'.format(years))
        print(weather_df)
    # allow weather_columns to be given as labels or as integer column positions
    weather_columns = [label if label in weather_df.columns else weather_df.columns[int(label)]
                       for label in (weather_columns or [])]
    for sampnum, sample in enumerate(samples):
        timestamp = timestamps[sampnum]
        try:
            weather_day = weather_df.loc[timestamp.date()]
        except:
            from traceback import print_exc
            print_exc()
            weather_day = {}
            if verbosity >= 0:
                warnings.warn('Unable to find weather for the date {}'.format(timestamp.date()))
        sample['input'] = [weather_day.get(label, None) for label in weather_columns] + list(sample['input'])
        # FIX: missing features are filled with None (and NaN never compares equal to itself),
        # so the old `NaN in sample['input']` test could never detect them -- use pd.isnull instead
        if verbosity > 0 and pd.isnull(sample['input'][:len(weather_columns)]).any():
            warnings.warn('Unable to find weather features {} in the weather for date {}'.format(
                [label for i, label in enumerate(weather_columns) if pd.isnull(sample['input'][i])], timestamp))
    return samples
def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):
    """Compose a pybrain.dataset from a pandas DataFrame

    Arguments:
        df (DataFrame): source table of input (and target) sample sequences
        delays (list of int): sample delays to use for the input tapped delay line
            Positive and negative values are treated the same as sample counts into the past.
            default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
        inputs (list of int or list of str): column indices or labels for the inputs
        outputs (list of int or list of str): column indices or labels for the outputs
        normalize (bool): whether to divide each input to be normally distributed about 0 with std 1
        verbosity (int): print progress information when > 0

    Returns:
        SupervisedDataSet or 3-tuple: the dataset, or tuple(dataset, means, stds) when `normalize`
            means and stds allow normalization of new inputs and denormalization of the outputs

    TODO:
        Detect categorical variables with low dimensionality and split into separate bits
            Vowpel Wabbit hashes strings into an int?
        Detect ordinal variables and convert to continuous int sequence
        SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """
    # an integer N means delays 1..N; 0 means "no tapped delay line"
    if isinstance(delays, int):
        if delays:
            delays = range(1, delays + 1)
        else:
            delays = [0]
    delays = np.abs(np.array([int(i) for i in delays]))
    # resolve integer column positions to labels, then fuzzy-match the labels
    inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]
    outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]
    inputs = [fuzzy_get(df.columns, i) for i in inputs]
    outputs = [fuzzy_get(df.columns, o) for o in outputs]
    N_inp = len(inputs)
    N_out = len(outputs)

    inp_outs = inputs + outputs
    if verbosity > 0:
        print("inputs: {}\noutputs: {}\ndelays: {}\n".format(inputs, outputs, delays))
    means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))
    if normalize:
        means, stds = df[inp_outs].mean(), df[inp_outs].std()
    if normalize and verbosity > 0:
        print("Input mean values (used to normalize input biases): {}".format(means[:N_inp]))
        print("Output mean values (used to normalize output biases): {}".format(means[N_inp:]))

    ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)
    if verbosity > 0:
        print("Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs".format(
            len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))
    # FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time
    # FIX: `delays == np.array([0])` produces a boolean *array*; truth-testing it raises
    # "The truth value of an array ... is ambiguous" for the (default) multi-delay case.
    # np.all() reduces it to the single bool that was intended.
    if np.all(delays == 0) and not normalize:
        if verbosity > 0:
            print("No tapped delay lines (delays) were requested, so using undelayed features for the dataset.")
        assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])
        ds.setField('input', df[inputs].values)
        ds.setField('target', df[outputs].values)
        ds.linkFields(['input', 'target'])
        # for inp, outp in zip(df[inputs].values, df[outputs].values):
        #     ds.appendLinked(inp, outp)
        assert(len(ds['input']) == len(ds['target']))
    else:
        for i, out_vec in enumerate(df[outputs].values):
            if verbosity > 0 and i % 100 == 0:
                print("{}%".format(i / .01 / len(df)))
            elif verbosity > 1:
                print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))
            # the first max(delays) rows have no complete delayed history, so skip them
            if i < max(delays):
                continue
            inp_vec = []
            for delay in delays:
                inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])
            ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])
    if verbosity > 0:
        print("Dataset now has {} samples".format(len(ds)))
    if normalize:
        return ds, means, stds
    else:
        return ds
# def dataset_from_feature_names(df, delays=(1, 2,), quantiles=(), input_columns=(0,), target_columns=(-1,),
# weather_columns=(), features=(), verbosity=1):
# """FIXME: Untested. Transform Datafram columns and append to input_columns as additional features
# Arguments:
# features (seq of str): names of feautures to be appended to the input vector (feature set)
# """
# raise NotImplementedError("Better to implement this as a separate feature transformation function")
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + len(quantiles)
# + len(weather_columns)
# )
# N = max(list(delays) + list(quantiles))
# last_date = first_date = 0
# try:
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(len(N) + extras))
# print('Starting to augment {0}x{1} sample inputs by adding adding {2} additional features'.format(
# len(df), len(input_columns), extras))
# if verbosity > 0:
# print('The first sample input was {0}'.format(
# df[input_columns].iloc[0]))
# print('The first sample target was {0}'.format(
# df[target_columns].iloc[0]))
# print('The last sample input was {0}'.format(
# df[input_columns].iloc[-1]))
# print('The last sample target was {0}'.format(
# df[target_columns].iloc[-1]))
# first_date = df.index.iloc[0].date().toordinal()
# last_date = df.index.iloc[-1].date().toordinal()
# except:
# if verbosity > -1:
# from traceback import format_exc
# warnings.warn(format_exc())
# if verbosity > 1:
# import ipdb
# ipdb.set_trace()
# date_range = (last_date - first_date) + 1 or 1
# # FIXME: scale each feature/column/dimension independently using pug.ann.util.dataset_from_dataframe
# # but mean and std become vectors of the same dimension as the feature/input vector.
# # scikit-learn has transformations that do this more reasonably
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# # convert the list of dicts ((input, output) supervised dataset pairs) into a pybrains.Dataset
# ds = pybrain.datasets.SupervisedDataSet(len(N) + extras, 1)
# sorted_features = sorted(features)
# for sampnum, (input_vector, target_vector) in enumerate(
# zip(df[input_columns].values, df[target_columns].values)):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# # inputs = list(sample['input'])
# # the date we're trying to predict the threshold for
# timestamp = target_vector.index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 2:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale / date_range
# ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\n'.format(
# feature_name, sorted_features)
# msg += 'For sample {} and date {}\n'.format(sampnum, timestamp)
# msg += 'Input vector positions {}:\nInput vector: {}'.format(
# ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(target_vector.values))
def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build a dataset whose output/target vector is left empty.

    Thin wrapper around `dataset_from_dataframe`; the only difference is the
    default `outputs=None` (and `normalize=True`).
    """
    kwargs = dict(df=df, delays=delays, inputs=inputs, outputs=outputs,
                  normalize=normalize, verbosity=verbosity)
    return dataset_from_dataframe(**kwargs)
def inputs_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """ Build a sequence of vectors suitable for "activation" by a neural net

    Identical to `dataset_from_dataframe`, except that only the input vectors are
    returned (not a full DataSet instance) and the default `outputs` is None.

    Returns:
        sequence: the dataset's 'input' field (one vector per sample)
    """
    result = input_dataset_from_dataframe(df=df, delays=delays, inputs=inputs, outputs=outputs,
                                          normalize=normalize, verbosity=verbosity)
    # FIX: with normalize=True (the default) the builder returns (ds, means, stds);
    # previously `result['input']` then raised TypeError (tuple indices must be integers)
    ds = result[0] if normalize else result
    return ds['input']
def build_trainer(nn, ds, verbosity=1):
    """Configure an RProp- (resilient backprop) batch-learning trainer for the net and dataset."""
    trainer_class = pb.supervised.trainers.rprop.RPropMinusTrainer
    return trainer_class(nn, dataset=ds, batchlearning=True, verbose=bool(verbosity))
def weight_matrices(nn):
    """ Extract list of weight matrices from a Network, Layer (module), Trainer, Connection or other pybrain object"""
    # Already a raw array of weights -- nothing left to extract
    if isinstance(nn, ndarray):
        return nn
    # Network-like object: recurse into its connections attribute (a dict of connection lists)
    try:
        return weight_matrices(nn.connections)
    except:
        pass
    # Trainer-like object: recurse into the wrapped network (its .module)
    try:
        return weight_matrices(nn.module)
    except:
        pass
    # Network objects are ParameterContainer's too, but won't reshape into a single matrix,
    # so this must come after try nn.connections
    if isinstance(nn, (ParameterContainer, Connection)):
        return reshape(nn.params, (nn.outdim, nn.indim))
    # A string is treated as a path to a pybrain NetworkReader XML file
    # NOTE(review): NetworkReader(fn, newfile=False) followed by nn.readFrom(fn) looks
    # redundant -- confirm against the pybrain.tools.customxml API
    if isinstance(nn, basestring):
        try:
            fn = nn
            nn = NetworkReader(fn, newfile=False)
            return weight_matrices(nn.readFrom(fn))
        except:
            pass
    # FIXME: what does NetworkReader output? (Module? Layer?) need to handle it's type here
    # Mapping (e.g. Network.connections): recurse over its values
    try:
        return [weight_matrices(v) for (k, v) in nn.iteritems()]
    except:
        try:
            # Flatten the network's per-module connection lists into one list of Connections
            connections = nn.module.connections.values()
            nn = []
            for conlist in connections:
                nn += conlist
            return weight_matrices(nn)
        except:
            # Last resort: treat `nn` as a plain sequence of pybrain objects
            return [weight_matrices(v) for v in nn]
# # FIXME: resolve all these NLP dependencies and get this working
# def dataset_from_time_series(df, N_inp=None, features=('moy',), verbosity=1):
# """Build a pybrains.dataset from the time series contained in a dataframe"""
# N_inp = N_inp or len(df.columns)
# features = features or []
# # Add features to input vector in reverse alphabetical order by feature name,
# # so woy will be added first, and date will be added last.
# # The order that the feature vectors should appear in the input vector to remain consistent
# # and neural net architecture can have structure that anticipates this.
# sorted_features = nlp.sort_strings(features, ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), case_sensitive=False)
# if verbosity > 0:
# print('dataset_from_thresh(features={0})'.format(features))
# samples, mean, std, thresh = simple_dataset_from_thresh(thresh, N=N, max_window=max_window,
# normalize=normalize, ignore_below=ignore_below)
# name = getattr(thresh, 'name', None)
# if name:
# name = normalize_building_name(name)
# if name:
# series = get_series(name)
# else:
# if isinstance(series, basestring):
# name = normalize_building_name(series.strip()) or thresh.name or 'Unknown'
# series = get_series(name)
# elif isinstance(series, pd.DataFrame):
# name = normalize_building_name(series.columns[0]) or thresh.name or 'Unknown'
# series = series[name]
# elif isinstance(series, pd.Series):
# name = normalize_building_name(series.name) or thresh.name or 'Unknown'
# else:
# name = None
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# # TODO: pre-process features list of strings in a separate function
# morn = 0
# # if the building name isn't known, you can't retrieve the morning load values for it
# if name:
# for s in features:
# if s.startswith('morn'):
# try:
# morn = int(s[4:])
# except:
# if verbosity > 0:
# warnings.warn('Unable to determine morning length from feature named "{0}" so using default '
# '(8 am = 8 * 4 = 32)')
# morn = 32 # default to 9 am morning ending
# break
# if verbosity > 0:
# print('In dataset_from_thresh() using {0} morning load values for Building {1}'
# ' because series arg is of type {2}'.format(morn, name, type(series)))
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + morn)
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(N + extras))
# ds = pb.datasets.SupervisedDataSet(N + extras, 1)
# first_date = samples[0]['target'].index[0].date().toordinal()
# last_date = samples[-1]['target'].index[0].date().toordinal()
# date_range = (last_date - first_date) or 1
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# if verbosity > 0:
# print('Adding features for building {3}, {0}, and a morning time series of len {2}, '
# 'to each of the {1} vectors (samples)'.format(features, len(samples), morn, name))
# for sampnum, sample in enumerate(samples):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# inputs = list(sample['input'].values)
# # the date we're trying to predict the rhreshold for
# timestamp = sample['target'].index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 1:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale /
# date_range ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\nFor sample {2}'
# ' and date {3}\nInput vector positions {4}:\nInput vector: {5}'.format(
# feature_name, sorted_features, sampnum, timestamp, ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(sample['target'].values))
# return ds, mean, std, thresh
def dataset_nan_locs(ds):
"""
from http://stackoverflow.com/a/14033137/623735
# gets the indices of the rows with nan values in a dataframe
pd.isnull(df).any(1).nonzero()[0]
"""
ans = []
for sampnum, sample in enumerate(ds):
if pd.isnull(sample).any():
ans += [{
'sample': sampnum,
'input': pd.isnull(sample[0]).nonzero()[0],
'output': pd.isnull(sample[1]).nonzero()[0],
}]
return ans
def table_nan_locs(table):
    """Return (row, column) index pairs for every NaN/null cell in `table`.

    Rows may be sequences (each null column position is reported) or scalars
    (a null scalar is reported as column 0).

    Based on http://stackoverflow.com/a/14033137/623735
    """
    locations = []
    for rownum, row in enumerate(table):
        try:
            null_mask = pd.isnull(row)
            if null_mask.any():
                locations.extend((rownum, colnum) for colnum in null_mask.nonzero()[0])
        except AttributeError:  # scalar row: pd.isnull returned a plain bool
            if pd.isnull(row):
                locations.append((rownum, 0))
    return locations
def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):
    """Plot the performance of the Network and SupervisedDataSet in a pybrain Trainer

    DataSet target and output values are denormalized before plotting with
    ``output * std + mean``, which inverts the normalization ``(output - mean) / std``.

    Args:
        trainer (Trainer): a pybrain Trainer instance containing a valid Network and DataSet
        mean (float): mean of the denormalized dataset (default: 0); only affects the plot scale
        std (float): std (standard deviation) of the denormalized dataset (default: 1)
        title (str): title to display on the plot
        show (bool): whether to display the plot
        save (bool): whether to save the plot image

    Returns:
        3-tuple: (network, mean, std), the plotted network along with denormalization info
    """
    network, dataset = trainer.module, trainer.ds
    return plot_network_results(network=network, ds=dataset, mean=mean, std=std,
                                title=title, show=show, save=save)
def sim_trainer(trainer, mean=0, std=1):
    """Simulate a trainer by activating its DataSet; returns DataFrame(columns=['Output', 'Target'])."""
    return sim_network(trainer.module, ds=trainer.ds, mean=mean, std=std)
def sim_network(network, ds=None, index=None, mean=0, std=1):
    """Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])

    The DataSet's target and output values are denormalized before populating the dataframe columns:

        denormalized_output = normalized_output * std + mean

    Which inverses the normalization that produced the normalized output in the first place:

        normalized_output = (denormalized_output - mean) / std

    Args:
        network (Network): a pybrain Network instance to activate with the provided DataSet, `ds`
        ds (DataSet): a pybrain DataSet to activate the Network on to produce an output sequence
        index (sequence): index for the returned DataFrame (default: range(len(ds['input'])))
        mean (float): mean of the denormalized dataset (default: 0); output is scaled
        std (float): std (standard deviation) of the denormalized dataset (default: 1)

    Returns:
        DataFrame: DataFrame with columns "Output" and "Target" suitable for df.plot-ting
    """
    # just in case network is a trainer or has a Module-derived instance as one of it's attribute
    if hasattr(network, 'module') and hasattr(network.module, 'activate'):
        # may want to also check: isinstance(network.module, (networks.Network, modules.Module))
        network = network.module
    # FIX: getattr with a default so a Network lacking a `ds` attribute reaches the
    # intended RuntimeError below instead of raising AttributeError here
    ds = ds or getattr(network, 'ds', None)
    if not ds:
        raise RuntimeError("Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, "
                           " to plot the outputs. A dataset can be provided as part of a network instance or "
                           "as a separate kwarg if `network` is used to provide the `pybrain.Network`"
                           " instance directly.")
    results_generator = ((network.activate(ds['input'][i])[0] * std + mean, ds['target'][i][0] * std + mean)
                         for i in range(len(ds['input'])))
    # FIX: `index or range(...)` raises ValueError for pandas Index arguments
    # ("truth value of an array is ambiguous") -- test for None explicitly
    if index is None:
        index = range(len(ds['input']))
    return pd.DataFrame(results_generator, columns=['Output', 'Target'], index=index)
|
hobson/pug-ann | pug/ann/util.py | trainer_results | python | def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):
return plot_network_results(network=trainer.module, ds=trainer.ds, mean=mean, std=std, title=title,
show=show, save=save) | Plot the performance of the Network and SupervisedDataSet in a pybrain Trainer
DataSet target and output values are denormalized before plotting with:
output * std + mean
Which inverses the normalization
(output - mean) / std
Args:
trainer (Trainer): a pybrain Trainer instance containing a valid Network and DataSet
ds (DataSet): a pybrain DataSet to override the one contained in `trainer`.
Required if trainer is a Network instance rather than a Trainer instance.
mean (float): mean of the denormalized dataset (default: 0)
Only affects the scale of the plot
std (float): std (standard deviation) of the denormalized dataset (default: 1)
title (str): title to display on the plot.
Returns:
3-tuple: (trainer, mean, std), A trainer/dataset along with denormalization info | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/util.py#L595-L619 | [
"def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):\n \"\"\"Identical to plot_trainer except `network` and `ds` must be provided separately\"\"\"\n df = sim_network(network=network, ds=ds, mean=mean, std=std)\n df.plot()\n plt.xlabel('Date')\n plt.ylabel('Threshold (kW)')\n plt.title(title)\n\n if show:\n try:\n # ipython notebook overrides plt.show and doesn't have a block kwarg\n plt.show(block=False)\n except TypeError:\n plt.show()\n if save:\n filename = 'ann_performance_for_{0}.png'.format(title).replace(' ', '_')\n if isinstance(save, basestring) and os.path.isdir(save):\n filename = os.path.join(save, filename)\n plt.savefig(filename)\n if not show:\n plt.clf()\n\n return network, mean, std\n"
] | """Manipulate, analyze and plot `pybrain` `Network` and `DataSet` objects
TODO:
Incorporate into pybrain fork so pug doesn't have to depend on pybrain
"""
from __future__ import print_function
import os
import warnings
import pandas as pd
from scipy import ndarray, reshape # array, amin, amax,
np = pd.np
from matplotlib import pyplot as plt
import pybrain.datasets
import pybrain.structure
import pybrain.supervised
import pybrain.tools
pb = pybrain
# from pybrain.supervised.trainers import Trainer
from pybrain.tools.customxml import NetworkReader
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.connections.connection import Connection
from pug.nlp.util import tuplify, fuzzy_get
from pug.ann.data import weather
#import pug.nlp.util as nlp
# print(os.path.realpath(__file__))
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(DATA_PATH, '..', 'data')
LAYER_TYPES = set([getattr(pybrain.structure, s) for s in
dir(pybrain.structure) if s.endswith('Layer')])
FAST = False
try:
from arac.pybrainbridge import _FeedForwardNetwork as FeedForwardNetwork
FAST = True
except:
from pybrain.structure import FeedForwardNetwork
print("No fast (ARAC, a FC++ library) FeedForwardNetwork was found, "
"so using the slower pybrain python implementation of FFNN.")
def normalize_layer_type(layer_type):
    """Resolve a layer spec (class, name string, or sequence of either) to pybrain layer class(es).

    A known layer class is returned unchanged; a string is looked up in
    ``pb.structure`` (first verbatim, then with a trailing ``'Layer'``);
    anything else is treated as a sequence of specs and normalized element-wise.
    """
    try:
        if layer_type in LAYER_TYPES:
            return layer_type
    except TypeError:
        # unhashable spec (e.g. a list) -- fall through to the lookups below
        pass
    for suffix in ('', 'Layer'):
        try:
            return getattr(pb.structure, layer_type.strip() + suffix)
        except AttributeError:
            # either no such attribute in pb.structure, or layer_type has no .strip()
            continue
    # not a class or a resolvable name: recurse over it as a sequence of specs
    return [normalize_layer_type(lt) for lt in layer_type]
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
    """Build a feedforward neural net with the indicated input, hidden, and output dimensions

    Arguments:
        N_input (int): number of input nodes (default: 1)
        N_hidden (int or sequence of int): width of each hidden layer; falsy means no hidden layer
        N_output (int): number of output nodes (default: 1)
        hidden_layer_type (str or type or sequence): pybrain layer type(s), one per hidden layer
        verbosity (int): print progress information when > 0

    Returns:
        FeedForwardNetwork with N_input + sum(N_hidden) + N_output nodes
    """
    N_input = N_input or 1
    N_output = N_output or 1

    N_hidden = N_hidden or tuple()
    if isinstance(N_hidden, (int, float, basestring)):
        N_hidden = (int(N_hidden),)

    hidden_layer_type = hidden_layer_type or tuple()
    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
    if verbosity > 0:
        print(N_hidden, ' layers of type ', hidden_layer_type)
    # one layer type per hidden layer
    assert(len(N_hidden) == len(hidden_layer_type))

    nn = pb.structure.FeedForwardNetwork()

    # layers
    nn.addInputModule(pb.structure.BiasUnit(name='bias'))
    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):
        Nhid = int(Nhid)
        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))

    # connections: bias and input feed the first hidden layer, or the output directly
    nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))
    nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):
        Nhid = int(Nhid)
        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],
                                                     nn['hidden-{}'.format(i + 1)]))
    # FIX: only wire the last hidden layer to the output when a hidden layer exists;
    # previously N_hidden=() produced i = -1 and a lookup of the nonexistent 'hidden--1'
    if N_hidden:
        i = len(N_hidden) - 1
        nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))

    nn.sortModules()

    if FAST:
        try:
            nn.convertToFastNetwork()
        except:
            if verbosity > 0:
                print('Unable to convert slow PyBrain NN to a fast ARAC network...')
    if verbosity > 0:
        print(nn.connections)
    return nn
def ann_from_ds(ds=None, N_input=3, N_hidden=0, N_output=1, verbosity=1):
    """Construct a FeedForwardNetwork sized to fit the supplied pybrain DataSet.

    Input/output widths default to the dataset's ``indim``/``outdim`` when
    available, and the hidden-layer width is capped so there are roughly at
    least 5 samples per input-output weight (but never below ``N_output``).
    """
    n_in = getattr(ds, 'indim', N_input)
    n_out = getattr(ds, 'outdim', N_output)
    if not N_hidden:
        N_hidden = getattr(ds, 'paramdim', N_hidden + n_in + n_out) - n_in - n_out
    samples_cap = len(ds) / float(n_in) / float(n_out) / 5.
    n_hid = max(round(min(N_hidden, samples_cap)), n_out)
    return build_ann(N_input=n_in, N_hidden=n_hid, N_output=n_out, verbosity=verbosity)
def prepend_dataset_with_weather(samples, location='Fresno, CA', weather_columns=None, use_cache=True, verbosity=0):
    """ Prepend weather the values specified (e.g. Max TempF) to the samples[0..N]['input'] vectors

    samples[0..N]['target'] should have an index with the date timestamp
    If you use_cache for the current year, you may not get the most recent data.

    Arguments:
        samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame}
        location (str): location passed through to `weather.daily` (default: 'Fresno, CA')
        weather_columns (list of str or int): labels (or column indices) of the weather features
        use_cache (bool): whether to reuse locally cached weather data
        verbosity (int): < 0 silences warnings; > 0 enables extra printing

    Returns:
        list of dict: `samples`, each sample['input'] prepended (in place) with the weather values
    """
    if verbosity > 1:
        print('Prepending weather data for {} to dataset samples'.format(weather_columns))
    if not weather_columns:
        return samples
    timestamps = pd.DatetimeIndex([s['target'].index[0] for s in samples])
    years = range(timestamps.min().date().year, timestamps.max().date().year + 1)
    weather_df = weather.daily(location=location, years=years, use_cache=use_cache)
    # FIXME: weather_df.resample('D') fails
    weather_df.index = [d.date() for d in weather_df.index]
    if verbosity > 1:
        print('Retrieved weather for years {}:'.format(years))
        print(weather_df)
    # allow weather_columns to be given as labels or as integer column positions
    weather_columns = [label if label in weather_df.columns else weather_df.columns[int(label)]
                       for label in (weather_columns or [])]
    for sampnum, sample in enumerate(samples):
        timestamp = timestamps[sampnum]
        try:
            weather_day = weather_df.loc[timestamp.date()]
        except:
            from traceback import print_exc
            print_exc()
            weather_day = {}
            if verbosity >= 0:
                warnings.warn('Unable to find weather for the date {}'.format(timestamp.date()))
        sample['input'] = [weather_day.get(label, None) for label in weather_columns] + list(sample['input'])
        # FIX: missing features are filled with None (and NaN never compares equal to itself),
        # so the old `NaN in sample['input']` test could never detect them -- use pd.isnull instead
        if verbosity > 0 and pd.isnull(sample['input'][:len(weather_columns)]).any():
            warnings.warn('Unable to find weather features {} in the weather for date {}'.format(
                [label for i, label in enumerate(weather_columns) if pd.isnull(sample['input'][i])], timestamp))
    return samples
def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):
    """Compose a pybrain.dataset from a pandas DataFrame

    Arguments:
      df (DataFrame): source table whose columns provide the input and output features
      delays (list of int): sample delays to use for the input tapped delay line
        Positive and negative values are treated the same as sample counts into the past.
        default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
      inputs (list of int or list of str): column indices or labels for the inputs
      outputs (list of int or list of str): column indices or labels for the outputs
      normalize (bool): whether to divide each input to be normally distributed about 0 with std 1
      verbosity (int): 0 silences progress printing

    Returns:
      SupervisedDataSet: when `normalize` is False
      3-tuple: tuple(dataset, list of means, list of stds) when `normalize` is True;
        means and stds allow normalization of new inputs and denormalization of the outputs

    TODO:
      Detect categorical variables with low dimensionality and split into separate bits
        Vowpel Wabbit hashes strings into an int?
      Detect ordinal variables and convert to continuous int sequence
      SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """
    # An integer `delays` of N is shorthand for delays 1..N; 0 means "no delay line"
    if isinstance(delays, int):
        if delays:
            delays = range(1, delays + 1)
        else:
            delays = [0]
    delays = np.abs(np.array([int(i) for i in delays]))
    # Resolve integer positions to column labels, then fuzzy-match each label
    inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]
    outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]
    inputs = [fuzzy_get(df.columns, i) for i in inputs]
    outputs = [fuzzy_get(df.columns, o) for o in outputs]
    N_inp = len(inputs)
    N_out = len(outputs)
    inp_outs = inputs + outputs
    if verbosity > 0:
        print("inputs: {}\noutputs: {}\ndelays: {}\n".format(inputs, outputs, delays))
    # Identity normalization (mean 0, std 1) unless `normalize` is requested
    means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))
    if normalize:
        means, stds = df[inp_outs].mean(), df[inp_outs].std()
    if normalize and verbosity > 0:
        print("Input mean values (used to normalize input biases): {}".format(means[:N_inp]))
        print("Output mean values (used to normalize output biases): {}".format(means[N_inp:]))
    ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)
    if verbosity > 0:
        print("Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs".format(
            len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))
    # FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time
    # BUGFIX: `delays == np.array([0])` is an elementwise comparison whose truth
    # value raises ValueError whenever len(delays) != 1 (e.g. the default
    # delays=(1, 2, 3)); np.array_equal performs the intended whole-array test.
    if np.array_equal(delays, [0]) and not normalize:
        if verbosity > 0:
            print("No tapped delay lines (delays) were requested, so using undelayed features for the dataset.")
        assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])
        ds.setField('input', df[inputs].values)
        ds.setField('target', df[outputs].values)
        ds.linkFields(['input', 'target'])
        assert(len(ds['input']) == len(ds['target']))
    else:
        for i, out_vec in enumerate(df[outputs].values):
            if verbosity > 0 and i % 100 == 0:
                print("{}%".format(i / .01 / len(df)))
            elif verbosity > 1:
                print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))
            # the first max(delays) rows lack a complete delay history, so skip them
            if i < max(delays):
                continue
            inp_vec = []
            for delay in delays:
                inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])
            ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])
    if verbosity > 0:
        print("Dataset now has {} samples".format(len(ds)))
    if normalize:
        return ds, means, stds
    else:
        return ds
# def dataset_from_feature_names(df, delays=(1, 2,), quantiles=(), input_columns=(0,), target_columns=(-1,),
# weather_columns=(), features=(), verbosity=1):
# """FIXME: Untested. Transform Datafram columns and append to input_columns as additional features
# Arguments:
# features (seq of str): names of feautures to be appended to the input vector (feature set)
# """
# raise NotImplementedError("Better to implement this as a separate feature transformation function")
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + len(quantiles)
# + len(weather_columns)
# )
# N = max(list(delays) + list(quantiles))
# last_date = first_date = 0
# try:
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(len(N) + extras))
# print('Starting to augment {0}x{1} sample inputs by adding adding {2} additional features'.format(
# len(df), len(input_columns), extras))
# if verbosity > 0:
# print('The first sample input was {0}'.format(
# df[input_columns].iloc[0]))
# print('The first sample target was {0}'.format(
# df[target_columns].iloc[0]))
# print('The last sample input was {0}'.format(
# df[input_columns].iloc[-1]))
# print('The last sample target was {0}'.format(
# df[target_columns].iloc[-1]))
# first_date = df.index.iloc[0].date().toordinal()
# last_date = df.index.iloc[-1].date().toordinal()
# except:
# if verbosity > -1:
# from traceback import format_exc
# warnings.warn(format_exc())
# if verbosity > 1:
# import ipdb
# ipdb.set_trace()
# date_range = (last_date - first_date) + 1 or 1
# # FIXME: scale each feature/column/dimension independently using pug.ann.util.dataset_from_dataframe
# # but mean and std become vectors of the same dimension as the feature/input vector.
# # scikit-learn has transformations that do this more reasonably
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# # convert the list of dicts ((input, output) supervised dataset pairs) into a pybrains.Dataset
# ds = pybrain.datasets.SupervisedDataSet(len(N) + extras, 1)
# sorted_features = sorted(features)
# for sampnum, (input_vector, target_vector) in enumerate(
# zip(df[input_columns].values, df[target_columns].values)):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# # inputs = list(sample['input'])
# # the date we're trying to predict the threshold for
# timestamp = target_vector.index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 2:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale / date_range
# ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\n'.format(
# feature_name, sorted_features)
# msg += 'For sample {} and date {}\n'.format(sampnum, timestamp)
# msg += 'Input vector positions {}:\nInput vector: {}'.format(
# ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(target_vector.values))
def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build a dataset whose output/target vectors are empty.

    Thin wrapper around `dataset_from_dataframe` that only changes the
    default for `outputs` to None (no target columns).
    """
    kwargs = dict(df=df, delays=delays, inputs=inputs, outputs=outputs,
                  normalize=normalize, verbosity=verbosity)
    return dataset_from_dataframe(**kwargs)
def inputs_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build a sequence of input vectors suitable for "activation" by a neural net.

    Same as `input_dataset_from_dataframe` (i.e. `dataset_from_dataframe` with
    `outputs` defaulting to None), but returns only the 'input' field of the
    resulting DataSet rather than the DataSet itself.
    """
    dataset = input_dataset_from_dataframe(
        df=df, delays=delays, inputs=inputs, outputs=outputs,
        normalize=normalize, verbosity=verbosity)
    return dataset['input']
def build_trainer(nn, ds, verbosity=1):
    """Configure a batch-learning RProp- trainer for network `nn` on dataset `ds`."""
    trainer_class = pb.supervised.trainers.rprop.RPropMinusTrainer
    return trainer_class(nn, dataset=ds, batchlearning=True, verbose=bool(verbosity))
def weight_matrices(nn):
    """ Extract list of weight matrices from a Network, Layer (module), Trainer, Connection or other pybrain object"""
    # Duck-typed dispatch: each known pybrain container shape is tried in turn,
    # with the exception swallowed (falling through) when it doesn't apply.
    if isinstance(nn, ndarray):
        # already a raw weight matrix
        return nn
    try:
        # Network-like object: recurse into its connections mapping
        return weight_matrices(nn.connections)
    except:
        pass
    try:
        # Trainer-like object: recurse into the wrapped module
        return weight_matrices(nn.module)
    except:
        pass
    # Network objects are ParameterContainer's too, but won't reshape into a single matrix,
    # so this must come after try nn.connections
    if isinstance(nn, (ParameterContainer, Connection)):
        return reshape(nn.params, (nn.outdim, nn.indim))
    if isinstance(nn, basestring):
        # A string is treated as a path to a saved-network XML file
        try:
            fn = nn
            nn = NetworkReader(fn, newfile=False)
            return weight_matrices(nn.readFrom(fn))
        except:
            pass
    # FIXME: what does NetworkReader output? (Module? Layer?) need to handle it's type here
    try:
        # dict-like (e.g. a connections mapping): recurse into each value
        return [weight_matrices(v) for (k, v) in nn.iteritems()]
    except:
        try:
            # object holding a module whose connection lists must be flattened first
            connections = nn.module.connections.values()
            nn = []
            for conlist in connections:
                nn += conlist
            return weight_matrices(nn)
        except:
            # last resort: assume a plain sequence of pybrain objects
            return [weight_matrices(v) for v in nn]
# # FIXME: resolve all these NLP dependencies and get this working
# def dataset_from_time_series(df, N_inp=None, features=('moy',), verbosity=1):
# """Build a pybrains.dataset from the time series contained in a dataframe"""
# N_inp = N_inp or len(df.columns)
# features = features or []
# # Add features to input vector in reverse alphabetical order by feature name,
# # so woy will be added first, and date will be added last.
# # The order that the feature vectors should appear in the input vector to remain consistent
# # and neural net architecture can have structure that anticipates this.
# sorted_features = nlp.sort_strings(features, ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), case_sensitive=False)
# if verbosity > 0:
# print('dataset_from_thresh(features={0})'.format(features))
# samples, mean, std, thresh = simple_dataset_from_thresh(thresh, N=N, max_window=max_window,
# normalize=normalize, ignore_below=ignore_below)
# name = getattr(thresh, 'name', None)
# if name:
# name = normalize_building_name(name)
# if name:
# series = get_series(name)
# else:
# if isinstance(series, basestring):
# name = normalize_building_name(series.strip()) or thresh.name or 'Unknown'
# series = get_series(name)
# elif isinstance(series, pd.DataFrame):
# name = normalize_building_name(series.columns[0]) or thresh.name or 'Unknown'
# series = series[name]
# elif isinstance(series, pd.Series):
# name = normalize_building_name(series.name) or thresh.name or 'Unknown'
# else:
# name = None
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# # TODO: pre-process features list of strings in a separate function
# morn = 0
# # if the building name isn't known, you can't retrieve the morning load values for it
# if name:
# for s in features:
# if s.startswith('morn'):
# try:
# morn = int(s[4:])
# except:
# if verbosity > 0:
# warnings.warn('Unable to determine morning length from feature named "{0}" so using default '
# '(8 am = 8 * 4 = 32)')
# morn = 32 # default to 9 am morning ending
# break
# if verbosity > 0:
# print('In dataset_from_thresh() using {0} morning load values for Building {1}'
# ' because series arg is of type {2}'.format(morn, name, type(series)))
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + morn)
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(N + extras))
# ds = pb.datasets.SupervisedDataSet(N + extras, 1)
# first_date = samples[0]['target'].index[0].date().toordinal()
# last_date = samples[-1]['target'].index[0].date().toordinal()
# date_range = (last_date - first_date) or 1
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# if verbosity > 0:
# print('Adding features for building {3}, {0}, and a morning time series of len {2}, '
# 'to each of the {1} vectors (samples)'.format(features, len(samples), morn, name))
# for sampnum, sample in enumerate(samples):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# inputs = list(sample['input'].values)
# # the date we're trying to predict the rhreshold for
# timestamp = sample['target'].index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 1:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale /
# date_range ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\nFor sample {2}'
# ' and date {3}\nInput vector positions {4}:\nInput vector: {5}'.format(
# feature_name, sorted_features, sampnum, timestamp, ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(sample['target'].values))
# return ds, mean, std, thresh
def dataset_nan_locs(ds):
"""
from http://stackoverflow.com/a/14033137/623735
# gets the indices of the rows with nan values in a dataframe
pd.isnull(df).any(1).nonzero()[0]
"""
ans = []
for sampnum, sample in enumerate(ds):
if pd.isnull(sample).any():
ans += [{
'sample': sampnum,
'input': pd.isnull(sample[0]).nonzero()[0],
'output': pd.isnull(sample[1]).nonzero()[0],
}]
return ans
def table_nan_locs(table):
    """Return (row, column) positions of every NaN/null value in `table`.

    `table` may be a sequence of rows (lists/arrays) or a flat sequence of
    scalars; scalar rows report column 0.

    (based on http://stackoverflow.com/a/14033137/623735)
    """
    locations = []
    for r, row in enumerate(table):
        try:
            null_mask = pd.isnull(row)
            if null_mask.any():
                locations.extend((r, c) for c in null_mask.nonzero()[0])
        except AttributeError:
            # `row` is a scalar: pd.isnull returned a plain bool with no .any()
            if pd.isnull(row):
                locations.append((r, 0))
    return locations
def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):
    """Plot the simulated Output vs Target of `network` on dataset `ds`.

    Identical to plot_trainer except `network` and `ds` must be provided separately.
    `save` may be True (save in the working directory) or a directory path.
    Returns (network, mean, std) unchanged.
    """
    results = sim_network(network=network, ds=ds, mean=mean, std=std)
    results.plot()
    plt.title(title)
    plt.xlabel('Date')
    plt.ylabel('Threshold (kW)')
    if show:
        try:
            # ipython notebook overrides plt.show and doesn't have a block kwarg
            plt.show(block=False)
        except TypeError:
            plt.show()
    if save:
        out_file = 'ann_performance_for_{0}.png'.format(title).replace(' ', '_')
        if isinstance(save, basestring) and os.path.isdir(save):
            out_file = os.path.join(save, out_file)
        plt.savefig(out_file)
    if not show:
        # nothing was displayed, so wipe the figure that was drawn
        plt.clf()
    return network, mean, std
def sim_trainer(trainer, mean=0, std=1):
    """Simulate a trainer by activating its own DataSet.

    Returns the DataFrame(columns=['Output', 'Target']) produced by running
    the trainer's network over the trainer's dataset.
    """
    return sim_network(trainer.module, ds=trainer.ds, mean=mean, std=std)
def sim_network(network, ds=None, index=None, mean=0, std=1):
    """Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])

    The DataSet's target and output values are denormalized before populating
    the dataframe columns:

        denormalized_output = normalized_output * std + mean

    which inverts the normalization that produced the normalized values:

        normalized_output = (denormalized_output - mean) / std

    Args:
        network (Network): a pybrain Network (or a trainer wrapping one) to activate
        ds (DataSet): a pybrain DataSet to activate the Network on (default: network.ds)
        index (sequence): optional index for the returned DataFrame (default: 0..N-1)
        mean (float): mean of the denormalized dataset (default: 0)
        std (float): std (standard deviation) of the denormalized dataset (default: 1)

    Returns:
        DataFrame: DataFrame with columns "Output" and "Target" suitable for df.plot-ting
    """
    # Accept a Trainer (or anything holding an activatable `.module`) in place of a Network
    if hasattr(network, 'module') and hasattr(network.module, 'activate'):
        network = network.module
    ds = ds or network.ds
    if not ds:
        raise RuntimeError("Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, "
                           " to plot the outputs. A dataset can be provided as part of a network instance or "
                           "as a separate kwarg if `network` is used to provide the `pybrain.Network`"
                           " instance directly.")
    # Hoist the field lookups out of the loop; use `range` (py2/py3-compatible)
    # instead of the py2-only `xrange` used previously.
    inputs, targets = ds['input'], ds['target']
    rows = ((network.activate(inputs[i])[0] * std + mean, targets[i][0] * std + mean)
            for i in range(len(inputs)))
    return pd.DataFrame(rows, columns=['Output', 'Target'], index=index or range(len(inputs)))
|
hobson/pug-ann | pug/ann/util.py | sim_trainer | python | def sim_trainer(trainer, mean=0, std=1):
return sim_network(network=trainer.module, ds=trainer.ds, mean=mean, std=std) | Simulate a trainer by activating its DataSet and returning DataFrame(columns=['Output','Target']) | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/util.py#L622-L625 | [
"def sim_network(network, ds=None, index=None, mean=0, std=1):\n \"\"\"Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])\n\n The DataSet's target and output values are denormalized before populating the dataframe columns:\n\n denormalized_output = normalized_output * std + mean\n\n Which inverses the normalization that produced the normalized output in the first place: \\\n\n normalized_output = (denormalzied_output - mean) / std\n\n Args:\n network (Network): a pybrain Network instance to activate with the provided DataSet, `ds`\n ds (DataSet): a pybrain DataSet to activate the Network on to produce an output sequence\n mean (float): mean of the denormalized dataset (default: 0)\n Output is scaled\n std (float): std (standard deviation) of the denormalized dataset (default: 1)\n title (str): title to display on the plot.\n\n Returns:\n DataFrame: DataFrame with columns \"Output\" and \"Target\" suitable for df.plot-ting\n \"\"\"\n # just in case network is a trainer or has a Module-derived instance as one of it's attribute\n # isinstance(network.module, (networks.Network, modules.Module))\n if hasattr(network, 'module') and hasattr(network.module, 'activate'):\n # may want to also check: isinstance(network.module, (networks.Network, modules.Module))\n network = network.module\n ds = ds or network.ds\n if not ds:\n raise RuntimeError(\"Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, \"\n \" to plot the outputs. A dataset can be provided as part of a network instance or \"\n \"as a separate kwarg if `network` is used to provide the `pybrain.Network`\"\n \" instance directly.\")\n results_generator = ((network.activate(ds['input'][i])[0] * std + mean, ds['target'][i][0] * std + mean)\n for i in xrange(len(ds['input'])))\n\n return pd.DataFrame(results_generator, columns=['Output', 'Target'], index=index or range(len(ds['input'])))\n"
] | """Maniuplate, analyze and plot `pybrain` `Network` and `DataSet` objects
TODO:
Incorporate into pybrain fork so pug doesn't have to depend on pybrain
"""
from __future__ import print_function
import os
import warnings
import pandas as pd
from scipy import ndarray, reshape # array, amin, amax,
np = pd.np
from matplotlib import pyplot as plt
import pybrain.datasets
import pybrain.structure
import pybrain.supervised
import pybrain.tools
pb = pybrain
# from pybrain.supervised.trainers import Trainer
from pybrain.tools.customxml import NetworkReader
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.connections.connection import Connection
from pug.nlp.util import tuplify, fuzzy_get
from pug.ann.data import weather
#import pug.nlp.util as nlp
# print(os.path.realpath(__file__))
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(DATA_PATH, '..', 'data')
LAYER_TYPES = set([getattr(pybrain.structure, s) for s in
dir(pybrain.structure) if s.endswith('Layer')])
FAST = False
try:
from arac.pybrainbridge import _FeedForwardNetwork as FeedForwardNetwork
FAST = True
except:
from pybrain.structure import FeedForwardNetwork
print("No fast (ARAC, a FC++ library) FeedForwardNetwork was found, "
"so using the slower pybrain python implementation of FFNN.")
def normalize_layer_type(layer_type):
    """Resolve a pybrain layer class from a class, name, or sequence of either.

    Accepts an actual layer class from ``pybrain.structure``, a string such as
    'Linear' or 'LinearLayer' (the 'Layer' suffix is optional), or any
    (possibly nested) sequence of these.

    Returns:
        A layer class, or a list of layer classes when given a sequence.

    Raises:
        ValueError: when a string does not name any ``pybrain.structure`` layer.
    """
    try:
        if layer_type in LAYER_TYPES:
            return layer_type
    except TypeError:
        # unhashable input (e.g. a list) cannot be tested for set membership
        pass
    if isinstance(layer_type, basestring):
        name = layer_type.strip()
        for candidate in (name, name + 'Layer'):
            layer_class = getattr(pb.structure, candidate, None)
            if layer_class is not None:
                return layer_class
        # BUGFIX: an unknown layer name used to fall through to the sequence
        # branch below and recurse character by character, never terminating.
        raise ValueError('Unknown pybrain layer type: {0!r}'.format(layer_type))
    return [normalize_layer_type(lt) for lt in layer_type]
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
    """Build a feed-forward neural net with the indicated input, hidden, and output dimensions

    Arguments:
      N_input (int): number of input nodes (default 1)
      N_hidden (int or seq of int): size of each hidden layer; falsy means no hidden layers
      N_output (int): number of output nodes (default 1)
      hidden_layer_type (str or seq): pybrain layer name(s), e.g. 'Linear' or 'TanhLayer',
        one entry per hidden layer
      verbosity (int): > 0 prints progress and the final connection list

    Returns:
      FeedForwardNetwork: fully connected layer to layer, with a bias unit
        feeding the first non-input layer
    """
    N_input = N_input or 1
    N_output = N_output or 1
    N_hidden = N_hidden or tuple()
    if isinstance(N_hidden, (int, float, basestring)):
        N_hidden = (int(N_hidden),)
    hidden_layer_type = hidden_layer_type or tuple()
    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
    if verbosity > 0:
        print(N_hidden, ' layers of type ', hidden_layer_type)
    assert(len(N_hidden) == len(hidden_layer_type))
    nn = pb.structure.FeedForwardNetwork()
    # layers: the first hidden layer is named 'hidden', later ones 'hidden-1', 'hidden-2', ...
    nn.addInputModule(pb.structure.BiasUnit(name='bias'))
    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):
        Nhid = int(Nhid)
        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))
    # connections: bias and input feed the first hidden layer (or the output directly)
    nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))
    nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):
        Nhid = int(Nhid)
        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],
                                                     nn['hidden-{}'.format(i + 1)]))
    if N_hidden:
        # BUGFIX: with no hidden layers this connection used to be attempted anyway,
        # looking up the nonexistent layer 'hidden--1'; input->output is already
        # connected above in that case.
        i = len(N_hidden) - 1
        nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))
    nn.sortModules()
    if FAST:
        try:
            nn.convertToFastNetwork()
        except Exception:
            # ARAC conversion is best-effort; fall back to the pure-python network
            if verbosity > 0:
                print('Unable to convert slow PyBrain NN to a fast ARAC network...')
    if verbosity > 0:
        print(nn.connections)
    return nn
def ann_from_ds(ds=None, N_input=3, N_hidden=0, N_output=1, verbosity=1):
    """Build a neural net sized to fit the given pybrain DataSet.

    Input/output dimensions are taken from the dataset when available, and the
    hidden-layer size is capped so the sample count stays roughly 5x the number
    of parameters (but never below N_output).
    """
    N_input = getattr(ds, 'indim', N_input)
    N_output = getattr(ds, 'outdim', N_output)
    if not N_hidden:
        N_hidden = getattr(ds, 'paramdim', N_hidden + N_input + N_output) - N_input - N_output
    size_cap = len(ds) / float(N_input) / float(N_output) / 5.
    N_hidden = max(round(min(N_hidden, size_cap)), N_output)
    return build_ann(N_input=N_input, N_hidden=N_hidden, N_output=N_output, verbosity=verbosity)
def prepend_dataset_with_weather(samples, location='Fresno, CA', weather_columns=None, use_cache=True, verbosity=0):
    """ Prepend weather the values specified (e.g. Max TempF) to the samples[0..N]['input'] vectors

    samples[0..N]['target'] should have an index with the date timestamp
    If you use_cache for the curent year, you may not get the most recent data.

    Arguments:
      samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame}
    """
    if verbosity > 1:
        print('Prepending weather data for {} to dataset samples'.format(weather_columns))
    # Nothing requested: return the samples untouched
    if not weather_columns:
        return samples
    # One timestamp per sample, taken from the first index entry of its target
    timestamps = pd.DatetimeIndex([s['target'].index[0] for s in samples])
    years = range(timestamps.min().date().year, timestamps.max().date().year + 1)
    weather_df = weather.daily(location=location, years=years, use_cache=use_cache)
    # FIXME: weather_df.resample('D') fails
    weather_df.index = [d.date() for d in weather_df.index]
    if verbosity > 1:
        print('Retrieved weather for years {}:'.format(years))
        print(weather_df)
    # Allow weather columns to be given by label or by integer position
    weather_columns = [label if label in weather_df.columns else weather_df.columns[int(label)]
                       for label in (weather_columns or [])]
    for sampnum, sample in enumerate(samples):
        timestamp = timestamps[sampnum]
        try:
            weather_day = weather_df.loc[timestamp.date()]
        # NOTE(review): bare except hides unrelated failures; catching KeyError
        # would be sufficient for a missing date — confirm and narrow
        except:
            from traceback import print_exc
            print_exc()
            weather_day = {}
            if verbosity >= 0:
                warnings.warn('Unable to find weather for the date {}'.format(timestamp.date()))
        NaN = float('NaN')
        # NOTE(review): missing features are filled with None below, but both the
        # `NaN in ...` membership test and the `== NaN` comparison only detect
        # float('NaN') (and NaN != NaN makes `== NaN` always False), so this
        # warning likely never fires — confirm intended behavior
        sample['input'] = [weather_day.get(label, None) for label in weather_columns] + list(sample['input'])
        if verbosity > 0 and NaN in sample['input']:
            warnings.warn('Unable to find weather features {} in the weather for date {}'.format(
                [label for i, label in enumerate(weather_columns) if sample['input'][i] == NaN], timestamp))
    return samples
def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):
    """Compose a pybrain.dataset from a pandas DataFrame
    Arguments:
        delays (list of int): sample delays to use for the input tapped delay line
            Positive and negative values are treated the same as sample counts into the past.
            default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
        inputs (list of int or list of str): column indices or labels for the inputs
        outputs (list of int or list of str): column indices or labels for the outputs
        normalize (bool): whether to divide each input to be normally distributed about 0 with std 1
    Returns:
        3-tuple: tuple(dataset, list of means, list of stds)
        means and stds allow normalization of new inputs and denormalization of the outputs
    TODO:
        Detect categorical variables with low dimensionality and split into separate bits
        Vowpel Wabbit hashes strings into an int?
        Detect ordinal variables and convert to continuous int sequence
        SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """
    # An integer `delays` of N is shorthand for delays 1..N; 0 means no delay line
    if isinstance(delays, int):
        if delays:
            delays = range(1, delays + 1)
        else:
            delays = [0]
    delays = np.abs(np.array([int(i) for i in delays]))
    # Resolve integer positions to column labels, then fuzzy-match each label
    inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]
    outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]
    inputs = [fuzzy_get(df.columns, i) for i in inputs]
    outputs = [fuzzy_get(df.columns, o) for o in outputs]
    N_inp = len(inputs)
    N_out = len(outputs)
    inp_outs = inputs + outputs
    if verbosity > 0:
        print("inputs: {}\noutputs: {}\ndelays: {}\n".format(inputs, outputs, delays))
    # Identity normalization (mean 0, std 1) unless `normalize` is requested
    means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))
    if normalize:
        means, stds = df[inp_outs].mean(), df[inp_outs].std()
    if normalize and verbosity > 0:
        print("Input mean values (used to normalize input biases): {}".format(means[:N_inp]))
        print("Output mean values (used to normalize output biases): {}".format(means[N_inp:]))
    ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)
    if verbosity > 0:
        print("Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs".format(
            len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))
    # FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time
    # NOTE(review): `delays == np.array([0])` is an elementwise comparison; taking its
    # truth value raises ValueError whenever len(delays) != 1 (including the default
    # delays=(1, 2, 3)) — `np.array_equal(delays, [0])` is probably what was intended
    if delays == np.array([0]) and not normalize:
        if verbosity > 0:
            print("No tapped delay lines (delays) were requested, so using undelayed features for the dataset.")
        assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])
        ds.setField('input', df[inputs].values)
        ds.setField('target', df[outputs].values)
        ds.linkFields(['input', 'target'])
        # for inp, outp in zip(df[inputs].values, df[outputs].values):
        #     ds.appendLinked(inp, outp)
        assert(len(ds['input']) == len(ds['target']))
    else:
        for i, out_vec in enumerate(df[outputs].values):
            if verbosity > 0 and i % 100 == 0:
                print("{}%".format(i / .01 / len(df)))
            elif verbosity > 1:
                print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))
            # the first max(delays) rows lack a complete delay history, so skip them
            if i < max(delays):
                continue
            inp_vec = []
            for delay in delays:
                inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])
            ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])
    if verbosity > 0:
        print("Dataset now has {} samples".format(len(ds)))
    if normalize:
        return ds, means, stds
    else:
        return ds
# def dataset_from_feature_names(df, delays=(1, 2,), quantiles=(), input_columns=(0,), target_columns=(-1,),
# weather_columns=(), features=(), verbosity=1):
# """FIXME: Untested. Transform Datafram columns and append to input_columns as additional features
# Arguments:
# features (seq of str): names of feautures to be appended to the input vector (feature set)
# """
# raise NotImplementedError("Better to implement this as a separate feature transformation function")
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + len(quantiles)
# + len(weather_columns)
# )
# N = max(list(delays) + list(quantiles))
# last_date = first_date = 0
# try:
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(len(N) + extras))
# print('Starting to augment {0}x{1} sample inputs by adding adding {2} additional features'.format(
# len(df), len(input_columns), extras))
# if verbosity > 0:
# print('The first sample input was {0}'.format(
# df[input_columns].iloc[0]))
# print('The first sample target was {0}'.format(
# df[target_columns].iloc[0]))
# print('The last sample input was {0}'.format(
# df[input_columns].iloc[-1]))
# print('The last sample target was {0}'.format(
# df[target_columns].iloc[-1]))
# first_date = df.index.iloc[0].date().toordinal()
# last_date = df.index.iloc[-1].date().toordinal()
# except:
# if verbosity > -1:
# from traceback import format_exc
# warnings.warn(format_exc())
# if verbosity > 1:
# import ipdb
# ipdb.set_trace()
# date_range = (last_date - first_date) + 1 or 1
# # FIXME: scale each feature/column/dimension independently using pug.ann.util.dataset_from_dataframe
# # but mean and std become vectors of the same dimension as the feature/input vector.
# # scikit-learn has transformations that do this more reasonably
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# # convert the list of dicts ((input, output) supervised dataset pairs) into a pybrains.Dataset
# ds = pybrain.datasets.SupervisedDataSet(len(N) + extras, 1)
# sorted_features = sorted(features)
# for sampnum, (input_vector, target_vector) in enumerate(
# zip(df[input_columns].values, df[target_columns].values)):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# # inputs = list(sample['input'])
# # the date we're trying to predict the threshold for
# timestamp = target_vector.index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 2:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale / date_range
# ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\n'.format(
# feature_name, sorted_features)
# msg += 'For sample {} and date {}\n'.format(sampnum, timestamp)
# msg += 'Input vector positions {}:\nInput vector: {}'.format(
# ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(target_vector.values))
def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build a dataset with an empty output/target vector.

    Identical to `dataset_from_dataframe` except that `outputs` defaults to None,
    so no target columns are extracted.
    """
    return dataset_from_dataframe(
        df,
        delays=delays,
        inputs=inputs,
        outputs=outputs,
        normalize=normalize,
        verbosity=verbosity,
    )
def inputs_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build a sequence of input vectors suitable for "activation" by a neural net.

    Identical to `dataset_from_dataframe` except that only the input vectors are
    returned (not a full DataSet instance) and `outputs` defaults to None.
    """
    dataset = input_dataset_from_dataframe(
        df, delays=delays, inputs=inputs, outputs=outputs,
        normalize=normalize, verbosity=verbosity)
    return dataset['input']
def build_trainer(nn, ds, verbosity=1):
    """Configure an RProp-minus neural net trainer for network `nn` on pybrain dataset `ds`."""
    TrainerClass = pb.supervised.trainers.rprop.RPropMinusTrainer
    return TrainerClass(nn, dataset=ds, batchlearning=True, verbose=bool(verbosity))
def weight_matrices(nn):
    """Extract a (possibly nested) list of weight matrices from a pybrain object.

    Accepts a Network, Layer (module), Trainer, Connection, ParameterContainer,
    a path to a saved network file (str), or a (nested) container of any of
    these, and recursively unwraps it into numpy weight matrices of shape
    (outdim, indim). A raw ndarray is returned unchanged.
    """
    # already a raw weight matrix
    if isinstance(nn, ndarray):
        return nn
    # Trainers/Networks: recurse into their connections or the wrapped module.
    # except Exception (not a bare except) so SystemExit/KeyboardInterrupt propagate.
    try:
        return weight_matrices(nn.connections)
    except Exception:
        pass
    try:
        return weight_matrices(nn.module)
    except Exception:
        pass
    # Network objects are ParameterContainer's too, but won't reshape into a single matrix,
    # so this must come after try nn.connections
    if isinstance(nn, (ParameterContainer, Connection)):
        return reshape(nn.params, (nn.outdim, nn.indim))
    # a string is assumed to be the path of a saved (XML) network file
    if isinstance(nn, basestring):
        try:
            fn = nn
            nn = NetworkReader(fn, newfile=False)
            return weight_matrices(nn.readFrom(fn))
        except Exception:
            pass
    # FIXME: what does NetworkReader output? (Module? Layer?) need to handle it's type here
    try:
        # dict-like containers, e.g. {name: connection}
        return [weight_matrices(v) for (k, v) in nn.iteritems()]
    except Exception:
        try:
            # flatten a network's lists of connections before recursing
            connections = nn.module.connections.values()
            nn = []
            for conlist in connections:
                nn += conlist
            return weight_matrices(nn)
        except Exception:
            # fall back: treat `nn` as a plain sequence of pybrain objects
            return [weight_matrices(v) for v in nn]
# # FIXME: resolve all these NLP dependencies and get this working
# def dataset_from_time_series(df, N_inp=None, features=('moy',), verbosity=1):
# """Build a pybrains.dataset from the time series contained in a dataframe"""
# N_inp = N_inp or len(df.columns)
# features = features or []
# # Add features to input vector in reverse alphabetical order by feature name,
# # so woy will be added first, and date will be added last.
# # The order that the feature vectors should appear in the input vector to remain consistent
# # and neural net architecture can have structure that anticipates this.
# sorted_features = nlp.sort_strings(features, ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), case_sensitive=False)
# if verbosity > 0:
# print('dataset_from_thresh(features={0})'.format(features))
# samples, mean, std, thresh = simple_dataset_from_thresh(thresh, N=N, max_window=max_window,
# normalize=normalize, ignore_below=ignore_below)
# name = getattr(thresh, 'name', None)
# if name:
# name = normalize_building_name(name)
# if name:
# series = get_series(name)
# else:
# if isinstance(series, basestring):
# name = normalize_building_name(series.strip()) or thresh.name or 'Unknown'
# series = get_series(name)
# elif isinstance(series, pd.DataFrame):
# name = normalize_building_name(series.columns[0]) or thresh.name or 'Unknown'
# series = series[name]
# elif isinstance(series, pd.Series):
# name = normalize_building_name(series.name) or thresh.name or 'Unknown'
# else:
# name = None
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# # TODO: pre-process features list of strings in a separate function
# morn = 0
# # if the building name isn't known, you can't retrieve the morning load values for it
# if name:
# for s in features:
# if s.startswith('morn'):
# try:
# morn = int(s[4:])
# except:
# if verbosity > 0:
# warnings.warn('Unable to determine morning length from feature named "{0}" so using default '
# '(8 am = 8 * 4 = 32)')
# morn = 32 # default to 9 am morning ending
# break
# if verbosity > 0:
# print('In dataset_from_thresh() using {0} morning load values for Building {1}'
# ' because series arg is of type {2}'.format(morn, name, type(series)))
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + morn)
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(N + extras))
# ds = pb.datasets.SupervisedDataSet(N + extras, 1)
# first_date = samples[0]['target'].index[0].date().toordinal()
# last_date = samples[-1]['target'].index[0].date().toordinal()
# date_range = (last_date - first_date) or 1
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# if verbosity > 0:
# print('Adding features for building {3}, {0}, and a morning time series of len {2}, '
# 'to each of the {1} vectors (samples)'.format(features, len(samples), morn, name))
# for sampnum, sample in enumerate(samples):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# inputs = list(sample['input'].values)
# # the date we're trying to predict the rhreshold for
# timestamp = sample['target'].index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 1:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale /
# date_range ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\nFor sample {2}'
# ' and date {3}\nInput vector positions {4}:\nInput vector: {5}'.format(
# feature_name, sorted_features, sampnum, timestamp, ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(sample['target'].values))
# return ds, mean, std, thresh
def dataset_nan_locs(ds):
"""
from http://stackoverflow.com/a/14033137/623735
# gets the indices of the rows with nan values in a dataframe
pd.isnull(df).any(1).nonzero()[0]
"""
ans = []
for sampnum, sample in enumerate(ds):
if pd.isnull(sample).any():
ans += [{
'sample': sampnum,
'input': pd.isnull(sample[0]).nonzero()[0],
'output': pd.isnull(sample[1]).nonzero()[0],
}]
return ans
def table_nan_locs(table):
    """Return (row, column) index pairs for every NaN found in `table`.

    `table` may be a sequence of rows (lists/arrays) or a flat sequence of
    scalars; scalars are reported with column index 0.

    from http://stackoverflow.com/a/14033137/623735
    # gets the indices of the rows with nan values in a dataframe
    pd.isnull(df).any(1).nonzero()[0]
    """
    locations = []
    for i, row in enumerate(table):
        nulls = pd.isnull(row)
        try:
            has_nulls = nulls.any()
        except AttributeError:  # `table` is really just a sequence of scalars
            if nulls:
                locations.append((i, 0))
            continue
        if has_nulls:
            locations.extend((i, j) for j in nulls.nonzero()[0])
    return locations
def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):
    """Plot a network's denormalized output vs. target over a DataSet.

    Identical to `trainer_results` except `network` and `ds` must be provided separately.

    Args:
      network (Network): pybrain Network (or Trainer wrapping one) to activate
      ds (DataSet): dataset to activate the network on (may also come from `network.ds`)
      mean (float): mean used to denormalize outputs/targets before plotting
      std (float): standard deviation used to denormalize outputs/targets
      title (str): plot title; also used to build the saved-image filename
      show (bool): display the figure interactively
      save (bool or str): truthy saves a PNG; a directory path saves it there

    Returns:
      3-tuple: (network, mean, std) so denormalization info travels with the net
    """
    df = sim_network(network=network, ds=ds, mean=mean, std=std)
    df.plot()
    plt.xlabel('Date')
    plt.ylabel('Threshold (kW)')
    plt.title(title)
    if show:
        try:
            # ipython notebook overrides plt.show and doesn't have a block kwarg
            plt.show(block=False)
        except TypeError:
            plt.show()
    if save:
        # e.g. 'ann_performance_for_My_Title.png' (all spaces replaced with underscores)
        filename = 'ann_performance_for_{0}.png'.format(title).replace(' ', '_')
        if isinstance(save, basestring) and os.path.isdir(save):
            filename = os.path.join(save, filename)
        plt.savefig(filename)
        # clear the figure if it was never displayed
        if not show:
            plt.clf()
    return network, mean, std
def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):
    """Plot the performance of the Network and SupervisedDataSet held by a pybrain Trainer.

    DataSet target and output values are denormalized before plotting with
    ``output * std + mean``, inverting the normalization ``(output - mean) / std``.

    Args:
      trainer (Trainer): a pybrain Trainer instance containing a valid Network and DataSet
      mean (float): mean of the denormalized dataset (default: 0);
        only affects the scale of the plot
      std (float): std (standard deviation) of the denormalized dataset (default: 1)
      title (str): title to display on the plot
      show (bool): display the figure interactively
      save (bool or str): truthy saves a PNG; a directory path saves it there

    Returns:
      3-tuple: (network, mean, std) as returned by `plot_network_results`
    """
    return plot_network_results(
        network=trainer.module, ds=trainer.ds, mean=mean, std=std,
        title=title, show=show, save=save)
def sim_network(network, ds=None, index=None, mean=0, std=1):
    """Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])

    The DataSet's target and output values are denormalized before populating the dataframe columns:

        denormalized_output = normalized_output * std + mean

    Which inverses the normalization that produced the normalized output in the first place:

        normalized_output = (denormalized_output - mean) / std

    Args:
      network (Network): a pybrain Network (or a Trainer whose `.module` is one)
        to activate with the provided DataSet, `ds`
      ds (DataSet): a pybrain DataSet to activate the Network on to produce an
        output sequence; falls back to `network.ds` when not provided
      index (sequence): optional index for the returned DataFrame
        (default: 0..len(ds['input'])-1)
      mean (float): mean of the denormalized dataset (default: 0)
      std (float): std (standard deviation) of the denormalized dataset (default: 1)

    Returns:
      DataFrame: DataFrame with columns "Output" and "Target" suitable for df.plot-ting

    Raises:
      RuntimeError: if no DataSet is provided and `network` has none attached
    """
    # just in case network is a trainer or has a Module-derived instance as one of it's attribute
    # isinstance(network.module, (networks.Network, modules.Module))
    if hasattr(network, 'module') and hasattr(network.module, 'activate'):
        # may want to also check: isinstance(network.module, (networks.Network, modules.Module))
        network = network.module
    ds = ds or network.ds
    if not ds:
        raise RuntimeError("Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, "
                           " to plot the outputs. A dataset can be provided as part of a network instance or "
                           "as a separate kwarg if `network` is used to provide the `pybrain.Network`"
                           " instance directly.")
    # BUGFIX: `range` (works on Python 2 and 3) instead of the Python-2-only `xrange`
    results_generator = ((network.activate(ds['input'][i])[0] * std + mean, ds['target'][i][0] * std + mean)
                         for i in range(len(ds['input'])))
    return pd.DataFrame(results_generator, columns=['Output', 'Target'], index=index or range(len(ds['input'])))
|
hobson/pug-ann | pug/ann/util.py | sim_network | python | def sim_network(network, ds=None, index=None, mean=0, std=1):
# just in case network is a trainer or has a Module-derived instance as one of it's attribute
# isinstance(network.module, (networks.Network, modules.Module))
if hasattr(network, 'module') and hasattr(network.module, 'activate'):
# may want to also check: isinstance(network.module, (networks.Network, modules.Module))
network = network.module
ds = ds or network.ds
if not ds:
raise RuntimeError("Unable to find a `pybrain.datasets.DataSet` instance to activate the Network with, "
" to plot the outputs. A dataset can be provided as part of a network instance or "
"as a separate kwarg if `network` is used to provide the `pybrain.Network`"
" instance directly.")
results_generator = ((network.activate(ds['input'][i])[0] * std + mean, ds['target'][i][0] * std + mean)
for i in xrange(len(ds['input'])))
return pd.DataFrame(results_generator, columns=['Output', 'Target'], index=index or range(len(ds['input']))) | Simulate/activate a Network on a SupervisedDataSet and return DataFrame(columns=['Output','Target'])
The DataSet's target and output values are denormalized before populating the dataframe columns:
denormalized_output = normalized_output * std + mean
Which inverses the normalization that produced the normalized output in the first place: \
normalized_output = (denormalzied_output - mean) / std
Args:
network (Network): a pybrain Network instance to activate with the provided DataSet, `ds`
ds (DataSet): a pybrain DataSet to activate the Network on to produce an output sequence
mean (float): mean of the denormalized dataset (default: 0)
Output is scaled
std (float): std (standard deviation) of the denormalized dataset (default: 1)
title (str): title to display on the plot.
Returns:
DataFrame: DataFrame with columns "Output" and "Target" suitable for df.plot-ting | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/util.py#L628-L664 | null | """Maniuplate, analyze and plot `pybrain` `Network` and `DataSet` objects
TODO:
Incorporate into pybrain fork so pug doesn't have to depend on pybrain
"""
from __future__ import print_function
import os
import warnings
import pandas as pd
from scipy import ndarray, reshape # array, amin, amax,
np = pd.np
from matplotlib import pyplot as plt
import pybrain.datasets
import pybrain.structure
import pybrain.supervised
import pybrain.tools
pb = pybrain
# from pybrain.supervised.trainers import Trainer
from pybrain.tools.customxml import NetworkReader
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.structure.connections.connection import Connection
from pug.nlp.util import tuplify, fuzzy_get
from pug.ann.data import weather
#import pug.nlp.util as nlp
# print(os.path.realpath(__file__))
DATA_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(DATA_PATH, '..', 'data')
LAYER_TYPES = set([getattr(pybrain.structure, s) for s in
dir(pybrain.structure) if s.endswith('Layer')])
FAST = False
try:
from arac.pybrainbridge import _FeedForwardNetwork as FeedForwardNetwork
FAST = True
except:
from pybrain.structure import FeedForwardNetwork
print("No fast (ARAC, a FC++ library) FeedForwardNetwork was found, "
"so using the slower pybrain python implementation of FFNN.")
def normalize_layer_type(layer_type):
    """Resolve a layer spec (Layer class, name str, or sequence of either) to pybrain Layer class(es).

    A known Layer class is returned unchanged; a string is looked up in
    `pb.structure`, with and without a 'Layer' suffix; anything else is treated
    as a sequence and normalized element-wise (returning a list).
    """
    try:
        if layer_type in LAYER_TYPES:
            return layer_type
    except TypeError:  # unhashable, e.g. a list of layer types
        pass
    for suffix in ('', 'Layer'):
        try:
            return getattr(pb.structure, layer_type.strip() + suffix)
        except AttributeError:
            continue
    return [normalize_layer_type(item) for item in layer_type]
def build_ann(N_input=None, N_hidden=2, N_output=1, hidden_layer_type='Linear', verbosity=1):
    """Build a neural net with the indicated input, hidden, and output dimensions

    Arguments:
      N_input (int): dimension of the input vector (default: 1)
      N_hidden (int or seq of int): size of each hidden layer; falsy means no hidden layer
        (pass hidden_layer_type=None as well in that case so the lengths match)
      N_output (int): dimension of the output vector (default: 1)
      hidden_layer_type (str, Layer class, or seq): pybrain layer type(s), one per hidden layer
      verbosity (int): > 0 prints layer and connection details

    Returns:
      FeedForwardNetwork with N_input + sum(N_hidden) + N_output nodes
    """
    N_input = N_input or 1
    N_output = N_output or 1
    N_hidden = N_hidden or tuple()
    if isinstance(N_hidden, (int, float, basestring)):
        N_hidden = (int(N_hidden),)
    hidden_layer_type = hidden_layer_type or tuple()
    hidden_layer_type = tuplify(normalize_layer_type(hidden_layer_type))
    if verbosity > 0:
        print(N_hidden, ' layers of type ', hidden_layer_type)
    assert(len(N_hidden) == len(hidden_layer_type))
    nn = pb.structure.FeedForwardNetwork()
    # layers (first hidden layer is named 'hidden', subsequent ones 'hidden-1', 'hidden-2', ...)
    nn.addInputModule(pb.structure.BiasUnit(name='bias'))
    nn.addInputModule(pb.structure.LinearLayer(N_input, name='input'))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden, hidden_layer_type)):
        Nhid = int(Nhid)
        nn.addModule(hidlaytype(Nhid, name=('hidden-{}'.format(i) if i else 'hidden')))
    nn.addOutputModule(pb.structure.LinearLayer(N_output, name='output'))
    # connections: bias and input go to the first hidden layer, or straight to output if no hidden
    nn.addConnection(pb.structure.FullConnection(nn['bias'], nn['hidden'] if N_hidden else nn['output']))
    nn.addConnection(pb.structure.FullConnection(nn['input'], nn['hidden'] if N_hidden else nn['output']))
    for i, (Nhid, hidlaytype) in enumerate(zip(N_hidden[:-1], hidden_layer_type[:-1])):
        Nhid = int(Nhid)
        nn.addConnection(pb.structure.FullConnection(nn[('hidden-{}'.format(i) if i else 'hidden')],
                                                     nn['hidden-{}'.format(i + 1)]))
    if N_hidden:
        # BUGFIX: this connection previously ran even with no hidden layers,
        # looking up the nonexistent layer 'hidden--1' (i == -1) and crashing
        i = len(N_hidden) - 1
        nn.addConnection(pb.structure.FullConnection(nn['hidden-{}'.format(i) if i else 'hidden'], nn['output']))
    nn.sortModules()
    if FAST:
        try:
            nn.convertToFastNetwork()
        except Exception:
            if verbosity > 0:
                print('Unable to convert slow PyBrain NN to a fast ARAC network...')
    if verbosity > 0:
        print(nn.connections)
    return nn
def ann_from_ds(ds=None, N_input=3, N_hidden=0, N_output=1, verbosity=1):
    """Build a neural net whose dimensions match a pybrain DataSet.

    Input/output sizes come from the dataset's `indim`/`outdim` when available;
    the hidden-layer size is derived from `paramdim` and then capped based on
    the number of samples (never below N_output).
    """
    N_input = getattr(ds, 'indim', N_input)
    N_output = getattr(ds, 'outdim', N_output)
    if not N_hidden:
        # fall back to the dataset's parameter count, less the input/output dims
        N_hidden = getattr(ds, 'paramdim', N_hidden + N_input + N_output) - N_input - N_output
    sample_cap = len(ds) / float(N_input) / float(N_output) / 5.
    N_hidden = max(round(min(N_hidden, sample_cap)), N_output)
    return build_ann(N_input=N_input, N_hidden=N_hidden, N_output=N_output, verbosity=verbosity)
def prepend_dataset_with_weather(samples, location='Fresno, CA', weather_columns=None, use_cache=True, verbosity=0):
    """ Prepend the weather values specified (e.g. "Max TemperatureF") to each samples[i]['input'] vector

    samples[i]['target'] should have an index whose first entry is the date timestamp
    used to look up that day's weather.
    If you use_cache for the current year, you may not get the most recent data.

    Arguments:
      samples (list of dict): {'input': np.array(), 'target': pandas.DataFrame}
      location (str): city passed through to `weather.daily` (default: 'Fresno, CA')
      weather_columns (seq of str or int): labels (or positional indices) of weather
        columns to prepend; falsy means return `samples` unchanged
      use_cache (bool): whether `weather.daily` may serve cached data
      verbosity (int): < 0 silences the missing-date warning; > 0 enables extra output

    Returns:
      list of dict: the same `samples` objects, each 'input' extended in front with
        one value per requested weather column (None when the value is missing)
    """
    if verbosity > 1:
        print('Prepending weather data for {} to dataset samples'.format(weather_columns))
    # no columns requested: nothing to do
    if not weather_columns:
        return samples
    # one timestamp per sample, taken from the first index entry of its target
    timestamps = pd.DatetimeIndex([s['target'].index[0] for s in samples])
    years = range(timestamps.min().date().year, timestamps.max().date().year + 1)
    weather_df = weather.daily(location=location, years=years, use_cache=use_cache)
    # FIXME: weather_df.resample('D') fails
    # re-index by plain `date` objects so day lookups below ignore time-of-day
    weather_df.index = [d.date() for d in weather_df.index]
    if verbosity > 1:
        print('Retrieved weather for years {}:'.format(years))
        print(weather_df)
    # allow integer positions as well as exact column labels
    weather_columns = [label if label in weather_df.columns else weather_df.columns[int(label)]
                       for label in (weather_columns or [])]
    for sampnum, sample in enumerate(samples):
        timestamp = timestamps[sampnum]
        try:
            weather_day = weather_df.loc[timestamp.date()]
        except:
            from traceback import print_exc
            print_exc()
            weather_day = {}
            if verbosity >= 0:
                warnings.warn('Unable to find weather for the date {}'.format(timestamp.date()))
        NaN = float('NaN')
        # missing columns become None (not NaN) via .get(label, None)
        sample['input'] = [weather_day.get(label, None) for label in weather_columns] + list(sample['input'])
        # NOTE(review): `NaN in sample['input']` only matches via identity with this
        # particular NaN object (NaN != NaN), and missing values above are None, so
        # this warning may never fire for absent columns -- confirm intended behavior
        if verbosity > 0 and NaN in sample['input']:
            warnings.warn('Unable to find weather features {} in the weather for date {}'.format(
                [label for i, label in enumerate(weather_columns) if sample['input'][i] == NaN], timestamp))
    return samples
def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):
    """Compose a pybrain.dataset from a pandas DataFrame

    Arguments:
      delays (list of int): sample delays to use for the input tapped delay line
        Positive and negative values are treated the same as sample counts into the past.
        default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
      inputs (list of int or list of str): column indices or labels for the inputs
      outputs (list of int or list of str): column indices or labels for the outputs
      normalize (bool): whether to divide each input to be normally distributed about 0 with std 1

    Returns:
      If `normalize`, a 3-tuple: tuple(dataset, means, stds) where the means and stds
        allow normalization of new inputs and denormalization of the outputs;
      otherwise just the dataset.

    TODO:
      Detect categorical variables with low dimensionality and split into separate bits
        Vowpel Wabbit hashes strings into an int?
      Detect ordinal variables and convert to continuous int sequence
      SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """
    if isinstance(delays, int):
        if delays:
            delays = range(1, delays + 1)
        else:
            delays = [0]
    delays = np.abs(np.array([int(i) for i in delays]))
    # resolve positional (numeric) column specs to labels, then fuzzy-match labels
    inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]
    outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]
    inputs = [fuzzy_get(df.columns, i) for i in inputs]
    outputs = [fuzzy_get(df.columns, o) for o in outputs]
    N_inp = len(inputs)
    N_out = len(outputs)
    inp_outs = inputs + outputs
    if verbosity > 0:
        print("inputs: {}\noutputs: {}\ndelays: {}\n".format(inputs, outputs, delays))
    means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))
    if normalize:
        means, stds = df[inp_outs].mean(), df[inp_outs].std()
    if normalize and verbosity > 0:
        print("Input mean values (used to normalize input biases): {}".format(means[:N_inp]))
        print("Output mean values (used to normalize output biases): {}".format(means[N_inp:]))
    ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)
    if verbosity > 0:
        print("Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs".format(
            len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))
    # FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time
    # BUGFIX: `delays == np.array([0])` compares elementwise, so for any multi-element
    # `delays` the `if` raised "truth value of an array is ambiguous"; use array_equal
    if np.array_equal(delays, [0]) and not normalize:
        if verbosity > 0:
            print("No tapped delay lines (delays) were requested, so using undelayed features for the dataset.")
        assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])
        ds.setField('input', df[inputs].values)
        ds.setField('target', df[outputs].values)
        ds.linkFields(['input', 'target'])
        # for inp, outp in zip(df[inputs].values, df[outputs].values):
        #     ds.appendLinked(inp, outp)
        assert(len(ds['input']) == len(ds['target']))
    else:
        for i, out_vec in enumerate(df[outputs].values):
            if verbosity > 0 and i % 100 == 0:
                print("{}%".format(i / .01 / len(df)))
            elif verbosity > 1:
                print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))
            # skip rows until every tapped delay has history to draw from
            if i < max(delays):
                continue
            inp_vec = []
            for delay in delays:
                inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])
            ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])
    if verbosity > 0:
        print("Dataset now has {} samples".format(len(ds)))
    if normalize:
        return ds, means, stds
    else:
        return ds
# def dataset_from_feature_names(df, delays=(1, 2,), quantiles=(), input_columns=(0,), target_columns=(-1,),
# weather_columns=(), features=(), verbosity=1):
# """FIXME: Untested. Transform Datafram columns and append to input_columns as additional features
# Arguments:
# features (seq of str): names of feautures to be appended to the input vector (feature set)
# """
# raise NotImplementedError("Better to implement this as a separate feature transformation function")
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + len(quantiles)
# + len(weather_columns)
# )
# N = max(list(delays) + list(quantiles))
# last_date = first_date = 0
# try:
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(len(N) + extras))
# print('Starting to augment {0}x{1} sample inputs by adding adding {2} additional features'.format(
# len(df), len(input_columns), extras))
# if verbosity > 0:
# print('The first sample input was {0}'.format(
# df[input_columns].iloc[0]))
# print('The first sample target was {0}'.format(
# df[target_columns].iloc[0]))
# print('The last sample input was {0}'.format(
# df[input_columns].iloc[-1]))
# print('The last sample target was {0}'.format(
# df[target_columns].iloc[-1]))
# first_date = df.index.iloc[0].date().toordinal()
# last_date = df.index.iloc[-1].date().toordinal()
# except:
# if verbosity > -1:
# from traceback import format_exc
# warnings.warn(format_exc())
# if verbosity > 1:
# import ipdb
# ipdb.set_trace()
# date_range = (last_date - first_date) + 1 or 1
# # FIXME: scale each feature/column/dimension independently using pug.ann.util.dataset_from_dataframe
# # but mean and std become vectors of the same dimension as the feature/input vector.
# # scikit-learn has transformations that do this more reasonably
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# # convert the list of dicts ((input, output) supervised dataset pairs) into a pybrains.Dataset
# ds = pybrain.datasets.SupervisedDataSet(len(N) + extras, 1)
# sorted_features = sorted(features)
# for sampnum, (input_vector, target_vector) in enumerate(
# zip(df[input_columns].values, df[target_columns].values)):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# # inputs = list(sample['input'])
# # the date we're trying to predict the threshold for
# timestamp = target_vector.index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 2:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale / date_range
# ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\n'.format(
# feature_name, sorted_features)
# msg += 'For sample {} and date {}\n'.format(sampnum, timestamp)
# msg += 'Input vector positions {}:\nInput vector: {}'.format(
# ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(target_vector.values))
def input_dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build a dataset with an empty output/target vector.

    Identical to `dataset_from_dataframe` except that `outputs` defaults to None,
    so no target columns are extracted.
    """
    return dataset_from_dataframe(
        df,
        delays=delays,
        inputs=inputs,
        outputs=outputs,
        normalize=normalize,
        verbosity=verbosity,
    )
def inputs_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=None, normalize=True, verbosity=1):
    """Build a sequence of input vectors suitable for "activation" by a neural net.

    Identical to `dataset_from_dataframe` except that only the input vectors are
    returned (not a full DataSet instance) and `outputs` defaults to None.
    """
    dataset = input_dataset_from_dataframe(
        df, delays=delays, inputs=inputs, outputs=outputs,
        normalize=normalize, verbosity=verbosity)
    return dataset['input']
def build_trainer(nn, ds, verbosity=1):
    """Configure an RProp-minus neural net trainer for network `nn` on pybrain dataset `ds`."""
    TrainerClass = pb.supervised.trainers.rprop.RPropMinusTrainer
    return TrainerClass(nn, dataset=ds, batchlearning=True, verbose=bool(verbosity))
def weight_matrices(nn):
    """Extract a (possibly nested) list of weight matrices from a pybrain object.

    Accepts a Network, Layer (module), Trainer, Connection, ParameterContainer,
    a path to a saved network file (str), or a (nested) container of any of
    these, and recursively unwraps it into numpy weight matrices of shape
    (outdim, indim). A raw ndarray is returned unchanged.
    """
    # already a raw weight matrix
    if isinstance(nn, ndarray):
        return nn
    # Trainers/Networks: recurse into their connections or the wrapped module.
    # except Exception (not a bare except) so SystemExit/KeyboardInterrupt propagate.
    try:
        return weight_matrices(nn.connections)
    except Exception:
        pass
    try:
        return weight_matrices(nn.module)
    except Exception:
        pass
    # Network objects are ParameterContainer's too, but won't reshape into a single matrix,
    # so this must come after try nn.connections
    if isinstance(nn, (ParameterContainer, Connection)):
        return reshape(nn.params, (nn.outdim, nn.indim))
    # a string is assumed to be the path of a saved (XML) network file
    if isinstance(nn, basestring):
        try:
            fn = nn
            nn = NetworkReader(fn, newfile=False)
            return weight_matrices(nn.readFrom(fn))
        except Exception:
            pass
    # FIXME: what does NetworkReader output? (Module? Layer?) need to handle it's type here
    try:
        # dict-like containers, e.g. {name: connection}
        return [weight_matrices(v) for (k, v) in nn.iteritems()]
    except Exception:
        try:
            # flatten a network's lists of connections before recursing
            connections = nn.module.connections.values()
            nn = []
            for conlist in connections:
                nn += conlist
            return weight_matrices(nn)
        except Exception:
            # fall back: treat `nn` as a plain sequence of pybrain objects
            return [weight_matrices(v) for v in nn]
# # FIXME: resolve all these NLP dependencies and get this working
# def dataset_from_time_series(df, N_inp=None, features=('moy',), verbosity=1):
# """Build a pybrains.dataset from the time series contained in a dataframe"""
# N_inp = N_inp or len(df.columns)
# features = features or []
# # Add features to input vector in reverse alphabetical order by feature name,
# # so woy will be added first, and date will be added last.
# # The order that the feature vectors should appear in the input vector to remain consistent
# # and neural net architecture can have structure that anticipates this.
# sorted_features = nlp.sort_strings(features, ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), case_sensitive=False)
# if verbosity > 0:
# print('dataset_from_thresh(features={0})'.format(features))
# samples, mean, std, thresh = simple_dataset_from_thresh(thresh, N=N, max_window=max_window,
# normalize=normalize, ignore_below=ignore_below)
# name = getattr(thresh, 'name', None)
# if name:
# name = normalize_building_name(name)
# if name:
# series = get_series(name)
# else:
# if isinstance(series, basestring):
# name = normalize_building_name(series.strip()) or thresh.name or 'Unknown'
# series = get_series(name)
# elif isinstance(series, pd.DataFrame):
# name = normalize_building_name(series.columns[0]) or thresh.name or 'Unknown'
# series = series[name]
# elif isinstance(series, pd.Series):
# name = normalize_building_name(series.name) or thresh.name or 'Unknown'
# else:
# name = None
# # Compute the length of extra features added on to the vector from the rolling window of previous
# # threshold values
# # TODO: pre-process features list of strings in a separate function
# morn = 0
# # if the building name isn't known, you can't retrieve the morning load values for it
# if name:
# for s in features:
# if s.startswith('morn'):
# try:
# morn = int(s[4:])
# except:
# if verbosity > 0:
# warnings.warn('Unable to determine morning length from feature named "{0}" so using default '
# '(8 am = 8 * 4 = 32)')
# morn = 32 # default to 9 am morning ending
# break
# if verbosity > 0:
# print('In dataset_from_thresh() using {0} morning load values for Building {1}'
# ' because series arg is of type {2}'.format(morn, name, type(series)))
# extras = (+ int('dow' in features) * 7
# + int('moy' in features) * 12
# + int('woy' in features)
# + int('date' in features)
# + morn)
# if verbosity > 0:
# print('The total input vector length (dimension) is now {0}'.format(N + extras))
# ds = pb.datasets.SupervisedDataSet(N + extras, 1)
# first_date = samples[0]['target'].index[0].date().toordinal()
# last_date = samples[-1]['target'].index[0].date().toordinal()
# date_range = (last_date - first_date) or 1
# bit_scale = 5 # number of standard deviations for the magnitude of bit
# if verbosity > 0:
# print('Adding features for building {3}, {0}, and a morning time series of len {2}, '
# 'to each of the {1} vectors (samples)'.format(features, len(samples), morn, name))
# for sampnum, sample in enumerate(samples):
# # sample['input'] and ['output'] are pd.Series tables so convert them to normal list()
# inputs = list(sample['input'].values)
# # the date we're trying to predict the rhreshold for
# timestamp = sample['target'].index[0]
# for feature_name in sorted_features:
# if feature_name.startswith('morn'):
# day = get_day(series, date=timestamp.date())
# morning_loads = (day.values[:morn] - mean) / std
# if verbosity > 1:
# print('day = {0} and morning = {1}'.format(len(day), len(morning_loads)))
# inputs = list(morning_loads) + inputs
# elif feature_name == 'dow':
# dow_bits = [0] * 7
# dow_bits[timestamp.weekday()] = bit_scale
# inputs = dow_bits + inputs
# elif feature_name == 'moy':
# moy_bits = [0] * 12
# moy_bits[timestamp.month - 1] = bit_scale
# inputs = moy_bits + inputs
# elif feature_name == 'woy':
# inputs = [(timestamp.weekofyear - 26.) * 3 * bit_scale / 52] + inputs
# elif feature_name == 'date':
# inputs = [(timestamp.date().toordinal() - first_date - date_range / 2.) * 3 * bit_scale /
# date_range ] + inputs
# if pd.isnull(inputs).any():
# msg = 'Feature "{0}" within the feature list: {1} created null/NaN input values\nFor sample {2}'
# ' and date {3}\nInput vector positions {4}:\nInput vector: {5}'.format(
# feature_name, sorted_features, sampnum, timestamp, ann.table_nan_locs(inputs), inputs)
# msg += '\nBuilding load Series:\n{0}\n'.format(series)
# if ignore_nans:
# warnings.warn(msg)
# else:
# raise ValueError(msg)
# ds.addSample(inputs, list(sample['target'].values))
# return ds, mean, std, thresh
def dataset_nan_locs(ds):
"""
from http://stackoverflow.com/a/14033137/623735
# gets the indices of the rows with nan values in a dataframe
pd.isnull(df).any(1).nonzero()[0]
"""
ans = []
for sampnum, sample in enumerate(ds):
if pd.isnull(sample).any():
ans += [{
'sample': sampnum,
'input': pd.isnull(sample[0]).nonzero()[0],
'output': pd.isnull(sample[1]).nonzero()[0],
}]
return ans
def table_nan_locs(table):
    """Return (row, column) index pairs for every NaN/null cell in `table`.

    `table` may be a sequence of rows (each row a sequence of values) or a
    flat sequence of scalars; a null scalar is reported as column 0.

    Based on http://stackoverflow.com/a/14033137/623735
    """
    coords = []
    for r, row in enumerate(table):
        try:
            nulls = pd.isnull(row)
            if nulls.any():
                coords.extend((r, c) for c in nulls.nonzero()[0])
        except AttributeError:
            # `row` is a scalar (pd.isnull returned a plain bool), not a sequence
            if pd.isnull(row):
                coords.append((r, 0))
    return coords
def plot_network_results(network, ds=None, mean=0, std=1, title='', show=True, save=True):
    """Simulate `network` on `ds`, plot the result, and optionally show/save it.

    Identical to plot_trainer except `network` and `ds` must be provided separately.

    Arguments:
        network: trained pybrain Network to simulate
        ds: DataSet to activate the network on
        mean (float): denormalization offset for the plotted values
        std (float): denormalization scale for the plotted values
        title (str): plot title; also used to derive the saved PNG filename
        show (bool): display the figure (figure is cleared afterwards if not shown)
        save (bool or str): truthy to save a PNG; a directory path to save it there

    Returns:
        3-tuple: (network, mean, std)
    """
    simulated = sim_network(network=network, ds=ds, mean=mean, std=std)
    simulated.plot()
    plt.xlabel('Date')
    plt.ylabel('Threshold (kW)')
    plt.title(title)
    if show:
        try:
            # ipython notebook overrides plt.show and doesn't have a block kwarg
            plt.show(block=False)
        except TypeError:
            plt.show()
    if save:
        png_name = 'ann_performance_for_{0}.png'.format(title).replace(' ', '_')
        if isinstance(save, basestring) and os.path.isdir(save):
            png_name = os.path.join(save, png_name)
        plt.savefig(png_name)
    if not show:
        # nothing was displayed, so clear the figure for the next caller
        plt.clf()
    return network, mean, std
def trainer_results(trainer, mean=0, std=1, title='', show=True, save=True):
    """Plot the performance of the Network and SupervisedDataSet held by a pybrain Trainer.

    DataSet target and output values are denormalized before plotting with
    `output * std + mean`, inverting the normalization `(output - mean) / std`.

    Arguments:
        trainer (Trainer): a pybrain Trainer instance containing a valid Network and DataSet
        mean (float): mean of the denormalized dataset (default: 0); only affects plot scale
        std (float): standard deviation of the denormalized dataset (default: 1)
        title (str): title to display on the plot
        show (bool): whether to display the plot
        save (bool or str): whether to save a PNG (a str is treated as a directory path)

    Returns:
        3-tuple: (trainer.module, mean, std), the network/dataset plus denormalization info
    """
    network = trainer.module
    dataset = trainer.ds
    return plot_network_results(network=network, ds=dataset, mean=mean, std=std,
                                title=title, show=show, save=save)
def sim_trainer(trainer, mean=0, std=1):
    """Activate a trainer's DataSet through its network.

    Returns a DataFrame(columns=['Output', 'Target']) denormalized with
    `output * std + mean`.
    """
    module = trainer.module
    dataset = trainer.ds
    return sim_network(network=module, ds=dataset, mean=mean, std=std)
|
hobson/pug-ann | pug/ann/example.py | train_weather_predictor | python | def train_weather_predictor(
location='Portland, OR',
years=range(2013, 2016,),
delays=(1, 2, 3),
inputs=('Min Temperature', 'Max Temperature', 'Min Sea Level Pressure', u'Max Sea Level Pressure', 'WindDirDegrees',),
outputs=(u'Max TemperatureF',),
N_hidden=6,
epochs=30,
use_cache=False,
verbosity=2,
):
df = weather.daily(location, years=years, use_cache=use_cache, verbosity=verbosity).sort()
ds = util.dataset_from_dataframe(df, normalize=False, delays=delays, inputs=inputs, outputs=outputs, verbosity=verbosity)
nn = util.ann_from_ds(ds, N_hidden=N_hidden, verbosity=verbosity)
trainer = util.build_trainer(nn, ds=ds, verbosity=verbosity)
trainer.trainEpochs(epochs)
columns = []
for delay in delays:
columns += [inp + "[-{}]".format(delay) for inp in inputs]
columns += list(outputs)
columns += ['Predicted {}'.format(outp) for outp in outputs]
table = [list(i) + list(t) + list(trainer.module.activate(i)) for i, t in zip(trainer.ds['input'], trainer.ds['target'])]
df = pd.DataFrame(table, columns=columns, index=df.index[max(delays):])
#comparison = df[[] + list(outputs)]
return trainer, df | Train a neural net to predict the weather for tomorrow based on past weather.
Builds a linear single hidden layer neural net (multi-dimensional nonlinear regression).
The dataset is a basic SupervisedDataSet rather than a SequentialDataSet, so the training set
and the test set are sampled randomly. This means that historical data for one sample (the delayed
input vector) will likely be used as the target for other samples.
Uses CSVs scraped from wunderground (without an api key) to get daily weather for the years indicated.
Arguments:
location (str): City and state in standard US postal service format: "City, ST"
alternatively an airport code like "PDX or LAX"
delays (list of int): sample delays to use for the input tapped delay line.
Positive and negative values are treated the same as sample counts into the past.
default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3
years (int or list of int): list of 4-digit years to download weather from wunderground
inputs (list of int or list of str): column indices or labels for the inputs
outputs (list of int or list of str): column indices or labels for the outputs
Returns:
3-tuple: tuple(dataset, list of means, list of stds)
means and stds allow normalization of new inputs and denormalization of the outputs | train | https://github.com/hobson/pug-ann/blob/8a4d7103a744d15b4a737fc0f9a84c823973e0ec/pug/ann/example.py#L28-L79 | [
"def daily(location='Fresno, CA', years=1, use_cache=True, verbosity=1):\n \"\"\"Retrieve weather for the indicated airport code or 'City, ST' string.\n\n >>> df = daily('Camas, WA', verbosity=-1)\n >>> 365 <= len(df) <= 365 * 2 + 1\n True\n\n Sacramento data has gaps (airport KMCC):\n 8/21/2013 is missing from 2013.\n Whole months are missing from 2014.\n >>> df = daily('Sacramento, CA', years=[2013], verbosity=-1)\n >>> 364 <= len(df) <= 365\n True\n >>> df.columns\n Index([u'PST', u'Max TemperatureF', u'Mean TemperatureF', u'Min TemperatureF', u'Max Dew PointF', u'MeanDew PointF', u'Min DewpointF', ...\n \"\"\"\n this_year = datetime.date.today().year\n if isinstance(years, (int, float)):\n # current (incomplete) year doesn't count in total number of years\n # so 0 would return this calendar year's weather data\n years = np.arange(0, int(years) + 1)\n years = sorted(years)\n if not all(1900 <= yr <= this_year for yr in years):\n years = np.array([abs(yr) if (1900 <= abs(yr) <= this_year) else (this_year - abs(int(yr))) for yr in years])[::-1]\n\n airport_code = airport(location, default=location)\n\n # refresh the cache each time the start or end year changes\n cache_path = 'daily-{}-{}-{}.csv'.format(airport_code, years[0], years[-1])\n cache_path = os.path.join(CACHE_PATH, cache_path)\n if use_cache:\n try:\n return pd.DataFrame.from_csv(cache_path)\n except:\n pass\n\n df = pd.DataFrame()\n for year in years:\n url = ('http://www.wunderground.com/history/airport/{airport}/{yearstart}/1/1/' +\n 'CustomHistory.html?dayend=31&monthend=12&yearend={yearend}' +\n '&req_city=&req_state=&req_statename=&reqdb.zip=&reqdb.magic=&reqdb.wmo=&MR=1&format=1').format(\n airport=airport_code,\n yearstart=year,\n yearend=year\n )\n if verbosity > 1:\n print('GETing *.CSV using \"{0}\"'.format(url))\n buf = urllib.urlopen(url).read()\n if verbosity > 0:\n N = buf.count('\\n')\n M = (buf.count(',') + N) / float(N)\n print('Retrieved CSV for airport code \"{}\" with appox. 
{} lines and {} columns = {} cells.'.format(\n airport_code, N, int(round(M)), int(round(M)) * N))\n if verbosity > 2:\n print(buf)\n table = util.read_csv(buf, format='header+values-list', numbers=True)\n # # clean up the last column (if it contains <br> tags)\n table = [util.strip_br(row) if len(row) > 1 else row for row in table]\n # numcols = max(len(row) for row in table)\n # table = [row for row in table if len(row) == numcols]\n columns = table.pop(0)\n tzs = [s for s in columns if (s[1:] in ['ST', 'DT'] and s[0] in 'PMCE')]\n dates = [float('nan')] * len(table)\n for i, row in enumerate(table):\n for j, value in enumerate(row):\n if not value and value is not None:\n value = 0\n continue\n if columns[j] in tzs:\n table[i][j] = util.make_tz_aware(value, tz=columns[j])\n if isinstance(table[i][j], datetime.datetime):\n dates[i] = table[i][j]\n continue\n try:\n table[i][j] = float(value)\n if not (table[i][j] % 1):\n table[i][j] = int(table[i][j])\n except:\n pass\n df0 = pd.DataFrame(table, columns=columns, index=dates)\n df = df.append(df0)\n\n if verbosity > 1:\n print(df)\n\n try:\n df.to_csv(cache_path)\n except:\n if verbosity > 0 and use_cache:\n from traceback import print_exc\n print_exc()\n warnings.warn('Unable to write weather data to cache file at {}'.format(cache_path))\n\n return df\n",
"def dataset_from_dataframe(df, delays=(1, 2, 3), inputs=(1, 2, -1), outputs=(-1,), normalize=False, verbosity=1):\n \"\"\"Compose a pybrain.dataset from a pandas DataFrame\n\n Arguments:\n delays (list of int): sample delays to use for the input tapped delay line\n Positive and negative values are treated the same as sample counts into the past.\n default: [1, 2, 3], in z-transform notation: z^-1 + z^-2 + z^-3\n inputs (list of int or list of str): column indices or labels for the inputs\n outputs (list of int or list of str): column indices or labels for the outputs\n normalize (bool): whether to divide each input to be normally distributed about 0 with std 1\n\n Returns:\n 3-tuple: tuple(dataset, list of means, list of stds)\n means and stds allow normalization of new inputs and denormalization of the outputs\n\n TODO:\n\n Detect categorical variables with low dimensionality and split into separate bits\n Vowpel Wabbit hashes strings into an int?\n Detect ordinal variables and convert to continuous int sequence\n SEE: http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm\n \"\"\"\n if isinstance(delays, int):\n if delays:\n delays = range(1, delays + 1)\n else:\n delays = [0]\n delays = np.abs(np.array([int(i) for i in delays]))\n inputs = [df.columns[int(inp)] if isinstance(inp, (float, int)) else str(inp) for inp in inputs]\n outputs = [df.columns[int(out)] if isinstance(out, (float, int)) else str(out) for out in (outputs or [])]\n\n inputs = [fuzzy_get(df.columns, i) for i in inputs]\n outputs = [fuzzy_get(df.columns, o) for o in outputs]\n\n N_inp = len(inputs)\n N_out = len(outputs)\n\n inp_outs = inputs + outputs\n if verbosity > 0:\n print(\"inputs: {}\\noutputs: {}\\ndelays: {}\\n\".format(inputs, outputs, delays))\n means, stds = np.zeros(len(inp_outs)), np.ones(len(inp_outs))\n if normalize:\n means, stds = df[inp_outs].mean(), df[inp_outs].std()\n\n if normalize and verbosity > 0:\n print(\"Input mean values (used to normalize input 
biases): {}\".format(means[:N_inp]))\n print(\"Output mean values (used to normalize output biases): {}\".format(means[N_inp:]))\n ds = pb.datasets.SupervisedDataSet(N_inp * len(delays), N_out)\n if verbosity > 0:\n print(\"Dataset dimensions are {}x{}x{} (records x indim x outdim) for {} delays, {} inputs, {} outputs\".format(\n len(df), ds.indim, ds.outdim, len(delays), len(inputs), len(outputs)))\n # FIXME: normalize the whole matrix at once and add it quickly rather than one sample at a time\n if delays == np.array([0]) and not normalize:\n if verbosity > 0:\n print(\"No tapped delay lines (delays) were requested, so using undelayed features for the dataset.\")\n assert(df[inputs].values.shape[0] == df[outputs].values.shape[0])\n ds.setField('input', df[inputs].values)\n ds.setField('target', df[outputs].values)\n ds.linkFields(['input', 'target'])\n # for inp, outp in zip(df[inputs].values, df[outputs].values):\n # ds.appendLinked(inp, outp)\n assert(len(ds['input']) == len(ds['target']))\n else:\n for i, out_vec in enumerate(df[outputs].values):\n if verbosity > 0 and i % 100 == 0:\n print(\"{}%\".format(i / .01 / len(df)))\n elif verbosity > 1:\n print('sample[{i}].target={out_vec}'.format(i=i, out_vec=out_vec))\n if i < max(delays):\n continue\n inp_vec = []\n for delay in delays:\n inp_vec += list((df[inputs].values[i - delay] - means[:N_inp]) / stds[:N_inp])\n ds.addSample(inp_vec, (out_vec - means[N_inp:]) / stds[N_inp:])\n if verbosity > 0:\n print(\"Dataset now has {} samples\".format(len(ds)))\n if normalize:\n return ds, means, stds\n else:\n return ds\n",
"def ann_from_ds(ds=None, N_input=3, N_hidden=0, N_output=1, verbosity=1):\n N_input = getattr(ds, 'indim', N_input)\n N_output = getattr(ds, 'outdim', N_output)\n N_hidden = N_hidden or getattr(ds, 'paramdim', N_hidden + N_input + N_output) - N_input - N_output\n N_hidden = max(round(min(N_hidden, len(ds) / float(N_input) / float(N_output) / 5.)), N_output)\n\n return build_ann(N_input=N_input, N_hidden=N_hidden, N_output=N_output, verbosity=verbosity)\n",
"def build_trainer(nn, ds, verbosity=1):\n \"\"\"Configure neural net trainer from a pybrain dataset\"\"\"\n return pb.supervised.trainers.rprop.RPropMinusTrainer(nn, dataset=ds, batchlearning=True, verbose=bool(verbosity))\n"
] | """Example pybrain network training to predict the weather
Installation:
pip install pug-ann
Examples:
In the future DataSets should have an attribute `columns` or `df` to facilitate converting back to dataframes
>>> trainer, df = train_weather_predictor('San Francisco, CA', epochs=2, inputs=['Max TemperatureF'], outputs=['Max TemperatureF'], years=range(2013,2015), delays=(1,), use_cache=True, verbosity=0)
>>> all(trainer.module.activate(trainer.ds['input'][0]) == trainer.module.activate(trainer.ds['input'][1]))
False
>>> trainer.trainEpochs(5)
Make sure NN hasn't saturated (as it might for a sigmoid hidden layer)
>>> all(trainer.module.activate(trainer.ds['input'][0]) == trainer.module.activate(trainer.ds['input'][1]))
False
"""
import datetime
from pug.ann.data import weather
from pug.ann import util
from pug.nlp.util import make_date, update_dict
from matplotlib import pyplot as plt
import pandas as pd
def oneday_weather_forecast(
    location='Portland, OR',
    inputs=('Min Temperature', 'Mean Temperature', 'Max Temperature', 'Max Humidity', 'Mean Humidity', 'Min Humidity', 'Max Sea Level Pressure', 'Mean Sea Level Pressure', 'Min Sea Level Pressure', 'Wind Direction'),
    outputs=('Min Temperature', 'Mean Temperature', 'Max Temperature', 'Max Humidity'),
    date=None,
    epochs=200,
    delays=(1, 2, 3, 4),
    num_years=4,
    use_cache=False,
    verbosity=1,
):
    """ Provide a weather forecast for tomorrow based on historical weather at that location

    Downloads (and caches) daily weather history, trains a predictor net on it,
    then activates the net to estimate the `outputs` for yesterday, today, and tomorrow.

    Arguments:
        location (str): "City, ST" or an airport code understood by weather.daily
        inputs (sequence of str): weather columns used as net inputs
        outputs (sequence of str): weather columns the net predicts
        date (date/str/None): anchor date for the forecast (default: today)
        epochs (int): number of training epochs for the predictor
        delays (sequence of int): tapped-delay-line sample delays for the inputs
        num_years (int): years of history to train on (falsy values fall back to 10)
        use_cache (bool): reuse previously downloaded weather CSVs for the initial download
        verbosity (int): passed through to the weather/ANN utilities

    Returns:
        dict: 'trainer' (the pybrain Trainer) plus 'yesterday'/'today'/'tomorrow',
            each a dict of predicted output values with an added 'date' entry
    """
    date = make_date(date or datetime.datetime.now().date())
    num_years = int(num_years or 10)
    # train on the num_years calendar years up to and including the anchor year
    years = range(date.year - num_years, date.year + 1)
    # download/refresh the history cache; .sort() orders the rows chronologically
    df = weather.daily(location, years=years, use_cache=use_cache, verbosity=verbosity).sort()
    # because up-to-date weather history was cached above, can use that cache, regardless of use_cache kwarg
    trainer, df = train_weather_predictor(
        location,
        years=years,
        delays=delays,
        inputs=inputs,
        outputs=outputs,
        epochs=epochs,
        verbosity=verbosity,
        use_cache=True,
    )
    nn = trainer.module
    forecast = {'trainer': trainer}
    # activate the net on the last two training samples for "yesterday" and "today"
    yesterday = dict(zip(outputs, nn.activate(trainer.ds['input'][-2])))
    forecast['yesterday'] = update_dict(yesterday, {'date': df.index[-2].date()})
    today = dict(zip(outputs, nn.activate(trainer.ds['input'][-1])))
    forecast['today'] = update_dict(today, {'date': df.index[-1].date()})
    # build a fresh (unnormalized) input vector from the most recent max(delays) rows
    ds = util.input_dataset_from_dataframe(df[-max(delays):], delays=delays, inputs=inputs, normalize=False, verbosity=0)
    tomorrow = dict(zip(outputs, nn.activate(ds['input'][-1])))
    forecast['tomorrow'] = update_dict(tomorrow, {'date': (df.index[-1] + datetime.timedelta(1)).date()})
    return forecast
def thermostat(
    location='Camas, WA',
    days=100,
    capacity=1000,
    max_eval=1000,
):
    """Control the thermostat of an AirCon system whose chiller has finite thermal capacity.

    Intended for controlling a chiller: something that can cool down overnight and
    absorb heat (to cool the building) during the hottest part of the day.

    The plan is a linear single-layer neural net (multi-dimensional regression)
    trained on daily weather CSVs scraped from wunderground (no api key required).
    Because the dataset would be a basic SupervisedDataSet rather than a
    SequentialDataSet, training and test samples are selected randomly, so
    historical data for one sample may be used as the target (future data)
    for other samples.

    Arguments:
        location (str): "City, ST" in standard US postal format, or an airport code like "PDX"
        days (int): number of days of weather data to download from wunderground
        capacity (int): thermal energy capacity of the chiller
        max_eval (int): maximum number of objective-function evaluations

    Returns:
        None: not yet implemented -- this function is currently a stub.
    """
    pass
def explore_maze():
    """Train a Q-learning agent on a small maze and print the greedy policy.

    Relies on the pybrain RL names imported in the try block at the bottom of
    this file (Maze, ActionValueTable, LearningAgent, Q, MDPMazeTask, Experiment).
    Raises AssertionError if the learned greedy policy differs from the expected one.
    """
    # simplified version of the reinforcement learning tutorial example
    structure = [
        list('!!!!!!!!!!'),
        list('! ! ! ! !'),
        list('! !! ! ! !'),
        list('! ! !'),
        list('! !!!!!! !'),
        list('! ! ! !'),
        list('! ! !!!! !'),
        list('! !'),
        list('! !!!!! !'),
        list('! ! !'),
        list('!!!!!!!!!!'),
    ]
    # walls ('!') encode as nonzero ints, free space (' ') as 0
    structure = np.array([[ord(c)-ord(' ') for c in row] for row in structure])
    shape = np.array(structure.shape)
    environment = Maze(structure, tuple(shape - 2))
    # one table row per maze cell, 4 actions (NESW)
    controller = ActionValueTable(shape.prod(), 4)
    controller.initialize(1.)
    learner = Q()
    agent = LearningAgent(controller, learner)
    task = MDPMazeTask(environment)
    experiment = Experiment(task, agent)
    # 30 learning batches of 30 interactions each
    for i in range(30):
        experiment.doInteractions(30)
        agent.learn()
        agent.reset()
    # NOTE(review): result of this expression is discarded -- presumably left over
    # from a pylab plotting step (see maze() below); confirm before removing.
    controller.params.reshape(shape.prod(), 4).max(1).reshape(*shape)
    # (0, 0) is upper left and (0, N) is upper right, so flip matrix upside down to match NESW action order
    greedy_policy = np.argmax(controller.params.reshape(shape.prod(), 4), 1)
    greedy_policy = np.flipud(np.array(list('NESW'))[greedy_policy].reshape(shape))
    maze = np.flipud(np.array(list(' #'))[structure])
    print('Maze map:')
    print('\n'.join(''.join(row) for row in maze))
    print('Greedy policy:')
    print('\n'.join(''.join(row) for row in greedy_policy))
    # NOTE(review): the expected policy below is 5 rows of 5 chars, but the maze
    # grid above is 11x10 -- this assert looks stale; confirm against a known-good run.
    assert '\n'.join(''.join(row) for row in greedy_policy) == 'NNNNN\nNSNNN\nNSNNN\nNEENN\nNNNNN'
#################################################################
## An online (reinforcement) learning example based on the
## cart pole-balancing example in pybrian
## WIP to perform optimal control of Building HVAC system
## with limited electrical or thermal energy resource that is recharged every day
from pybrain.rl.environments import EpisodicTask
from pybrain.rl.environments.cartpole import CartPoleEnvironment
from pybrain.rl.environments.cartpole.nonmarkovpole import NonMarkovPoleEnvironment
class BalanceTask(EpisodicTask):
    """ The task of balancing some pole(s) on a cart """
    def __init__(self, env=None, maxsteps=1000, desiredValue=0, location='Portland, OR'):
        """
        :key env: (optional) an instance of a CartPoleEnvironment (or a subclass thereof)
        :key maxsteps: maximal number of steps (default: 1000)
        :key desiredValue: stored target value (not referenced elsewhere in this class)
        :key location: "City, ST" or airport code, resolved via weather.airport
        """
        self.location = location
        # resolve the city/state (or airport code) to a wunderground airport code
        self.airport_code = weather.airport(location)
        self.desiredValue = desiredValue
        if env is None:
            env = CartPoleEnvironment()
        EpisodicTask.__init__(self, env)
        self.N = maxsteps  # maximum number of steps per episode
        self.t = 0  # current step counter within the episode
        # scale position and angle, don't scale velocities (unknown maximum)
        self.sensor_limits = [(-3, 3)]
        for i in range(1, self.outdim):
            if isinstance(self.env, NonMarkovPoleEnvironment) and i % 2 == 0:
                self.sensor_limits.append(None)
            else:
                self.sensor_limits.append((-np.pi, np.pi))
        # self.sensor_limits = [None] * 4
        # actor between -10 and 10 Newton
        # NOTE(review): the comment above disagrees with the (-50, 50) limits below -- confirm intent.
        self.actor_limits = [(-50, 50)]

    def reset(self):
        """Reset the episode: zero the step counter and fetch a day of weather."""
        EpisodicTask.reset(self)
        # NOTE(review): weather.daily() takes (location, years, use_cache, verbosity) and
        # has no `date` kwarg, so this call looks like it would raise TypeError -- confirm.
        self.day = weather.daily(date='random')
        self.t = 0

    def performAction(self, action):
        # count the step, then delegate to the base class
        self.t += 1
        EpisodicTask.performAction(self, action)

    def isFinished(self):
        """Episode ends when a pole falls, the cart leaves the track, or maxsteps elapse."""
        if max(list(map(abs, self.env.getPoleAngles()))) > 0.7:
            # pole has fallen
            return True
        elif abs(self.env.getCartPosition()) > 2.4:
            # cart is out of it's border conditions
            return True
        elif self.t >= self.N:
            # maximal timesteps
            return True
        return False

    def getReward(self):
        """Return 0 near the balanced/centered state, a large penalty on failure, else -1."""
        angles = list(map(abs, self.env.getPoleAngles()))
        s = abs(self.env.getCartPosition())
        reward = 0
        if min(angles) < 0.05 and abs(s) < 0.05:
            reward = 0
        elif max(angles) > 0.7 or abs(s) > 2.4:
            # failure: penalize proportionally to the remaining (unused) timesteps
            reward = -2 * (self.N - self.t)
        else:
            reward = -1
        return reward

    def setMaxLength(self, n):
        # set the maximal episode length (number of steps)
        self.N = n
from pybrain.tools.shortcuts import buildNetwork, NetworkError
from pybrain.optimization.hillclimber import HillClimber
import time
import numpy as np
def run_competition(builders=(), task=None, Optimizer=HillClimber, rounds=3, max_eval=20, N_hidden=3, verbosity=0):
    """ pybrain buildNetwork builds a subtly different network structure than build_ann... so compete them!

    Arguments:
        builders (sequence of callables): extra network builders to compete, each called
            as builder(indim, N_hidden, outdim); buildNetwork and util.build_ann are
            always appended to the field
        task (Task): task to compete at (default: a freshly constructed BalanceTask)
        Optimizer (class): pybrain.Optimizer class to instantiate for each competitor
        rounds (int): number of times to run the competition
        max_eval (int): number of objective function evaluations that the optimizer is allowed
            in each round
        N_hidden (int): number of hidden nodes in each network being competed
        verbosity (int): >= 0 prints per-round scores; > -1 prints the final summary

    Returns:
        2-tuple: (results, means) where results is a list of per-round tuples of
            (best_score, elapsed_seconds, network) per competitor, and means holds
            each competitor's mean score and mean runtime

    The functional difference that I can see is that:
        buildNetwork connects the bias to the output
        build_ann does not
    The api differences are:
        build_ann allows heterogeneous layer types but the output layer is always linear
        buildNetwork allows specification of the output layer type
    """
    # Build the default task lazily: a `task=BalanceTask()` default argument would be
    # constructed once at import time (hitting the network via weather.airport in
    # BalanceTask.__init__) and the same mutable task would be shared across calls.
    if task is None:
        task = BalanceTask()
    results = []
    builders = list(builders) + [buildNetwork, util.build_ann]
    for r in range(rounds):
        heat = []
        # FIXME: shuffle the order of the builders to keep things fair
        # (like switching sides of the tennis court)
        for builder in builders:
            try:
                competitor = builder(task.outdim, N_hidden, task.indim, verbosity=verbosity)
            except NetworkError:
                # buildNetwork doesn't accept a verbosity kwarg
                competitor = builder(task.outdim, N_hidden, task.indim)
            # TODO: verify that a full reset is actually happening
            task.reset()
            optimizer = Optimizer(task, competitor, maxEvaluations=max_eval)
            t0 = time.time()
            nn, nn_best = optimizer.learn()
            t1 = time.time()
            heat += [(nn_best, t1 - t0, nn)]
        results += [tuple(heat)]
        if verbosity >= 0:
            print([competitor_scores[:2] for competitor_scores in heat])
    # # alternatively:
    # agent = ( pybrain.rl.agents.OptimizationAgent(net, HillClimber())
    #           or
    #           pybrain.rl.agents.LearningAgent(net, pybrain.rl.learners.ENAC()) )
    # exp = pybrain.rl.experiments.EpisodicExperiment(task, agent).doEpisodes(100)
    # per-competitor means: [mean scores, mean runtimes]
    means = [[np.array([r[i][j] for r in results]).mean() for i in range(len(results[0]))] for j in range(2)]
    if verbosity > -1:
        print('Mean Performance:')
        print(means)
        perfi, speedi = np.argmax(means[0]), np.argmin(means[1])
        print('And the winner for performance is ... Algorithm #{} (0-offset array index [{}])'.format(perfi + 1, perfi))
        print('And the winner for speed is ... Algorithm #{} (0-offset array index [{}])'.format(speedi + 1, speedi))
    return results, means
try:
    # this will fail on latest master branch of pybrain as well as latest pypi release of pybrain
    from pybrain.rl.environments.mazes import Maze, MDPMazeTask
    from pybrain.rl.learners.valuebased import ActionValueTable
    from pybrain.rl.agents import LearningAgent
    from pybrain.rl.learners import Q  # , SARSA # (State-Action-Reward-State-Action)
    from pybrain.rl.experiments import Experiment
    # from pybrain.rl.environments import Task
    import pylab

    def maze():
        """Interactively train a Q-learning maze agent, plotting the value table with pylab.

        Like explore_maze() but with live pylab visualization and more training
        iterations; only defined when the pybrain RL imports above succeed.
        """
        # import sys, time
        pylab.gray()
        pylab.ion()
        # The goal appears to be in the upper right
        structure = [
            '!!!!!!!!!!',
            '! ! ! ! !',
            '! !! ! ! !',
            '! ! !',
            '! !!!!!! !',
            '! ! ! !',
            '! ! !!!! !',
            '! !',
            '! !!!!! !',
            '! ! !',
            '!!!!!!!!!!',
        ]
        # walls ('!') encode as nonzero ints, free space (' ') as 0
        structure = np.array([[ord(c)-ord(' ') for c in row] for row in structure])
        shape = np.array(structure.shape)
        environment = Maze(structure, tuple(shape - 2))
        controller = ActionValueTable(shape.prod(), 4)
        controller.initialize(1.)
        learner = Q()
        agent = LearningAgent(controller, learner)
        task = MDPMazeTask(environment)
        experiment = Experiment(task, agent)
        for i in range(100):
            experiment.doInteractions(100)
            agent.learn()
            agent.reset()
            # 4 actions, 81 locations/states (9x9 grid)
            # max(1) gives/plots the biggest objective function value for that square
            # NOTE(review): 81/9x9 assumes the original tutorial's 9x9 maze; the grid
            # above is 11x10, so this reshape looks inconsistent -- confirm.
            pylab.pcolor(controller.params.reshape(81, 4).max(1).reshape(9, 9))
            pylab.draw()
        # (0, 0) is upper left and (0, N) is upper right, so flip matrix upside down to match NESW action order
        greedy_policy = np.argmax(controller.params.reshape(shape.prod(), 4), 1)
        greedy_policy = np.flipud(np.array(list('NESW'))[greedy_policy].reshape(shape))
        maze = np.flipud(np.array(list(' #'))[structure])
        print('Maze map:')
        print('\n'.join(''.join(row) for row in maze))
        print('Greedy policy:')
        print('\n'.join(''.join(row) for row in greedy_policy))
        # pylab.show()
except ImportError:
    pass
if __name__ == '__main__':
    import sys
    try:
        explore_maze()
    except:  # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit -- confirm intent
        from traceback import format_exc
        # exit with the traceback as the status message (nonzero exit status)
        sys.exit(format_exc())
    print(run_competition(verbosity=0))
    sys.exit(0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.