repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
moderngl/moderngl | examples/window/base.py | BaseWindow.set_default_viewport | def set_default_viewport(self):
"""
Calculates the viewport based on the configured aspect ratio.
Will add black borders and center the viewport if the window
do not match the configured viewport.
If aspect ratio is None the viewport will be scaled
to the entire window size regardless of size.
"""
if self.aspect_ratio:
expected_width = int(self.buffer_height * self.aspect_ratio)
expected_height = int(expected_width / self.aspect_ratio)
if expected_width > self.buffer_width:
expected_width = self.buffer_width
expected_height = int(expected_width / self.aspect_ratio)
blank_space_x = self.buffer_width - expected_width
blank_space_y = self.buffer_height - expected_height
self.ctx.viewport = (
blank_space_x // 2,
blank_space_y // 2,
expected_width,
expected_height,
)
else:
self.ctx.viewport = (0, 0, self.buffer_width, self.buffer_height) | python | def set_default_viewport(self):
"""
Calculates the viewport based on the configured aspect ratio.
Will add black borders and center the viewport if the window
do not match the configured viewport.
If aspect ratio is None the viewport will be scaled
to the entire window size regardless of size.
"""
if self.aspect_ratio:
expected_width = int(self.buffer_height * self.aspect_ratio)
expected_height = int(expected_width / self.aspect_ratio)
if expected_width > self.buffer_width:
expected_width = self.buffer_width
expected_height = int(expected_width / self.aspect_ratio)
blank_space_x = self.buffer_width - expected_width
blank_space_y = self.buffer_height - expected_height
self.ctx.viewport = (
blank_space_x // 2,
blank_space_y // 2,
expected_width,
expected_height,
)
else:
self.ctx.viewport = (0, 0, self.buffer_width, self.buffer_height) | [
"def",
"set_default_viewport",
"(",
"self",
")",
":",
"if",
"self",
".",
"aspect_ratio",
":",
"expected_width",
"=",
"int",
"(",
"self",
".",
"buffer_height",
"*",
"self",
".",
"aspect_ratio",
")",
"expected_height",
"=",
"int",
"(",
"expected_width",
"/",
"... | Calculates the viewport based on the configured aspect ratio.
Will add black borders and center the viewport if the window
do not match the configured viewport.
If aspect ratio is None the viewport will be scaled
to the entire window size regardless of size. | [
"Calculates",
"the",
"viewport",
"based",
"on",
"the",
"configured",
"aspect",
"ratio",
".",
"Will",
"add",
"black",
"borders",
"and",
"center",
"the",
"viewport",
"if",
"the",
"window",
"do",
"not",
"match",
"the",
"configured",
"viewport",
".",
"If",
"aspe... | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/window/base.py#L150-L177 | train | 220,400 |
moderngl/moderngl | examples/window/base.py | BaseWindow.print_context_info | def print_context_info(self):
"""
Prints moderngl context info.
"""
print("Context Version:")
print('ModernGL:', moderngl.__version__)
print('vendor:', self.ctx.info['GL_VENDOR'])
print('renderer:', self.ctx.info['GL_RENDERER'])
print('version:', self.ctx.info['GL_VERSION'])
print('python:', sys.version)
print('platform:', sys.platform)
print('code:', self.ctx.version_code) | python | def print_context_info(self):
"""
Prints moderngl context info.
"""
print("Context Version:")
print('ModernGL:', moderngl.__version__)
print('vendor:', self.ctx.info['GL_VENDOR'])
print('renderer:', self.ctx.info['GL_RENDERER'])
print('version:', self.ctx.info['GL_VERSION'])
print('python:', sys.version)
print('platform:', sys.platform)
print('code:', self.ctx.version_code) | [
"def",
"print_context_info",
"(",
"self",
")",
":",
"print",
"(",
"\"Context Version:\"",
")",
"print",
"(",
"'ModernGL:'",
",",
"moderngl",
".",
"__version__",
")",
"print",
"(",
"'vendor:'",
",",
"self",
".",
"ctx",
".",
"info",
"[",
"'GL_VENDOR'",
"]",
... | Prints moderngl context info. | [
"Prints",
"moderngl",
"context",
"info",
"."
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/window/base.py#L187-L198 | train | 220,401 |
moderngl/moderngl | moderngl/texture.py | Texture.read | def read(self, *, level=0, alignment=1) -> bytes:
'''
Read the content of the texture into a buffer.
Keyword Args:
level (int): The mipmap level.
alignment (int): The byte alignment of the pixels.
Returns:
bytes
'''
return self.mglo.read(level, alignment) | python | def read(self, *, level=0, alignment=1) -> bytes:
'''
Read the content of the texture into a buffer.
Keyword Args:
level (int): The mipmap level.
alignment (int): The byte alignment of the pixels.
Returns:
bytes
'''
return self.mglo.read(level, alignment) | [
"def",
"read",
"(",
"self",
",",
"*",
",",
"level",
"=",
"0",
",",
"alignment",
"=",
"1",
")",
"->",
"bytes",
":",
"return",
"self",
".",
"mglo",
".",
"read",
"(",
"level",
",",
"alignment",
")"
] | Read the content of the texture into a buffer.
Keyword Args:
level (int): The mipmap level.
alignment (int): The byte alignment of the pixels.
Returns:
bytes | [
"Read",
"the",
"content",
"of",
"the",
"texture",
"into",
"a",
"buffer",
"."
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/texture.py#L270-L282 | train | 220,402 |
moderngl/moderngl | examples/window/__init__.py | parse_args | def parse_args(args=None):
"""Parse arguments from sys.argv"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-w', '--window',
default="pyqt5",
choices=find_window_classes(),
help='Name for the window type to use',
)
parser.add_argument(
'-fs', '--fullscreen',
action="store_true",
help='Open the window in fullscreen mode',
)
parser.add_argument(
'-vs', '--vsync',
type=str2bool,
default="1",
help="Enable or disable vsync",
)
parser.add_argument(
'-s', '--samples',
type=int,
default=4,
help="Specify the desired number of samples to use for multisampling",
)
parser.add_argument(
'-c', '--cursor',
type=str2bool,
default="true",
help="Enable or disable displaying the mouse cursor",
)
return parser.parse_args(args or sys.argv[1:]) | python | def parse_args(args=None):
"""Parse arguments from sys.argv"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-w', '--window',
default="pyqt5",
choices=find_window_classes(),
help='Name for the window type to use',
)
parser.add_argument(
'-fs', '--fullscreen',
action="store_true",
help='Open the window in fullscreen mode',
)
parser.add_argument(
'-vs', '--vsync',
type=str2bool,
default="1",
help="Enable or disable vsync",
)
parser.add_argument(
'-s', '--samples',
type=int,
default=4,
help="Specify the desired number of samples to use for multisampling",
)
parser.add_argument(
'-c', '--cursor',
type=str2bool,
default="true",
help="Enable or disable displaying the mouse cursor",
)
return parser.parse_args(args or sys.argv[1:]) | [
"def",
"parse_args",
"(",
"args",
"=",
"None",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'-w'",
",",
"'--window'",
",",
"default",
"=",
"\"pyqt5\"",
",",
"choices",
"=",
"find_window_classes",
... | Parse arguments from sys.argv | [
"Parse",
"arguments",
"from",
"sys",
".",
"argv"
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/window/__init__.py#L65-L99 | train | 220,403 |
moderngl/moderngl | moderngl/compute_shader.py | ComputeShader.run | def run(self, group_x=1, group_y=1, group_z=1) -> None:
'''
Run the compute shader.
Args:
group_x (int): The number of work groups to be launched in the X dimension.
group_y (int): The number of work groups to be launched in the Y dimension.
group_z (int): The number of work groups to be launched in the Z dimension.
'''
return self.mglo.run(group_x, group_y, group_z) | python | def run(self, group_x=1, group_y=1, group_z=1) -> None:
'''
Run the compute shader.
Args:
group_x (int): The number of work groups to be launched in the X dimension.
group_y (int): The number of work groups to be launched in the Y dimension.
group_z (int): The number of work groups to be launched in the Z dimension.
'''
return self.mglo.run(group_x, group_y, group_z) | [
"def",
"run",
"(",
"self",
",",
"group_x",
"=",
"1",
",",
"group_y",
"=",
"1",
",",
"group_z",
"=",
"1",
")",
"->",
"None",
":",
"return",
"self",
".",
"mglo",
".",
"run",
"(",
"group_x",
",",
"group_y",
",",
"group_z",
")"
] | Run the compute shader.
Args:
group_x (int): The number of work groups to be launched in the X dimension.
group_y (int): The number of work groups to be launched in the Y dimension.
group_z (int): The number of work groups to be launched in the Z dimension. | [
"Run",
"the",
"compute",
"shader",
"."
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/compute_shader.py#L54-L64 | train | 220,404 |
moderngl/moderngl | moderngl/compute_shader.py | ComputeShader.get | def get(self, key, default) -> Union[Uniform, UniformBlock, Subroutine, Attribute, Varying]:
'''
Returns a Uniform, UniformBlock, Subroutine, Attribute or Varying.
Args:
default: This is the value to be returned in case key does not exist.
Returns:
:py:class:`Uniform`, :py:class:`UniformBlock`, :py:class:`Subroutine`,
:py:class:`Attribute` or :py:class:`Varying`
'''
return self._members.get(key, default) | python | def get(self, key, default) -> Union[Uniform, UniformBlock, Subroutine, Attribute, Varying]:
'''
Returns a Uniform, UniformBlock, Subroutine, Attribute or Varying.
Args:
default: This is the value to be returned in case key does not exist.
Returns:
:py:class:`Uniform`, :py:class:`UniformBlock`, :py:class:`Subroutine`,
:py:class:`Attribute` or :py:class:`Varying`
'''
return self._members.get(key, default) | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"default",
")",
"->",
"Union",
"[",
"Uniform",
",",
"UniformBlock",
",",
"Subroutine",
",",
"Attribute",
",",
"Varying",
"]",
":",
"return",
"self",
".",
"_members",
".",
"get",
"(",
"key",
",",
"default",
... | Returns a Uniform, UniformBlock, Subroutine, Attribute or Varying.
Args:
default: This is the value to be returned in case key does not exist.
Returns:
:py:class:`Uniform`, :py:class:`UniformBlock`, :py:class:`Subroutine`,
:py:class:`Attribute` or :py:class:`Varying` | [
"Returns",
"a",
"Uniform",
"UniformBlock",
"Subroutine",
"Attribute",
"or",
"Varying",
"."
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/compute_shader.py#L66-L78 | train | 220,405 |
moderngl/moderngl | examples/window/sdl2/window.py | Window.resize | def resize(self, width, height):
"""
Sets the new size and buffer size internally
"""
self.width = width
self.height = height
self.buffer_width, self.buffer_height = self.width, self.height
self.set_default_viewport()
super().resize(self.buffer_width, self.buffer_height) | python | def resize(self, width, height):
"""
Sets the new size and buffer size internally
"""
self.width = width
self.height = height
self.buffer_width, self.buffer_height = self.width, self.height
self.set_default_viewport()
super().resize(self.buffer_width, self.buffer_height) | [
"def",
"resize",
"(",
"self",
",",
"width",
",",
"height",
")",
":",
"self",
".",
"width",
"=",
"width",
"self",
".",
"height",
"=",
"height",
"self",
".",
"buffer_width",
",",
"self",
".",
"buffer_height",
"=",
"self",
".",
"width",
",",
"self",
"."... | Sets the new size and buffer size internally | [
"Sets",
"the",
"new",
"size",
"and",
"buffer",
"size",
"internally"
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/window/sdl2/window.py#L76-L85 | train | 220,406 |
moderngl/moderngl | examples/window/sdl2/window.py | Window.process_events | def process_events(self):
"""
Loop through and handle all the queued events.
"""
for event in sdl2.ext.get_events():
if event.type == sdl2.SDL_MOUSEMOTION:
self.example.mouse_position_event(event.motion.x, event.motion.y)
elif event.type == sdl2.SDL_MOUSEBUTTONUP:
# Support left and right mouse button for now
if event.button.button in [1, 3]:
self.example.mouse_press_event(
event.motion.x, event.motion.y,
1 if event.button.button == 1 else 2,
)
elif event.type == sdl2.SDL_MOUSEBUTTONDOWN:
# Support left and right mouse button for now
if event.button.button in [1, 3]:
self.example.mouse_release_event(
event.motion.x, event.motion.y,
1 if event.button.button == 1 else 2,
)
elif event.type in [sdl2.SDL_KEYDOWN, sdl2.SDL_KEYUP]:
if event.key.keysym.sym == sdl2.SDLK_ESCAPE:
self.close()
self.example.key_event(event.key.keysym.sym, event.type)
elif event.type == sdl2.SDL_QUIT:
self.close()
elif event.type == sdl2.SDL_WINDOWEVENT:
if event.window.event == sdl2.SDL_WINDOWEVENT_RESIZED:
self.resize(event.window.data1, event.window.data2) | python | def process_events(self):
"""
Loop through and handle all the queued events.
"""
for event in sdl2.ext.get_events():
if event.type == sdl2.SDL_MOUSEMOTION:
self.example.mouse_position_event(event.motion.x, event.motion.y)
elif event.type == sdl2.SDL_MOUSEBUTTONUP:
# Support left and right mouse button for now
if event.button.button in [1, 3]:
self.example.mouse_press_event(
event.motion.x, event.motion.y,
1 if event.button.button == 1 else 2,
)
elif event.type == sdl2.SDL_MOUSEBUTTONDOWN:
# Support left and right mouse button for now
if event.button.button in [1, 3]:
self.example.mouse_release_event(
event.motion.x, event.motion.y,
1 if event.button.button == 1 else 2,
)
elif event.type in [sdl2.SDL_KEYDOWN, sdl2.SDL_KEYUP]:
if event.key.keysym.sym == sdl2.SDLK_ESCAPE:
self.close()
self.example.key_event(event.key.keysym.sym, event.type)
elif event.type == sdl2.SDL_QUIT:
self.close()
elif event.type == sdl2.SDL_WINDOWEVENT:
if event.window.event == sdl2.SDL_WINDOWEVENT_RESIZED:
self.resize(event.window.data1, event.window.data2) | [
"def",
"process_events",
"(",
"self",
")",
":",
"for",
"event",
"in",
"sdl2",
".",
"ext",
".",
"get_events",
"(",
")",
":",
"if",
"event",
".",
"type",
"==",
"sdl2",
".",
"SDL_MOUSEMOTION",
":",
"self",
".",
"example",
".",
"mouse_position_event",
"(",
... | Loop through and handle all the queued events. | [
"Loop",
"through",
"and",
"handle",
"all",
"the",
"queued",
"events",
"."
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/window/sdl2/window.py#L87-L122 | train | 220,407 |
moderngl/moderngl | examples/window/sdl2/window.py | Window.destroy | def destroy(self):
"""
Gracefully close the window
"""
sdl2.SDL_GL_DeleteContext(self.context)
sdl2.SDL_DestroyWindow(self.window)
sdl2.SDL_Quit() | python | def destroy(self):
"""
Gracefully close the window
"""
sdl2.SDL_GL_DeleteContext(self.context)
sdl2.SDL_DestroyWindow(self.window)
sdl2.SDL_Quit() | [
"def",
"destroy",
"(",
"self",
")",
":",
"sdl2",
".",
"SDL_GL_DeleteContext",
"(",
"self",
".",
"context",
")",
"sdl2",
".",
"SDL_DestroyWindow",
"(",
"self",
".",
"window",
")",
"sdl2",
".",
"SDL_Quit",
"(",
")"
] | Gracefully close the window | [
"Gracefully",
"close",
"the",
"window"
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/window/sdl2/window.py#L124-L130 | train | 220,408 |
moderngl/moderngl | examples/window/pyglet/window.py | Window.on_key_press | def on_key_press(self, symbol, modifiers):
"""
Pyglet specific key press callback.
Forwards and translates the events to the example
"""
self.example.key_event(symbol, self.keys.ACTION_PRESS) | python | def on_key_press(self, symbol, modifiers):
"""
Pyglet specific key press callback.
Forwards and translates the events to the example
"""
self.example.key_event(symbol, self.keys.ACTION_PRESS) | [
"def",
"on_key_press",
"(",
"self",
",",
"symbol",
",",
"modifiers",
")",
":",
"self",
".",
"example",
".",
"key_event",
"(",
"symbol",
",",
"self",
".",
"keys",
".",
"ACTION_PRESS",
")"
] | Pyglet specific key press callback.
Forwards and translates the events to the example | [
"Pyglet",
"specific",
"key",
"press",
"callback",
".",
"Forwards",
"and",
"translates",
"the",
"events",
"to",
"the",
"example"
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/window/pyglet/window.py#L96-L101 | train | 220,409 |
moderngl/moderngl | examples/window/pyglet/window.py | Window.on_key_release | def on_key_release(self, symbol, modifiers):
"""
Pyglet specific key release callback.
Forwards and translates the events to the example
"""
self.example.key_event(symbol, self.keys.ACTION_RELEASE) | python | def on_key_release(self, symbol, modifiers):
"""
Pyglet specific key release callback.
Forwards and translates the events to the example
"""
self.example.key_event(symbol, self.keys.ACTION_RELEASE) | [
"def",
"on_key_release",
"(",
"self",
",",
"symbol",
",",
"modifiers",
")",
":",
"self",
".",
"example",
".",
"key_event",
"(",
"symbol",
",",
"self",
".",
"keys",
".",
"ACTION_RELEASE",
")"
] | Pyglet specific key release callback.
Forwards and translates the events to the example | [
"Pyglet",
"specific",
"key",
"release",
"callback",
".",
"Forwards",
"and",
"translates",
"the",
"events",
"to",
"the",
"example"
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/window/pyglet/window.py#L103-L108 | train | 220,410 |
moderngl/moderngl | examples/window/pyglet/window.py | Window.on_mouse_motion | def on_mouse_motion(self, x, y, dx, dy):
"""
Pyglet specific mouse motion callback.
Forwards and traslates the event to the example
"""
# Screen coordinates relative to the lower-left corner
# so we have to flip the y axis to make this consistent with
# other window libraries
self.example.mouse_position_event(x, self.buffer_height - y) | python | def on_mouse_motion(self, x, y, dx, dy):
"""
Pyglet specific mouse motion callback.
Forwards and traslates the event to the example
"""
# Screen coordinates relative to the lower-left corner
# so we have to flip the y axis to make this consistent with
# other window libraries
self.example.mouse_position_event(x, self.buffer_height - y) | [
"def",
"on_mouse_motion",
"(",
"self",
",",
"x",
",",
"y",
",",
"dx",
",",
"dy",
")",
":",
"# Screen coordinates relative to the lower-left corner\r",
"# so we have to flip the y axis to make this consistent with\r",
"# other window libraries\r",
"self",
".",
"example",
".",
... | Pyglet specific mouse motion callback.
Forwards and traslates the event to the example | [
"Pyglet",
"specific",
"mouse",
"motion",
"callback",
".",
"Forwards",
"and",
"traslates",
"the",
"event",
"to",
"the",
"example"
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/window/pyglet/window.py#L110-L118 | train | 220,411 |
moderngl/moderngl | examples/window/pyglet/window.py | Window.on_mouse_press | def on_mouse_press(self, x: int, y: int, button, mods):
"""
Handle mouse press events and forward to example window
"""
if button in [1, 4]:
self.example.mouse_press_event(
x, self.buffer_height - y,
1 if button == 1 else 2,
) | python | def on_mouse_press(self, x: int, y: int, button, mods):
"""
Handle mouse press events and forward to example window
"""
if button in [1, 4]:
self.example.mouse_press_event(
x, self.buffer_height - y,
1 if button == 1 else 2,
) | [
"def",
"on_mouse_press",
"(",
"self",
",",
"x",
":",
"int",
",",
"y",
":",
"int",
",",
"button",
",",
"mods",
")",
":",
"if",
"button",
"in",
"[",
"1",
",",
"4",
"]",
":",
"self",
".",
"example",
".",
"mouse_press_event",
"(",
"x",
",",
"self",
... | Handle mouse press events and forward to example window | [
"Handle",
"mouse",
"press",
"events",
"and",
"forward",
"to",
"example",
"window"
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/window/pyglet/window.py#L120-L128 | train | 220,412 |
moderngl/moderngl | examples/window/pyglet/window.py | Window.on_mouse_release | def on_mouse_release(self, x: int, y: int, button, mods):
"""
Handle mouse release events and forward to example window
"""
if button in [1, 4]:
self.example.mouse_release_event(
x, self.buffer_height - y,
1 if button == 1 else 2,
) | python | def on_mouse_release(self, x: int, y: int, button, mods):
"""
Handle mouse release events and forward to example window
"""
if button in [1, 4]:
self.example.mouse_release_event(
x, self.buffer_height - y,
1 if button == 1 else 2,
) | [
"def",
"on_mouse_release",
"(",
"self",
",",
"x",
":",
"int",
",",
"y",
":",
"int",
",",
"button",
",",
"mods",
")",
":",
"if",
"button",
"in",
"[",
"1",
",",
"4",
"]",
":",
"self",
".",
"example",
".",
"mouse_release_event",
"(",
"x",
",",
"self... | Handle mouse release events and forward to example window | [
"Handle",
"mouse",
"release",
"events",
"and",
"forward",
"to",
"example",
"window"
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/window/pyglet/window.py#L130-L138 | train | 220,413 |
moderngl/moderngl | moderngl/texture_cube.py | TextureCube.write | def write(self, face, data, viewport=None, *, alignment=1) -> None:
'''
Update the content of the texture.
Args:
face (int): The face to update.
data (bytes): The pixel data.
viewport (tuple): The viewport.
Keyword Args:
alignment (int): The byte alignment of the pixels.
'''
if type(data) is Buffer:
data = data.mglo
self.mglo.write(face, data, viewport, alignment) | python | def write(self, face, data, viewport=None, *, alignment=1) -> None:
'''
Update the content of the texture.
Args:
face (int): The face to update.
data (bytes): The pixel data.
viewport (tuple): The viewport.
Keyword Args:
alignment (int): The byte alignment of the pixels.
'''
if type(data) is Buffer:
data = data.mglo
self.mglo.write(face, data, viewport, alignment) | [
"def",
"write",
"(",
"self",
",",
"face",
",",
"data",
",",
"viewport",
"=",
"None",
",",
"*",
",",
"alignment",
"=",
"1",
")",
"->",
"None",
":",
"if",
"type",
"(",
"data",
")",
"is",
"Buffer",
":",
"data",
"=",
"data",
".",
"mglo",
"self",
".... | Update the content of the texture.
Args:
face (int): The face to update.
data (bytes): The pixel data.
viewport (tuple): The viewport.
Keyword Args:
alignment (int): The byte alignment of the pixels. | [
"Update",
"the",
"content",
"of",
"the",
"texture",
"."
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/texture_cube.py#L170-L186 | train | 220,414 |
moderngl/moderngl | examples/00_empty_window.py | EmptyWindow.key_event | def key_event(self, key, action):
"""
Handle key events in a generic way
supporting all window types.
"""
if action == self.wnd.keys.ACTION_PRESS:
if key == self.wnd.keys.SPACE:
print("Space was pressed")
if action == self.wnd.keys.ACTION_RELEASE:
if key == self.wnd.keys.SPACE:
print("Space was released") | python | def key_event(self, key, action):
"""
Handle key events in a generic way
supporting all window types.
"""
if action == self.wnd.keys.ACTION_PRESS:
if key == self.wnd.keys.SPACE:
print("Space was pressed")
if action == self.wnd.keys.ACTION_RELEASE:
if key == self.wnd.keys.SPACE:
print("Space was released") | [
"def",
"key_event",
"(",
"self",
",",
"key",
",",
"action",
")",
":",
"if",
"action",
"==",
"self",
".",
"wnd",
".",
"keys",
".",
"ACTION_PRESS",
":",
"if",
"key",
"==",
"self",
".",
"wnd",
".",
"keys",
".",
"SPACE",
":",
"print",
"(",
"\"Space was... | Handle key events in a generic way
supporting all window types. | [
"Handle",
"key",
"events",
"in",
"a",
"generic",
"way",
"supporting",
"all",
"window",
"types",
"."
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/00_empty_window.py#L24-L35 | train | 220,415 |
moderngl/moderngl | examples/00_empty_window.py | EmptyWindow.mouse_press_event | def mouse_press_event(self, x, y, button):
"""Reports left and right mouse button presses + position"""
if button == 1:
print("Left mouse button pressed @", x, y)
if button == 2:
print("Right mouse button pressed @", x, y) | python | def mouse_press_event(self, x, y, button):
"""Reports left and right mouse button presses + position"""
if button == 1:
print("Left mouse button pressed @", x, y)
if button == 2:
print("Right mouse button pressed @", x, y) | [
"def",
"mouse_press_event",
"(",
"self",
",",
"x",
",",
"y",
",",
"button",
")",
":",
"if",
"button",
"==",
"1",
":",
"print",
"(",
"\"Left mouse button pressed @\"",
",",
"x",
",",
"y",
")",
"if",
"button",
"==",
"2",
":",
"print",
"(",
"\"Right mouse... | Reports left and right mouse button presses + position | [
"Reports",
"left",
"and",
"right",
"mouse",
"button",
"presses",
"+",
"position"
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/00_empty_window.py#L44-L49 | train | 220,416 |
moderngl/moderngl | examples/00_empty_window.py | EmptyWindow.mouse_release_event | def mouse_release_event(self, x, y, button):
"""Reports left and right mouse button releases + position"""
if button == 1:
print("Left mouse button released @", x, y)
if button == 2:
print("Right mouse button released @", x, y) | python | def mouse_release_event(self, x, y, button):
"""Reports left and right mouse button releases + position"""
if button == 1:
print("Left mouse button released @", x, y)
if button == 2:
print("Right mouse button released @", x, y) | [
"def",
"mouse_release_event",
"(",
"self",
",",
"x",
",",
"y",
",",
"button",
")",
":",
"if",
"button",
"==",
"1",
":",
"print",
"(",
"\"Left mouse button released @\"",
",",
"x",
",",
"y",
")",
"if",
"button",
"==",
"2",
":",
"print",
"(",
"\"Right mo... | Reports left and right mouse button releases + position | [
"Reports",
"left",
"and",
"right",
"mouse",
"button",
"releases",
"+",
"position"
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/examples/00_empty_window.py#L51-L56 | train | 220,417 |
moderngl/moderngl | moderngl/program.py | detect_format | def detect_format(program, attributes) -> str:
'''
Detect format for vertex attributes.
The format returned does not contain padding.
Args:
program (Program): The program.
attributes (list): A list of attribute names.
Returns:
str
'''
def fmt(attr):
'''
For internal use only.
'''
return attr.array_length * attr.dimension, attr.shape
return ' '.join('%d%s' % fmt(program[a]) for a in attributes) | python | def detect_format(program, attributes) -> str:
'''
Detect format for vertex attributes.
The format returned does not contain padding.
Args:
program (Program): The program.
attributes (list): A list of attribute names.
Returns:
str
'''
def fmt(attr):
'''
For internal use only.
'''
return attr.array_length * attr.dimension, attr.shape
return ' '.join('%d%s' % fmt(program[a]) for a in attributes) | [
"def",
"detect_format",
"(",
"program",
",",
"attributes",
")",
"->",
"str",
":",
"def",
"fmt",
"(",
"attr",
")",
":",
"'''\n For internal use only.\n '''",
"return",
"attr",
".",
"array_length",
"*",
"attr",
".",
"dimension",
",",
"attr",
".",... | Detect format for vertex attributes.
The format returned does not contain padding.
Args:
program (Program): The program.
attributes (list): A list of attribute names.
Returns:
str | [
"Detect",
"format",
"for",
"vertex",
"attributes",
".",
"The",
"format",
"returned",
"does",
"not",
"contain",
"padding",
"."
] | a8f5dce8dc72ae84a2f9523887fb5f6b620049b9 | https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/program.py#L115-L135 | train | 220,418 |
dswah/pyGAM | pygam/callbacks.py | validate_callback_data | def validate_callback_data(method):
"""
wraps a callback's method to pull the desired arguments from the vars dict
also checks to ensure the method's arguments are in the vars dict
Parameters
----------
method : callable
Returns
-------
validated callable
"""
@wraps(method)
def method_wrapper(*args, **kwargs):
"""
Parameters
----------
*args
**kwargs
Returns
-------
method's output
"""
expected = method.__code__.co_varnames
# rename curret gam object
if 'self' in kwargs:
gam = kwargs['self']
del(kwargs['self'])
kwargs['gam'] = gam
# loop once to check any missing
missing = []
for e in expected:
if e == 'self':
continue
if e not in kwargs:
missing.append(e)
assert len(missing) == 0, 'CallBack cannot reference: {}'.\
format(', '.join(missing))
# loop again to extract desired
kwargs_subset = {}
for e in expected:
if e == 'self':
continue
kwargs_subset[e] = kwargs[e]
return method(*args, **kwargs_subset)
return method_wrapper | python | def validate_callback_data(method):
"""
wraps a callback's method to pull the desired arguments from the vars dict
also checks to ensure the method's arguments are in the vars dict
Parameters
----------
method : callable
Returns
-------
validated callable
"""
@wraps(method)
def method_wrapper(*args, **kwargs):
"""
Parameters
----------
*args
**kwargs
Returns
-------
method's output
"""
expected = method.__code__.co_varnames
# rename curret gam object
if 'self' in kwargs:
gam = kwargs['self']
del(kwargs['self'])
kwargs['gam'] = gam
# loop once to check any missing
missing = []
for e in expected:
if e == 'self':
continue
if e not in kwargs:
missing.append(e)
assert len(missing) == 0, 'CallBack cannot reference: {}'.\
format(', '.join(missing))
# loop again to extract desired
kwargs_subset = {}
for e in expected:
if e == 'self':
continue
kwargs_subset[e] = kwargs[e]
return method(*args, **kwargs_subset)
return method_wrapper | [
"def",
"validate_callback_data",
"(",
"method",
")",
":",
"@",
"wraps",
"(",
"method",
")",
"def",
"method_wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"\n\n Parameters\n ----------\n *args\n **kwargs\n\n Returns\... | wraps a callback's method to pull the desired arguments from the vars dict
also checks to ensure the method's arguments are in the vars dict
Parameters
----------
method : callable
Returns
-------
validated callable | [
"wraps",
"a",
"callback",
"s",
"method",
"to",
"pull",
"the",
"desired",
"arguments",
"from",
"the",
"vars",
"dict",
"also",
"checks",
"to",
"ensure",
"the",
"method",
"s",
"arguments",
"are",
"in",
"the",
"vars",
"dict"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/callbacks.py#L13-L66 | train | 220,419 |
dswah/pyGAM | pygam/callbacks.py | validate_callback | def validate_callback(callback):
"""
validates a callback's on_loop_start and on_loop_end methods
Parameters
----------
callback : Callback object
Returns
-------
validated callback
"""
if not(hasattr(callback, '_validated')) or callback._validated == False:
assert hasattr(callback, 'on_loop_start') \
or hasattr(callback, 'on_loop_end'), \
'callback must have `on_loop_start` or `on_loop_end` method'
if hasattr(callback, 'on_loop_start'):
setattr(callback, 'on_loop_start',
validate_callback_data(callback.on_loop_start))
if hasattr(callback, 'on_loop_end'):
setattr(callback, 'on_loop_end',
validate_callback_data(callback.on_loop_end))
setattr(callback, '_validated', True)
return callback | python | def validate_callback(callback):
"""
validates a callback's on_loop_start and on_loop_end methods
Parameters
----------
callback : Callback object
Returns
-------
validated callback
"""
if not(hasattr(callback, '_validated')) or callback._validated == False:
assert hasattr(callback, 'on_loop_start') \
or hasattr(callback, 'on_loop_end'), \
'callback must have `on_loop_start` or `on_loop_end` method'
if hasattr(callback, 'on_loop_start'):
setattr(callback, 'on_loop_start',
validate_callback_data(callback.on_loop_start))
if hasattr(callback, 'on_loop_end'):
setattr(callback, 'on_loop_end',
validate_callback_data(callback.on_loop_end))
setattr(callback, '_validated', True)
return callback | [
"def",
"validate_callback",
"(",
"callback",
")",
":",
"if",
"not",
"(",
"hasattr",
"(",
"callback",
",",
"'_validated'",
")",
")",
"or",
"callback",
".",
"_validated",
"==",
"False",
":",
"assert",
"hasattr",
"(",
"callback",
",",
"'on_loop_start'",
")",
... | validates a callback's on_loop_start and on_loop_end methods
Parameters
----------
callback : Callback object
Returns
-------
validated callback | [
"validates",
"a",
"callback",
"s",
"on_loop_start",
"and",
"on_loop_end",
"methods"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/callbacks.py#L68-L91 | train | 220,420 |
dswah/pyGAM | pygam/distributions.py | Distribution.phi | def phi(self, y, mu, edof, weights):
"""
GLM scale parameter.
for Binomial and Poisson families this is unity
for Normal family this is variance
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
edof : float
estimated degrees of freedom
weights : array-like shape (n,) or None, default: None
sample weights
if None, defaults to array of ones
Returns
-------
scale : estimated model scale
"""
if self._known_scale:
return self.scale
else:
return (np.sum(weights * self.V(mu)**-1 * (y - mu)**2) /
(len(mu) - edof)) | python | def phi(self, y, mu, edof, weights):
"""
GLM scale parameter.
for Binomial and Poisson families this is unity
for Normal family this is variance
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
edof : float
estimated degrees of freedom
weights : array-like shape (n,) or None, default: None
sample weights
if None, defaults to array of ones
Returns
-------
scale : estimated model scale
"""
if self._known_scale:
return self.scale
else:
return (np.sum(weights * self.V(mu)**-1 * (y - mu)**2) /
(len(mu) - edof)) | [
"def",
"phi",
"(",
"self",
",",
"y",
",",
"mu",
",",
"edof",
",",
"weights",
")",
":",
"if",
"self",
".",
"_known_scale",
":",
"return",
"self",
".",
"scale",
"else",
":",
"return",
"(",
"np",
".",
"sum",
"(",
"weights",
"*",
"self",
".",
"V",
... | GLM scale parameter.
for Binomial and Poisson families this is unity
for Normal family this is variance
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
edof : float
estimated degrees of freedom
weights : array-like shape (n,) or None, default: None
sample weights
if None, defaults to array of ones
Returns
-------
scale : estimated model scale | [
"GLM",
"scale",
"parameter",
".",
"for",
"Binomial",
"and",
"Poisson",
"families",
"this",
"is",
"unity",
"for",
"Normal",
"family",
"this",
"is",
"variance"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/distributions.py#L61-L87 | train | 220,421 |
dswah/pyGAM | pygam/distributions.py | GammaDist.sample | def sample(self, mu):
"""
Return random samples from this Gamma distribution.
Parameters
----------
mu : array-like of shape n_samples or shape (n_simulations, n_samples)
expected values
Returns
-------
random_samples : np.array of same shape as mu
"""
# in numpy.random.gamma, `shape` is the parameter sometimes denoted by
# `k` that corresponds to `nu` in S. Wood (2006) Table 2.1
shape = 1. / self.scale
# in numpy.random.gamma, `scale` is the parameter sometimes denoted by
# `theta` that corresponds to mu / nu in S. Wood (2006) Table 2.1
scale = mu / shape
return np.random.gamma(shape=shape, scale=scale, size=None) | python | def sample(self, mu):
"""
Return random samples from this Gamma distribution.
Parameters
----------
mu : array-like of shape n_samples or shape (n_simulations, n_samples)
expected values
Returns
-------
random_samples : np.array of same shape as mu
"""
# in numpy.random.gamma, `shape` is the parameter sometimes denoted by
# `k` that corresponds to `nu` in S. Wood (2006) Table 2.1
shape = 1. / self.scale
# in numpy.random.gamma, `scale` is the parameter sometimes denoted by
# `theta` that corresponds to mu / nu in S. Wood (2006) Table 2.1
scale = mu / shape
return np.random.gamma(shape=shape, scale=scale, size=None) | [
"def",
"sample",
"(",
"self",
",",
"mu",
")",
":",
"# in numpy.random.gamma, `shape` is the parameter sometimes denoted by",
"# `k` that corresponds to `nu` in S. Wood (2006) Table 2.1",
"shape",
"=",
"1.",
"/",
"self",
".",
"scale",
"# in numpy.random.gamma, `scale` is the paramet... | Return random samples from this Gamma distribution.
Parameters
----------
mu : array-like of shape n_samples or shape (n_simulations, n_samples)
expected values
Returns
-------
random_samples : np.array of same shape as mu | [
"Return",
"random",
"samples",
"from",
"this",
"Gamma",
"distribution",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/distributions.py#L535-L554 | train | 220,422 |
dswah/pyGAM | pygam/datasets/load_datasets.py | _clean_X_y | def _clean_X_y(X, y):
"""ensure that X and y data are float and correct shapes
"""
return make_2d(X, verbose=False).astype('float'), y.astype('float') | python | def _clean_X_y(X, y):
"""ensure that X and y data are float and correct shapes
"""
return make_2d(X, verbose=False).astype('float'), y.astype('float') | [
"def",
"_clean_X_y",
"(",
"X",
",",
"y",
")",
":",
"return",
"make_2d",
"(",
"X",
",",
"verbose",
"=",
"False",
")",
".",
"astype",
"(",
"'float'",
")",
",",
"y",
".",
"astype",
"(",
"'float'",
")"
] | ensure that X and y data are float and correct shapes | [
"ensure",
"that",
"X",
"and",
"y",
"data",
"are",
"float",
"and",
"correct",
"shapes"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/datasets/load_datasets.py#L17-L20 | train | 220,423 |
dswah/pyGAM | pygam/datasets/load_datasets.py | mcycle | def mcycle(return_X_y=True):
"""motorcyle acceleration dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the times after the impact.
y contains the acceleration.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/MASS/mcycle.html
"""
# y is real
# recommend LinearGAM
motor = pd.read_csv(PATH + '/mcycle.csv', index_col=0)
if return_X_y:
X = motor.times.values
y = motor.accel
return _clean_X_y(X, y)
return motor | python | def mcycle(return_X_y=True):
"""motorcyle acceleration dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the times after the impact.
y contains the acceleration.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/MASS/mcycle.html
"""
# y is real
# recommend LinearGAM
motor = pd.read_csv(PATH + '/mcycle.csv', index_col=0)
if return_X_y:
X = motor.times.values
y = motor.accel
return _clean_X_y(X, y)
return motor | [
"def",
"mcycle",
"(",
"return_X_y",
"=",
"True",
")",
":",
"# y is real",
"# recommend LinearGAM",
"motor",
"=",
"pd",
".",
"read_csv",
"(",
"PATH",
"+",
"'/mcycle.csv'",
",",
"index_col",
"=",
"0",
")",
"if",
"return_X_y",
":",
"X",
"=",
"motor",
".",
"... | motorcyle acceleration dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the times after the impact.
y contains the acceleration.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/MASS/mcycle.html | [
"motorcyle",
"acceleration",
"dataset"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/datasets/load_datasets.py#L22-L52 | train | 220,424 |
dswah/pyGAM | pygam/datasets/load_datasets.py | coal | def coal(return_X_y=True):
"""coal-mining accidents dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
The (X, y) tuple is a processed version of the otherwise raw DataFrame.
A histogram of 150 bins has been computed describing the number accidents per year.
X contains the midpoints of histogram bins.
y contains the count in each histogram bin.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/boot/coal.html
"""
# y is counts
# recommend PoissonGAM
coal = pd.read_csv(PATH + '/coal.csv', index_col=0)
if return_X_y:
y, x = np.histogram(coal.values, bins=150)
X = x[:-1] + np.diff(x)/2 # get midpoints of bins
return _clean_X_y(X, y)
return coal | python | def coal(return_X_y=True):
"""coal-mining accidents dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
The (X, y) tuple is a processed version of the otherwise raw DataFrame.
A histogram of 150 bins has been computed describing the number accidents per year.
X contains the midpoints of histogram bins.
y contains the count in each histogram bin.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/boot/coal.html
"""
# y is counts
# recommend PoissonGAM
coal = pd.read_csv(PATH + '/coal.csv', index_col=0)
if return_X_y:
y, x = np.histogram(coal.values, bins=150)
X = x[:-1] + np.diff(x)/2 # get midpoints of bins
return _clean_X_y(X, y)
return coal | [
"def",
"coal",
"(",
"return_X_y",
"=",
"True",
")",
":",
"# y is counts",
"# recommend PoissonGAM",
"coal",
"=",
"pd",
".",
"read_csv",
"(",
"PATH",
"+",
"'/coal.csv'",
",",
"index_col",
"=",
"0",
")",
"if",
"return_X_y",
":",
"y",
",",
"x",
"=",
"np",
... | coal-mining accidents dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
The (X, y) tuple is a processed version of the otherwise raw DataFrame.
A histogram of 150 bins has been computed describing the number accidents per year.
X contains the midpoints of histogram bins.
y contains the count in each histogram bin.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/boot/coal.html | [
"coal",
"-",
"mining",
"accidents",
"dataset"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/datasets/load_datasets.py#L54-L88 | train | 220,425 |
dswah/pyGAM | pygam/datasets/load_datasets.py | faithful | def faithful(return_X_y=True):
"""old-faithful dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
The (X, y) tuple is a processed version of the otherwise raw DataFrame.
A histogram of 200 bins has been computed describing the wating time between eruptions.
X contains the midpoints of histogram bins.
y contains the count in each histogram bin.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/datasets/faithful.html
"""
# y is counts
# recommend PoissonGAM
faithful = pd.read_csv(PATH + '/faithful.csv', index_col=0)
if return_X_y:
y, x = np.histogram(faithful['eruptions'], bins=200)
X = x[:-1] + np.diff(x)/2 # get midpoints of bins
return _clean_X_y(X, y)
return faithful | python | def faithful(return_X_y=True):
"""old-faithful dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
The (X, y) tuple is a processed version of the otherwise raw DataFrame.
A histogram of 200 bins has been computed describing the wating time between eruptions.
X contains the midpoints of histogram bins.
y contains the count in each histogram bin.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/datasets/faithful.html
"""
# y is counts
# recommend PoissonGAM
faithful = pd.read_csv(PATH + '/faithful.csv', index_col=0)
if return_X_y:
y, x = np.histogram(faithful['eruptions'], bins=200)
X = x[:-1] + np.diff(x)/2 # get midpoints of bins
return _clean_X_y(X, y)
return faithful | [
"def",
"faithful",
"(",
"return_X_y",
"=",
"True",
")",
":",
"# y is counts",
"# recommend PoissonGAM",
"faithful",
"=",
"pd",
".",
"read_csv",
"(",
"PATH",
"+",
"'/faithful.csv'",
",",
"index_col",
"=",
"0",
")",
"if",
"return_X_y",
":",
"y",
",",
"x",
"=... | old-faithful dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
The (X, y) tuple is a processed version of the otherwise raw DataFrame.
A histogram of 200 bins has been computed describing the wating time between eruptions.
X contains the midpoints of histogram bins.
y contains the count in each histogram bin.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/datasets/faithful.html | [
"old",
"-",
"faithful",
"dataset"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/datasets/load_datasets.py#L90-L124 | train | 220,426 |
dswah/pyGAM | pygam/datasets/load_datasets.py | trees | def trees(return_X_y=True):
"""cherry trees dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the girth and the height of each tree.
y contains the volume.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/datasets/trees.html
"""
# y is real.
# recommend InvGaussGAM, or GAM(distribution='gamma', link='log')
trees = pd.read_csv(PATH + '/trees.csv', index_col=0)
if return_X_y:
y = trees.Volume.values
X = trees[['Girth', 'Height']].values
return _clean_X_y(X, y)
return trees | python | def trees(return_X_y=True):
"""cherry trees dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the girth and the height of each tree.
y contains the volume.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/datasets/trees.html
"""
# y is real.
# recommend InvGaussGAM, or GAM(distribution='gamma', link='log')
trees = pd.read_csv(PATH + '/trees.csv', index_col=0)
if return_X_y:
y = trees.Volume.values
X = trees[['Girth', 'Height']].values
return _clean_X_y(X, y)
return trees | [
"def",
"trees",
"(",
"return_X_y",
"=",
"True",
")",
":",
"# y is real.",
"# recommend InvGaussGAM, or GAM(distribution='gamma', link='log')",
"trees",
"=",
"pd",
".",
"read_csv",
"(",
"PATH",
"+",
"'/trees.csv'",
",",
"index_col",
"=",
"0",
")",
"if",
"return_X_y",... | cherry trees dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the girth and the height of each tree.
y contains the volume.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/datasets/trees.html | [
"cherry",
"trees",
"dataset"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/datasets/load_datasets.py#L161-L191 | train | 220,427 |
dswah/pyGAM | pygam/datasets/load_datasets.py | default | def default(return_X_y=True):
"""credit default dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the category of student or not, credit card balance,
and income.
y contains the outcome of default (0) or not (1).
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/ISLR/Default.html
"""
# y is binary
# recommend LogisticGAM
default = pd.read_csv(PATH + '/default.csv', index_col=0)
if return_X_y:
default = default.values
default[:,0] = np.unique(default[:,0], return_inverse=True)[1]
default[:,1] = np.unique(default[:,1], return_inverse=True)[1]
X = default[:,1:]
y = default[:,0]
return _clean_X_y(X, y)
return default | python | def default(return_X_y=True):
"""credit default dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the category of student or not, credit card balance,
and income.
y contains the outcome of default (0) or not (1).
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/ISLR/Default.html
"""
# y is binary
# recommend LogisticGAM
default = pd.read_csv(PATH + '/default.csv', index_col=0)
if return_X_y:
default = default.values
default[:,0] = np.unique(default[:,0], return_inverse=True)[1]
default[:,1] = np.unique(default[:,1], return_inverse=True)[1]
X = default[:,1:]
y = default[:,0]
return _clean_X_y(X, y)
return default | [
"def",
"default",
"(",
"return_X_y",
"=",
"True",
")",
":",
"# y is binary",
"# recommend LogisticGAM",
"default",
"=",
"pd",
".",
"read_csv",
"(",
"PATH",
"+",
"'/default.csv'",
",",
"index_col",
"=",
"0",
")",
"if",
"return_X_y",
":",
"default",
"=",
"defa... | credit default dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the category of student or not, credit card balance,
and income.
y contains the outcome of default (0) or not (1).
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/ISLR/Default.html | [
"credit",
"default",
"dataset"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/datasets/load_datasets.py#L193-L228 | train | 220,428 |
dswah/pyGAM | pygam/datasets/load_datasets.py | hepatitis | def hepatitis(return_X_y=True):
"""hepatitis in Bulgaria dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the age of each patient group.
y contains the ratio of HAV positive patients to the total number for each
age group.
Groups with 0 total patients are excluded.
Source:
Keiding, N. (1991) Age-specific incidence and prevalence: a statistical perspective
"""
# y is real
# recommend LinearGAM
hep = pd.read_csv(PATH + '/hepatitis_A_bulgaria.csv').astype(float)
if return_X_y:
# eliminate 0/0
mask = (hep.total > 0).values
hep = hep[mask]
X = hep.age.values
y = hep.hepatitis_A_positive.values / hep.total.values
return _clean_X_y(X, y)
return hep | python | def hepatitis(return_X_y=True):
"""hepatitis in Bulgaria dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the age of each patient group.
y contains the ratio of HAV positive patients to the total number for each
age group.
Groups with 0 total patients are excluded.
Source:
Keiding, N. (1991) Age-specific incidence and prevalence: a statistical perspective
"""
# y is real
# recommend LinearGAM
hep = pd.read_csv(PATH + '/hepatitis_A_bulgaria.csv').astype(float)
if return_X_y:
# eliminate 0/0
mask = (hep.total > 0).values
hep = hep[mask]
X = hep.age.values
y = hep.hepatitis_A_positive.values / hep.total.values
return _clean_X_y(X, y)
return hep | [
"def",
"hepatitis",
"(",
"return_X_y",
"=",
"True",
")",
":",
"# y is real",
"# recommend LinearGAM",
"hep",
"=",
"pd",
".",
"read_csv",
"(",
"PATH",
"+",
"'/hepatitis_A_bulgaria.csv'",
")",
".",
"astype",
"(",
"float",
")",
"if",
"return_X_y",
":",
"# elimina... | hepatitis in Bulgaria dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the age of each patient group.
y contains the ratio of HAV positive patients to the total number for each
age group.
Groups with 0 total patients are excluded.
Source:
Keiding, N. (1991) Age-specific incidence and prevalence: a statistical perspective | [
"hepatitis",
"in",
"Bulgaria",
"dataset"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/datasets/load_datasets.py#L266-L304 | train | 220,429 |
dswah/pyGAM | pygam/datasets/load_datasets.py | toy_classification | def toy_classification(return_X_y=True, n=5000):
"""toy classification dataset with irrelevant features
fitting a logistic model on this data and performing a model summary
should reveal that features 2,3,4 are not significant.
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
n : int, default: 5000
number of samples to generate
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains 5 variables:
continuous feature 0
continuous feature 1
irrelevant feature 0
irrelevant feature 1
irrelevant feature 2
categorical feature 0
y contains binary labels
Also, this dataset is randomly generated and will vary each time.
"""
# make features
X = np.random.rand(n,5) * 10 - 5
cat = np.random.randint(0,4, n)
X = np.c_[X, cat]
# make observations
log_odds = (-0.5*X[:,0]**2) + 5 +(-0.5*X[:,1]**2) + np.mod(X[:,-1], 2)*-30
p = 1/(1+np.exp(-log_odds)).squeeze()
y = (np.random.rand(n) < p).astype(np.int)
if return_X_y:
return X, y
else:
return pd.DataFrame(np.c_[X, y], columns=[['continuous0',
'continuous1',
'irrelevant0',
'irrelevant1',
'irrelevant2',
'categorical0',
'observations'
]]) | python | def toy_classification(return_X_y=True, n=5000):
"""toy classification dataset with irrelevant features
fitting a logistic model on this data and performing a model summary
should reveal that features 2,3,4 are not significant.
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
n : int, default: 5000
number of samples to generate
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains 5 variables:
continuous feature 0
continuous feature 1
irrelevant feature 0
irrelevant feature 1
irrelevant feature 2
categorical feature 0
y contains binary labels
Also, this dataset is randomly generated and will vary each time.
"""
# make features
X = np.random.rand(n,5) * 10 - 5
cat = np.random.randint(0,4, n)
X = np.c_[X, cat]
# make observations
log_odds = (-0.5*X[:,0]**2) + 5 +(-0.5*X[:,1]**2) + np.mod(X[:,-1], 2)*-30
p = 1/(1+np.exp(-log_odds)).squeeze()
y = (np.random.rand(n) < p).astype(np.int)
if return_X_y:
return X, y
else:
return pd.DataFrame(np.c_[X, y], columns=[['continuous0',
'continuous1',
'irrelevant0',
'irrelevant1',
'irrelevant2',
'categorical0',
'observations'
]]) | [
"def",
"toy_classification",
"(",
"return_X_y",
"=",
"True",
",",
"n",
"=",
"5000",
")",
":",
"# make features",
"X",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"n",
",",
"5",
")",
"*",
"10",
"-",
"5",
"cat",
"=",
"np",
".",
"random",
".",
"rand... | toy classification dataset with irrelevant features
fitting a logistic model on this data and performing a model summary
should reveal that features 2,3,4 are not significant.
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
n : int, default: 5000
number of samples to generate
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains 5 variables:
continuous feature 0
continuous feature 1
irrelevant feature 0
irrelevant feature 1
irrelevant feature 2
categorical feature 0
y contains binary labels
Also, this dataset is randomly generated and will vary each time. | [
"toy",
"classification",
"dataset",
"with",
"irrelevant",
"features"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/datasets/load_datasets.py#L306-L361 | train | 220,430 |
dswah/pyGAM | pygam/datasets/load_datasets.py | head_circumference | def head_circumference(return_X_y=True):
"""head circumference for dutch boys
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the age in years of each patient.
y contains the head circumference in centimeters
"""
# y is real
# recommend ExpectileGAM
head = pd.read_csv(PATH + '/head_circumference.csv', index_col=0).astype(float)
if return_X_y:
y = head['head'].values
X = head[['age']].values
return _clean_X_y(X, y)
return head | python | def head_circumference(return_X_y=True):
"""head circumference for dutch boys
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the age in years of each patient.
y contains the head circumference in centimeters
"""
# y is real
# recommend ExpectileGAM
head = pd.read_csv(PATH + '/head_circumference.csv', index_col=0).astype(float)
if return_X_y:
y = head['head'].values
X = head[['age']].values
return _clean_X_y(X, y)
return head | [
"def",
"head_circumference",
"(",
"return_X_y",
"=",
"True",
")",
":",
"# y is real",
"# recommend ExpectileGAM",
"head",
"=",
"pd",
".",
"read_csv",
"(",
"PATH",
"+",
"'/head_circumference.csv'",
",",
"index_col",
"=",
"0",
")",
".",
"astype",
"(",
"float",
"... | head circumference for dutch boys
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the age in years of each patient.
y contains the head circumference in centimeters | [
"head",
"circumference",
"for",
"dutch",
"boys"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/datasets/load_datasets.py#L363-L391 | train | 220,431 |
dswah/pyGAM | pygam/datasets/load_datasets.py | chicago | def chicago(return_X_y=True):
"""Chicago air pollution and death rate data
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains [['time', 'tmpd', 'pm10median', 'o3median']], with no NaNs
y contains 'death', the deaths per day, with no NaNs
Source:
R gamair package
`data(chicago)`
Notes
-----
https://cran.r-project.org/web/packages/gamair/gamair.pdf
https://rdrr.io/cran/gamair/man/chicago.html
Columns:
death : total deaths (per day).
pm10median : median particles in 2.5-10 per cubic m
pm25median : median particles < 2.5 mg per cubic m (more dangerous).
o3median : Ozone in parts per billion
so2median : Median Sulpher dioxide measurement
time : time in days
tmpd : temperature in fahrenheit
"""
# recommend PoissonGAM
chi = pd.read_csv(PATH + '/chicago.csv', index_col=0).astype(float)
if return_X_y:
chi = chi[['time', 'tmpd', 'pm10median', 'o3median', 'death']].dropna()
X = chi[['time', 'tmpd', 'pm10median', 'o3median']].values
y = chi['death'].values
return X, y
else:
return chi | python | def chicago(return_X_y=True):
"""Chicago air pollution and death rate data
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains [['time', 'tmpd', 'pm10median', 'o3median']], with no NaNs
y contains 'death', the deaths per day, with no NaNs
Source:
R gamair package
`data(chicago)`
Notes
-----
https://cran.r-project.org/web/packages/gamair/gamair.pdf
https://rdrr.io/cran/gamair/man/chicago.html
Columns:
death : total deaths (per day).
pm10median : median particles in 2.5-10 per cubic m
pm25median : median particles < 2.5 mg per cubic m (more dangerous).
o3median : Ozone in parts per billion
so2median : Median Sulpher dioxide measurement
time : time in days
tmpd : temperature in fahrenheit
"""
# recommend PoissonGAM
chi = pd.read_csv(PATH + '/chicago.csv', index_col=0).astype(float)
if return_X_y:
chi = chi[['time', 'tmpd', 'pm10median', 'o3median', 'death']].dropna()
X = chi[['time', 'tmpd', 'pm10median', 'o3median']].values
y = chi['death'].values
return X, y
else:
return chi | [
"def",
"chicago",
"(",
"return_X_y",
"=",
"True",
")",
":",
"# recommend PoissonGAM",
"chi",
"=",
"pd",
".",
"read_csv",
"(",
"PATH",
"+",
"'/chicago.csv'",
",",
"index_col",
"=",
"0",
")",
".",
"astype",
"(",
"float",
")",
"if",
"return_X_y",
":",
"chi"... | Chicago air pollution and death rate data
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains [['time', 'tmpd', 'pm10median', 'o3median']], with no NaNs
y contains 'death', the deaths per day, with no NaNs
Source:
R gamair package
`data(chicago)`
Notes
-----
https://cran.r-project.org/web/packages/gamair/gamair.pdf
https://rdrr.io/cran/gamair/man/chicago.html
Columns:
death : total deaths (per day).
pm10median : median particles in 2.5-10 per cubic m
pm25median : median particles < 2.5 mg per cubic m (more dangerous).
o3median : Ozone in parts per billion
so2median : Median Sulpher dioxide measurement
time : time in days
tmpd : temperature in fahrenheit | [
"Chicago",
"air",
"pollution",
"and",
"death",
"rate",
"data"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/datasets/load_datasets.py#L393-L442 | train | 220,432 |
dswah/pyGAM | pygam/datasets/load_datasets.py | toy_interaction | def toy_interaction(return_X_y=True, n=50000, stddev=0.1):
"""a sinusoid modulated by a linear function
this is a simple dataset to test a model's capacity to fit interactions
between features.
a GAM with no interaction terms will have an R-squared close to 0,
while a GAM with a tensor product will have R-squared close to 1.
the data is random, and will vary on each invocation.
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
n : int, optional
number of points to generate
stddev : positive float, optional,
standard deviation of irreducible error
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains [['sinusoid', 'linear']]
y is formed by multiplying the sinusoid by the linear function.
Source:
"""
X = np.random.uniform(-1,1, size=(n, 2))
X[:, 1] *= 5
y = np.sin(X[:,0] * 2 * np.pi * 1.5) * X[:,1]
y += np.random.randn(len(X)) * stddev
if return_X_y:
return X, y
else:
data = pd.DataFrame(np.c_[X, y])
data.columns = [['sinusoid', 'linear', 'y']]
return data | python | def toy_interaction(return_X_y=True, n=50000, stddev=0.1):
"""a sinusoid modulated by a linear function
this is a simple dataset to test a model's capacity to fit interactions
between features.
a GAM with no interaction terms will have an R-squared close to 0,
while a GAM with a tensor product will have R-squared close to 1.
the data is random, and will vary on each invocation.
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
n : int, optional
number of points to generate
stddev : positive float, optional,
standard deviation of irreducible error
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains [['sinusoid', 'linear']]
y is formed by multiplying the sinusoid by the linear function.
Source:
"""
X = np.random.uniform(-1,1, size=(n, 2))
X[:, 1] *= 5
y = np.sin(X[:,0] * 2 * np.pi * 1.5) * X[:,1]
y += np.random.randn(len(X)) * stddev
if return_X_y:
return X, y
else:
data = pd.DataFrame(np.c_[X, y])
data.columns = [['sinusoid', 'linear', 'y']]
return data | [
"def",
"toy_interaction",
"(",
"return_X_y",
"=",
"True",
",",
"n",
"=",
"50000",
",",
"stddev",
"=",
"0.1",
")",
":",
"X",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"-",
"1",
",",
"1",
",",
"size",
"=",
"(",
"n",
",",
"2",
")",
")",
"X"... | a sinusoid modulated by a linear function
this is a simple dataset to test a model's capacity to fit interactions
between features.
a GAM with no interaction terms will have an R-squared close to 0,
while a GAM with a tensor product will have R-squared close to 1.
the data is random, and will vary on each invocation.
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
n : int, optional
number of points to generate
stddev : positive float, optional,
standard deviation of irreducible error
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains [['sinusoid', 'linear']]
y is formed by multiplying the sinusoid by the linear function.
Source: | [
"a",
"sinusoid",
"modulated",
"by",
"a",
"linear",
"function"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/datasets/load_datasets.py#L444-L493 | train | 220,433 |
dswah/pyGAM | gen_imgs.py | gen_multi_data | def gen_multi_data(n=5000):
"""
multivariate Logistic problem
"""
X, y = toy_classification(return_X_y=True, n=10000)
lgam = LogisticGAM(s(0) + s(1) + s(2) + s(3) + s(4) + f(5))
lgam.fit(X, y)
plt.figure()
for i, term in enumerate(lgam.terms):
if term.isintercept:
continue
plt.plot(lgam.partial_dependence(term=i))
plt.savefig('imgs/pygam_multi_pdep.png', dpi=300)
plt.figure()
plt.plot(lgam.logs_['deviance'])
plt.savefig('imgs/pygam_multi_deviance.png', dpi=300) | python | def gen_multi_data(n=5000):
"""
multivariate Logistic problem
"""
X, y = toy_classification(return_X_y=True, n=10000)
lgam = LogisticGAM(s(0) + s(1) + s(2) + s(3) + s(4) + f(5))
lgam.fit(X, y)
plt.figure()
for i, term in enumerate(lgam.terms):
if term.isintercept:
continue
plt.plot(lgam.partial_dependence(term=i))
plt.savefig('imgs/pygam_multi_pdep.png', dpi=300)
plt.figure()
plt.plot(lgam.logs_['deviance'])
plt.savefig('imgs/pygam_multi_deviance.png', dpi=300) | [
"def",
"gen_multi_data",
"(",
"n",
"=",
"5000",
")",
":",
"X",
",",
"y",
"=",
"toy_classification",
"(",
"return_X_y",
"=",
"True",
",",
"n",
"=",
"10000",
")",
"lgam",
"=",
"LogisticGAM",
"(",
"s",
"(",
"0",
")",
"+",
"s",
"(",
"1",
")",
"+",
... | multivariate Logistic problem | [
"multivariate",
"Logistic",
"problem"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/gen_imgs.py#L229-L248 | train | 220,434 |
dswah/pyGAM | gen_imgs.py | gen_tensor_data | def gen_tensor_data():
"""
toy interaction data
"""
X, y = toy_interaction(return_X_y=True, n=10000)
gam = LinearGAM(te(0, 1,lam=0.1)).fit(X, y)
XX = gam.generate_X_grid(term=0, meshgrid=True)
Z = gam.partial_dependence(term=0, meshgrid=True)
fig = plt.figure(figsize=(9,6))
ax = plt.axes(projection='3d')
ax.dist = 7.5
ax.plot_surface(XX[0], XX[1], Z, cmap='viridis')
ax.set_axis_off()
fig.tight_layout()
plt.savefig('imgs/pygam_tensor.png', transparent=True, dpi=300) | python | def gen_tensor_data():
"""
toy interaction data
"""
X, y = toy_interaction(return_X_y=True, n=10000)
gam = LinearGAM(te(0, 1,lam=0.1)).fit(X, y)
XX = gam.generate_X_grid(term=0, meshgrid=True)
Z = gam.partial_dependence(term=0, meshgrid=True)
fig = plt.figure(figsize=(9,6))
ax = plt.axes(projection='3d')
ax.dist = 7.5
ax.plot_surface(XX[0], XX[1], Z, cmap='viridis')
ax.set_axis_off()
fig.tight_layout()
plt.savefig('imgs/pygam_tensor.png', transparent=True, dpi=300) | [
"def",
"gen_tensor_data",
"(",
")",
":",
"X",
",",
"y",
"=",
"toy_interaction",
"(",
"return_X_y",
"=",
"True",
",",
"n",
"=",
"10000",
")",
"gam",
"=",
"LinearGAM",
"(",
"te",
"(",
"0",
",",
"1",
",",
"lam",
"=",
"0.1",
")",
")",
".",
"fit",
"... | toy interaction data | [
"toy",
"interaction",
"data"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/gen_imgs.py#L250-L267 | train | 220,435 |
dswah/pyGAM | gen_imgs.py | expectiles | def expectiles():
"""
a bunch of expectiles
"""
X, y = mcycle(return_X_y=True)
# lets fit the mean model first by CV
gam50 = ExpectileGAM(expectile=0.5).gridsearch(X, y)
# and copy the smoothing to the other models
lam = gam50.lam
# now fit a few more models
gam95 = ExpectileGAM(expectile=0.95, lam=lam).fit(X, y)
gam75 = ExpectileGAM(expectile=0.75, lam=lam).fit(X, y)
gam25 = ExpectileGAM(expectile=0.25, lam=lam).fit(X, y)
gam05 = ExpectileGAM(expectile=0.05, lam=lam).fit(X, y)
XX = gam50.generate_X_grid(term=0, n=500)
fig = plt.figure()
plt.scatter(X, y, c='k', alpha=0.2)
plt.plot(XX, gam95.predict(XX), label='0.95')
plt.plot(XX, gam75.predict(XX), label='0.75')
plt.plot(XX, gam50.predict(XX), label='0.50')
plt.plot(XX, gam25.predict(XX), label='0.25')
plt.plot(XX, gam05.predict(XX), label='0.05')
plt.legend()
fig.tight_layout()
plt.savefig('imgs/pygam_expectiles.png', dpi=300) | python | def expectiles():
"""
a bunch of expectiles
"""
X, y = mcycle(return_X_y=True)
# lets fit the mean model first by CV
gam50 = ExpectileGAM(expectile=0.5).gridsearch(X, y)
# and copy the smoothing to the other models
lam = gam50.lam
# now fit a few more models
gam95 = ExpectileGAM(expectile=0.95, lam=lam).fit(X, y)
gam75 = ExpectileGAM(expectile=0.75, lam=lam).fit(X, y)
gam25 = ExpectileGAM(expectile=0.25, lam=lam).fit(X, y)
gam05 = ExpectileGAM(expectile=0.05, lam=lam).fit(X, y)
XX = gam50.generate_X_grid(term=0, n=500)
fig = plt.figure()
plt.scatter(X, y, c='k', alpha=0.2)
plt.plot(XX, gam95.predict(XX), label='0.95')
plt.plot(XX, gam75.predict(XX), label='0.75')
plt.plot(XX, gam50.predict(XX), label='0.50')
plt.plot(XX, gam25.predict(XX), label='0.25')
plt.plot(XX, gam05.predict(XX), label='0.05')
plt.legend()
fig.tight_layout()
plt.savefig('imgs/pygam_expectiles.png', dpi=300) | [
"def",
"expectiles",
"(",
")",
":",
"X",
",",
"y",
"=",
"mcycle",
"(",
"return_X_y",
"=",
"True",
")",
"# lets fit the mean model first by CV",
"gam50",
"=",
"ExpectileGAM",
"(",
"expectile",
"=",
"0.5",
")",
".",
"gridsearch",
"(",
"X",
",",
"y",
")",
"... | a bunch of expectiles | [
"a",
"bunch",
"of",
"expectiles"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/gen_imgs.py#L287-L318 | train | 220,436 |
dswah/pyGAM | pygam/utils.py | cholesky | def cholesky(A, sparse=True, verbose=True):
"""
Choose the best possible cholesky factorizor.
if possible, import the Scikit-Sparse sparse Cholesky method.
Permutes the output L to ensure A = L.H . L
otherwise defaults to numpy's non-sparse version
Parameters
----------
A : array-like
array to decompose
sparse : boolean, default: True
whether to return a sparse array
verbose : bool, default: True
whether to print warnings
"""
if SKSPIMPORT:
A = sp.sparse.csc_matrix(A)
try:
F = spcholesky(A)
# permutation matrix P
P = sp.sparse.lil_matrix(A.shape)
p = F.P()
P[np.arange(len(p)), p] = 1
# permute
L = F.L()
L = P.T.dot(L)
except CholmodNotPositiveDefiniteError as e:
raise NotPositiveDefiniteError('Matrix is not positive definite')
if sparse:
return L.T # upper triangular factorization
return L.T.A # upper triangular factorization
else:
msg = 'Could not import Scikit-Sparse or Suite-Sparse.\n'\
'This will slow down optimization for models with '\
'monotonicity/convexity penalties and many splines.\n'\
'See installation instructions for installing '\
'Scikit-Sparse and Suite-Sparse via Conda.'
if verbose:
warnings.warn(msg)
if sp.sparse.issparse(A):
A = A.A
try:
L = sp.linalg.cholesky(A, lower=False)
except LinAlgError as e:
raise NotPositiveDefiniteError('Matrix is not positive definite')
if sparse:
return sp.sparse.csc_matrix(L)
return L | python | def cholesky(A, sparse=True, verbose=True):
"""
Choose the best possible cholesky factorizor.
if possible, import the Scikit-Sparse sparse Cholesky method.
Permutes the output L to ensure A = L.H . L
otherwise defaults to numpy's non-sparse version
Parameters
----------
A : array-like
array to decompose
sparse : boolean, default: True
whether to return a sparse array
verbose : bool, default: True
whether to print warnings
"""
if SKSPIMPORT:
A = sp.sparse.csc_matrix(A)
try:
F = spcholesky(A)
# permutation matrix P
P = sp.sparse.lil_matrix(A.shape)
p = F.P()
P[np.arange(len(p)), p] = 1
# permute
L = F.L()
L = P.T.dot(L)
except CholmodNotPositiveDefiniteError as e:
raise NotPositiveDefiniteError('Matrix is not positive definite')
if sparse:
return L.T # upper triangular factorization
return L.T.A # upper triangular factorization
else:
msg = 'Could not import Scikit-Sparse or Suite-Sparse.\n'\
'This will slow down optimization for models with '\
'monotonicity/convexity penalties and many splines.\n'\
'See installation instructions for installing '\
'Scikit-Sparse and Suite-Sparse via Conda.'
if verbose:
warnings.warn(msg)
if sp.sparse.issparse(A):
A = A.A
try:
L = sp.linalg.cholesky(A, lower=False)
except LinAlgError as e:
raise NotPositiveDefiniteError('Matrix is not positive definite')
if sparse:
return sp.sparse.csc_matrix(L)
return L | [
"def",
"cholesky",
"(",
"A",
",",
"sparse",
"=",
"True",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"SKSPIMPORT",
":",
"A",
"=",
"sp",
".",
"sparse",
".",
"csc_matrix",
"(",
"A",
")",
"try",
":",
"F",
"=",
"spcholesky",
"(",
"A",
")",
"# permut... | Choose the best possible cholesky factorizor.
if possible, import the Scikit-Sparse sparse Cholesky method.
Permutes the output L to ensure A = L.H . L
otherwise defaults to numpy's non-sparse version
Parameters
----------
A : array-like
array to decompose
sparse : boolean, default: True
whether to return a sparse array
verbose : bool, default: True
whether to print warnings | [
"Choose",
"the",
"best",
"possible",
"cholesky",
"factorizor",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L33-L90 | train | 220,437 |
dswah/pyGAM | pygam/utils.py | make_2d | def make_2d(array, verbose=True):
"""
tiny tool to expand 1D arrays the way i want
Parameters
----------
array : array-like
verbose : bool, default: True
whether to print warnings
Returns
-------
np.array of with ndim = 2
"""
array = np.asarray(array)
if array.ndim < 2:
msg = 'Expected 2D input data array, but found {}D. '\
'Expanding to 2D.'.format(array.ndim)
if verbose:
warnings.warn(msg)
array = np.atleast_1d(array)[:,None]
return array | python | def make_2d(array, verbose=True):
"""
tiny tool to expand 1D arrays the way i want
Parameters
----------
array : array-like
verbose : bool, default: True
whether to print warnings
Returns
-------
np.array of with ndim = 2
"""
array = np.asarray(array)
if array.ndim < 2:
msg = 'Expected 2D input data array, but found {}D. '\
'Expanding to 2D.'.format(array.ndim)
if verbose:
warnings.warn(msg)
array = np.atleast_1d(array)[:,None]
return array | [
"def",
"make_2d",
"(",
"array",
",",
"verbose",
"=",
"True",
")",
":",
"array",
"=",
"np",
".",
"asarray",
"(",
"array",
")",
"if",
"array",
".",
"ndim",
"<",
"2",
":",
"msg",
"=",
"'Expected 2D input data array, but found {}D. '",
"'Expanding to 2D.'",
".",... | tiny tool to expand 1D arrays the way i want
Parameters
----------
array : array-like
verbose : bool, default: True
whether to print warnings
Returns
-------
np.array of with ndim = 2 | [
"tiny",
"tool",
"to",
"expand",
"1D",
"arrays",
"the",
"way",
"i",
"want"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L93-L115 | train | 220,438 |
dswah/pyGAM | pygam/utils.py | check_array | def check_array(array, force_2d=False, n_feats=None, ndim=None,
min_samples=1, name='Input data', verbose=True):
"""
tool to perform basic data validation.
called by check_X and check_y.
ensures that data:
- is ndim dimensional
- contains float-compatible data-types
- has at least min_samples
- has n_feats
- is finite
Parameters
----------
array : array-like
force_2d : boolean, default: False
whether to force a 2d array. Setting to True forces ndim = 2
n_feats : int, default: None
represents number of features that the array should have.
not enforced if n_feats is None.
ndim : int default: None
number of dimensions expected in the array
min_samples : int, default: 1
name : str, default: 'Input data'
name to use when referring to the array
verbose : bool, default: True
whether to print warnings
Returns
-------
array : validated array
"""
# make array
if force_2d:
array = make_2d(array, verbose=verbose)
ndim = 2
else:
array = np.array(array)
# cast to float
dtype = array.dtype
if dtype.kind not in ['i', 'f']:
try:
array = array.astype('float')
except ValueError as e:
raise ValueError('{} must be type int or float, '\
'but found type: {}\n'\
'Try transforming data with a LabelEncoder first.'\
.format(name, dtype.type))
# check finite
if not(np.isfinite(array).all()):
raise ValueError('{} must not contain Inf nor NaN'.format(name))
# check ndim
if ndim is not None:
if array.ndim != ndim:
raise ValueError('{} must have {} dimensions. '\
'found shape {}'.format(name, ndim, array.shape))
# check n_feats
if n_feats is not None:
m = array.shape[1]
if m != n_feats:
raise ValueError('{} must have {} features, '\
'but found {}'.format(name, n_feats, m))
# minimum samples
n = array.shape[0]
if n < min_samples:
raise ValueError('{} should have at least {} samples, '\
'but found {}'.format(name, min_samples, n))
return array | python | def check_array(array, force_2d=False, n_feats=None, ndim=None,
min_samples=1, name='Input data', verbose=True):
"""
tool to perform basic data validation.
called by check_X and check_y.
ensures that data:
- is ndim dimensional
- contains float-compatible data-types
- has at least min_samples
- has n_feats
- is finite
Parameters
----------
array : array-like
force_2d : boolean, default: False
whether to force a 2d array. Setting to True forces ndim = 2
n_feats : int, default: None
represents number of features that the array should have.
not enforced if n_feats is None.
ndim : int default: None
number of dimensions expected in the array
min_samples : int, default: 1
name : str, default: 'Input data'
name to use when referring to the array
verbose : bool, default: True
whether to print warnings
Returns
-------
array : validated array
"""
# make array
if force_2d:
array = make_2d(array, verbose=verbose)
ndim = 2
else:
array = np.array(array)
# cast to float
dtype = array.dtype
if dtype.kind not in ['i', 'f']:
try:
array = array.astype('float')
except ValueError as e:
raise ValueError('{} must be type int or float, '\
'but found type: {}\n'\
'Try transforming data with a LabelEncoder first.'\
.format(name, dtype.type))
# check finite
if not(np.isfinite(array).all()):
raise ValueError('{} must not contain Inf nor NaN'.format(name))
# check ndim
if ndim is not None:
if array.ndim != ndim:
raise ValueError('{} must have {} dimensions. '\
'found shape {}'.format(name, ndim, array.shape))
# check n_feats
if n_feats is not None:
m = array.shape[1]
if m != n_feats:
raise ValueError('{} must have {} features, '\
'but found {}'.format(name, n_feats, m))
# minimum samples
n = array.shape[0]
if n < min_samples:
raise ValueError('{} should have at least {} samples, '\
'but found {}'.format(name, min_samples, n))
return array | [
"def",
"check_array",
"(",
"array",
",",
"force_2d",
"=",
"False",
",",
"n_feats",
"=",
"None",
",",
"ndim",
"=",
"None",
",",
"min_samples",
"=",
"1",
",",
"name",
"=",
"'Input data'",
",",
"verbose",
"=",
"True",
")",
":",
"# make array",
"if",
"forc... | tool to perform basic data validation.
called by check_X and check_y.
ensures that data:
- is ndim dimensional
- contains float-compatible data-types
- has at least min_samples
- has n_feats
- is finite
Parameters
----------
array : array-like
force_2d : boolean, default: False
whether to force a 2d array. Setting to True forces ndim = 2
n_feats : int, default: None
represents number of features that the array should have.
not enforced if n_feats is None.
ndim : int default: None
number of dimensions expected in the array
min_samples : int, default: 1
name : str, default: 'Input data'
name to use when referring to the array
verbose : bool, default: True
whether to print warnings
Returns
-------
array : validated array | [
"tool",
"to",
"perform",
"basic",
"data",
"validation",
".",
"called",
"by",
"check_X",
"and",
"check_y",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L118-L192 | train | 220,439 |
dswah/pyGAM | pygam/utils.py | check_param | def check_param(param, param_name, dtype, constraint=None, iterable=True,
max_depth=2):
"""
checks the dtype of a parameter,
and whether it satisfies a numerical contraint
Parameters
---------
param : object
param_name : str, name of the parameter
dtype : str, desired dtype of the parameter
contraint : str, default: None
numerical constraint of the parameter.
if None, no constraint is enforced
iterable : bool, default: True
whether to allow iterable param
max_depth : int, default: 2
maximum nesting of the iterable.
only used if iterable == True
Returns
-------
list of validated and converted parameter(s)
"""
msg = []
msg.append(param_name + " must be "+ dtype)
if iterable:
msg.append(" or nested iterable of depth " + str(max_depth) +
" containing " + dtype + "s")
msg.append(", but found " + param_name + " = {}".format(repr(param)))
if constraint is not None:
msg = (" " + constraint).join(msg)
else:
msg = ''.join(msg)
# check param is numerical
try:
param_dt = np.array(flatten(param))# + np.zeros_like(flatten(param), dtype='int')
# param_dt = np.array(param).astype(dtype)
except (ValueError, TypeError):
raise TypeError(msg)
# check iterable
if iterable:
if check_iterable_depth(param) > max_depth:
raise TypeError(msg)
if (not iterable) and isiterable(param):
raise TypeError(msg)
# check param is correct dtype
if not (param_dt == np.array(flatten(param)).astype(float)).all():
raise TypeError(msg)
# check constraint
if constraint is not None:
if not (eval('np.' + repr(param_dt) + constraint)).all():
raise ValueError(msg)
return param | python | def check_param(param, param_name, dtype, constraint=None, iterable=True,
max_depth=2):
"""
checks the dtype of a parameter,
and whether it satisfies a numerical contraint
Parameters
---------
param : object
param_name : str, name of the parameter
dtype : str, desired dtype of the parameter
contraint : str, default: None
numerical constraint of the parameter.
if None, no constraint is enforced
iterable : bool, default: True
whether to allow iterable param
max_depth : int, default: 2
maximum nesting of the iterable.
only used if iterable == True
Returns
-------
list of validated and converted parameter(s)
"""
msg = []
msg.append(param_name + " must be "+ dtype)
if iterable:
msg.append(" or nested iterable of depth " + str(max_depth) +
" containing " + dtype + "s")
msg.append(", but found " + param_name + " = {}".format(repr(param)))
if constraint is not None:
msg = (" " + constraint).join(msg)
else:
msg = ''.join(msg)
# check param is numerical
try:
param_dt = np.array(flatten(param))# + np.zeros_like(flatten(param), dtype='int')
# param_dt = np.array(param).astype(dtype)
except (ValueError, TypeError):
raise TypeError(msg)
# check iterable
if iterable:
if check_iterable_depth(param) > max_depth:
raise TypeError(msg)
if (not iterable) and isiterable(param):
raise TypeError(msg)
# check param is correct dtype
if not (param_dt == np.array(flatten(param)).astype(float)).all():
raise TypeError(msg)
# check constraint
if constraint is not None:
if not (eval('np.' + repr(param_dt) + constraint)).all():
raise ValueError(msg)
return param | [
"def",
"check_param",
"(",
"param",
",",
"param_name",
",",
"dtype",
",",
"constraint",
"=",
"None",
",",
"iterable",
"=",
"True",
",",
"max_depth",
"=",
"2",
")",
":",
"msg",
"=",
"[",
"]",
"msg",
".",
"append",
"(",
"param_name",
"+",
"\" must be \""... | checks the dtype of a parameter,
and whether it satisfies a numerical contraint
Parameters
---------
param : object
param_name : str, name of the parameter
dtype : str, desired dtype of the parameter
contraint : str, default: None
numerical constraint of the parameter.
if None, no constraint is enforced
iterable : bool, default: True
whether to allow iterable param
max_depth : int, default: 2
maximum nesting of the iterable.
only used if iterable == True
Returns
-------
list of validated and converted parameter(s) | [
"checks",
"the",
"dtype",
"of",
"a",
"parameter",
"and",
"whether",
"it",
"satisfies",
"a",
"numerical",
"contraint"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L341-L400 | train | 220,440 |
dswah/pyGAM | pygam/utils.py | get_link_domain | def get_link_domain(link, dist):
"""
tool to identify the domain of a given monotonic link function
Parameters
----------
link : Link object
dist : Distribution object
Returns
-------
domain : list of length 2, representing the interval of the domain.
"""
domain = np.array([-np.inf, -1, 0, 1, np.inf])
domain = domain[~np.isnan(link.link(domain, dist))]
return [domain[0], domain[-1]] | python | def get_link_domain(link, dist):
"""
tool to identify the domain of a given monotonic link function
Parameters
----------
link : Link object
dist : Distribution object
Returns
-------
domain : list of length 2, representing the interval of the domain.
"""
domain = np.array([-np.inf, -1, 0, 1, np.inf])
domain = domain[~np.isnan(link.link(domain, dist))]
return [domain[0], domain[-1]] | [
"def",
"get_link_domain",
"(",
"link",
",",
"dist",
")",
":",
"domain",
"=",
"np",
".",
"array",
"(",
"[",
"-",
"np",
".",
"inf",
",",
"-",
"1",
",",
"0",
",",
"1",
",",
"np",
".",
"inf",
"]",
")",
"domain",
"=",
"domain",
"[",
"~",
"np",
"... | tool to identify the domain of a given monotonic link function
Parameters
----------
link : Link object
dist : Distribution object
Returns
-------
domain : list of length 2, representing the interval of the domain. | [
"tool",
"to",
"identify",
"the",
"domain",
"of",
"a",
"given",
"monotonic",
"link",
"function"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L402-L417 | train | 220,441 |
dswah/pyGAM | pygam/utils.py | load_diagonal | def load_diagonal(cov, load=None):
"""Return the given square matrix with a small amount added to the diagonal
to make it positive semi-definite.
"""
n, m = cov.shape
assert n == m, "matrix must be square, but found shape {}".format((n, m))
if load is None:
load = np.sqrt(np.finfo(np.float64).eps) # machine epsilon
return cov + np.eye(n) * load | python | def load_diagonal(cov, load=None):
"""Return the given square matrix with a small amount added to the diagonal
to make it positive semi-definite.
"""
n, m = cov.shape
assert n == m, "matrix must be square, but found shape {}".format((n, m))
if load is None:
load = np.sqrt(np.finfo(np.float64).eps) # machine epsilon
return cov + np.eye(n) * load | [
"def",
"load_diagonal",
"(",
"cov",
",",
"load",
"=",
"None",
")",
":",
"n",
",",
"m",
"=",
"cov",
".",
"shape",
"assert",
"n",
"==",
"m",
",",
"\"matrix must be square, but found shape {}\"",
".",
"format",
"(",
"(",
"n",
",",
"m",
")",
")",
"if",
"... | Return the given square matrix with a small amount added to the diagonal
to make it positive semi-definite. | [
"Return",
"the",
"given",
"square",
"matrix",
"with",
"a",
"small",
"amount",
"added",
"to",
"the",
"diagonal",
"to",
"make",
"it",
"positive",
"semi",
"-",
"definite",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L420-L429 | train | 220,442 |
dswah/pyGAM | pygam/utils.py | round_to_n_decimal_places | def round_to_n_decimal_places(array, n=3):
"""
tool to keep round a float to n decimal places.
n=3 by default
Parameters
----------
array : np.array
n : int. number of decimal places to keep
Returns
-------
array : rounded np.array
"""
# check if in scientific notation
if issubclass(array.__class__, float) and '%.e'%array == str(array):
return array # do nothing
shape = np.shape(array)
out = ((np.atleast_1d(array) * 10**n).round().astype('int') / (10.**n))
return out.reshape(shape) | python | def round_to_n_decimal_places(array, n=3):
"""
tool to keep round a float to n decimal places.
n=3 by default
Parameters
----------
array : np.array
n : int. number of decimal places to keep
Returns
-------
array : rounded np.array
"""
# check if in scientific notation
if issubclass(array.__class__, float) and '%.e'%array == str(array):
return array # do nothing
shape = np.shape(array)
out = ((np.atleast_1d(array) * 10**n).round().astype('int') / (10.**n))
return out.reshape(shape) | [
"def",
"round_to_n_decimal_places",
"(",
"array",
",",
"n",
"=",
"3",
")",
":",
"# check if in scientific notation",
"if",
"issubclass",
"(",
"array",
".",
"__class__",
",",
"float",
")",
"and",
"'%.e'",
"%",
"array",
"==",
"str",
"(",
"array",
")",
":",
"... | tool to keep round a float to n decimal places.
n=3 by default
Parameters
----------
array : np.array
n : int. number of decimal places to keep
Returns
-------
array : rounded np.array | [
"tool",
"to",
"keep",
"round",
"a",
"float",
"to",
"n",
"decimal",
"places",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L432-L453 | train | 220,443 |
dswah/pyGAM | pygam/utils.py | space_row | def space_row(left, right, filler=' ', total_width=-1):
"""space the data in a row with optional filling
Arguments
---------
left : str, to be aligned left
right : str, to be aligned right
filler : str, default ' '.
must be of length 1
total_width : int, width of line.
if negative number is specified,
then that number of spaces is used between the left and right text
Returns
-------
str
"""
left = str(left)
right = str(right)
filler = str(filler)[:1]
if total_width < 0:
spacing = - total_width
else:
spacing = total_width - len(left) - len(right)
return left + filler * spacing + right | python | def space_row(left, right, filler=' ', total_width=-1):
"""space the data in a row with optional filling
Arguments
---------
left : str, to be aligned left
right : str, to be aligned right
filler : str, default ' '.
must be of length 1
total_width : int, width of line.
if negative number is specified,
then that number of spaces is used between the left and right text
Returns
-------
str
"""
left = str(left)
right = str(right)
filler = str(filler)[:1]
if total_width < 0:
spacing = - total_width
else:
spacing = total_width - len(left) - len(right)
return left + filler * spacing + right | [
"def",
"space_row",
"(",
"left",
",",
"right",
",",
"filler",
"=",
"' '",
",",
"total_width",
"=",
"-",
"1",
")",
":",
"left",
"=",
"str",
"(",
"left",
")",
"right",
"=",
"str",
"(",
"right",
")",
"filler",
"=",
"str",
"(",
"filler",
")",
"[",
... | space the data in a row with optional filling
Arguments
---------
left : str, to be aligned left
right : str, to be aligned right
filler : str, default ' '.
must be of length 1
total_width : int, width of line.
if negative number is specified,
then that number of spaces is used between the left and right text
Returns
-------
str | [
"space",
"the",
"data",
"in",
"a",
"row",
"with",
"optional",
"filling"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L489-L515 | train | 220,444 |
def gen_edge_knots(data, dtype, verbose=True):
    """Return the two edge knots for *data*.

    For categorical features the knots sit half a step outside the
    observed category codes (assumed to live in [0, k-1]); for
    numerical features they are simply the data minimum and maximum.

    Parameters
    ----------
    data : array-like with one dimension
    dtype : str in {'categorical', 'numerical'}
    verbose : bool, default: True
        whether to warn when the feature is constant

    Returns
    -------
    np.array containing the two ordered edge knots

    Raises
    ------
    ValueError if dtype is not recognized.
    """
    if dtype not in ('categorical', 'numerical'):
        raise ValueError('unsupported dtype: {}'.format(dtype))

    lo, hi = np.min(data), np.max(data)
    if dtype == 'categorical':
        # pad half a category step outside the observed range
        return np.r_[lo - 0.5, hi + 0.5]

    knots = np.r_[lo, hi]
    if lo == hi and verbose:
        warnings.warn('Data contains constant feature. '\
                      'Consider removing and setting fit_intercept=True',
                      stacklevel=2)
    return knots
"def",
"gen_edge_knots",
"(",
"data",
",",
"dtype",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"dtype",
"not",
"in",
"[",
"'categorical'",
",",
"'numerical'",
"]",
":",
"raise",
"ValueError",
"(",
"'unsupported dtype: {}'",
".",
"format",
"(",
"dtype",
"... | generate uniform knots from data including the edges of the data
for discrete data, assumes k categories in [0, k-1] interval
Parameters
----------
data : array-like with one dimension
dtype : str in {'categorical', 'numerical'}
verbose : bool, default: True
whether to print warnings
Returns
-------
np.array containing ordered knots | [
"generate",
"uniform",
"knots",
"from",
"data",
"including",
"the",
"edges",
"of",
"the",
"data"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L539-L566 | train | 220,445 |
def ylogydu(y, u):
    """Compute ``y * log(y / u)`` elementwise, returning 0 wherever
    ``y == 0`` (the limit as y -> 0).

    Parameters
    ----------
    y : array-like of len(n)
    u : array-like of len(n)

    Returns
    -------
    np.array len(n)
    """
    # NOTE(review): y is indexed with a boolean mask below, so it must
    # support ndarray-style indexing — confirm callers pass arrays
    nonzero = np.atleast_1d(y) != 0.
    result = np.zeros_like(u)
    result[nonzero] = y[nonzero] * np.log(y[nonzero] / u[nonzero])
    return result
"def",
"ylogydu",
"(",
"y",
",",
"u",
")",
":",
"mask",
"=",
"(",
"np",
".",
"atleast_1d",
"(",
"y",
")",
"!=",
"0.",
")",
"out",
"=",
"np",
".",
"zeros_like",
"(",
"u",
")",
"out",
"[",
"mask",
"]",
"=",
"y",
"[",
"mask",
"]",
"*",
"np",
... | tool to give desired output for the limit as y -> 0, which is 0
Parameters
----------
y : array-like of len(n)
u : array-like of len(n)
Returns
-------
np.array len(n) | [
"tool",
"to",
"give",
"desired",
"output",
"for",
"the",
"limit",
"as",
"y",
"-",
">",
"0",
"which",
"is",
"0"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L710-L726 | train | 220,446 |
def combine(*args):
    """Cartesian product of the input lists, built by recursive tree search.

    Useful for developing the grid in a grid search.

    Parameters
    ----------
    args : list of lists

    Returns
    -------
    list of all the combinations of the elements in the input lists
    """
    if hasattr(args, '__iter__') and len(args) > 1:
        # combine all but the last list, then graft every element of the
        # last list onto each partial combination
        return [
            (prefix if hasattr(prefix, '__iter__') else [prefix]) + [item]
            for prefix in combine(*args[:-1])
            for item in args[-1]
        ]
    else:
        # base case: one list left, wrap each element in its own list
        return [[arg] for arg in args[0]]
"def",
"combine",
"(",
"*",
"args",
")",
":",
"if",
"hasattr",
"(",
"args",
",",
"'__iter__'",
")",
"and",
"(",
"len",
"(",
"args",
")",
">",
"1",
")",
":",
"subtree",
"=",
"combine",
"(",
"*",
"args",
"[",
":",
"-",
"1",
"]",
")",
"tree",
"=... | tool to perform tree search via recursion
useful for developing the grid in a grid search
Parameters
----------
args : list of lists
Returns
-------
list of all the combinations of the elements in the input lists | [
"tool",
"to",
"perform",
"tree",
"search",
"via",
"recursion",
"useful",
"for",
"developing",
"the",
"grid",
"in",
"a",
"grid",
"search"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L729-L753 | train | 220,447 |
def isiterable(obj, reject_string=True):
    """Report whether *obj* looks iterable (i.e. has a ``__len__``).

    In Python 3 strings count as iterables, so ``reject_string`` offers
    a way to exclude them.

    Parameters:
    -----------
    obj : object to analyse
    reject_string : bool, whether to ignore strings

    Returns:
    --------
    bool, if the object is itereable.
    """
    if reject_string and isinstance(obj, str):
        return False
    return hasattr(obj, '__len__')
"def",
"isiterable",
"(",
"obj",
",",
"reject_string",
"=",
"True",
")",
":",
"iterable",
"=",
"hasattr",
"(",
"obj",
",",
"'__len__'",
")",
"if",
"reject_string",
":",
"iterable",
"=",
"iterable",
"and",
"not",
"isinstance",
"(",
"obj",
",",
"str",
")",... | convenience tool to detect if something is iterable.
in python3, strings count as iterables to we have the option to exclude them
Parameters:
-----------
obj : object to analyse
reject_string : bool, whether to ignore strings
Returns:
--------
bool, if the object is itereable. | [
"convenience",
"tool",
"to",
"detect",
"if",
"something",
"is",
"iterable",
".",
"in",
"python3",
"strings",
"count",
"as",
"iterables",
"to",
"we",
"have",
"the",
"option",
"to",
"exclude",
"them"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L755-L774 | train | 220,448 |
def check_iterable_depth(obj, max_depth=100):
    """Measure the maximum depth of nesting of the iterable.

    Parameters
    ----------
    obj : iterable
    max_depth : int, default: 100
        maximum depth beyond which we stop counting

    Returns
    -------
    int
    """
    def _children(node):
        # concatenate the contents of every iterable item at this level
        found = []
        for element in node:
            if isiterable(element):
                found += list(element)
        return found

    level = 0
    # descend one level at a time until nothing iterable remains
    while level < max_depth and isiterable(obj) and len(obj) > 0:
        level += 1
        obj = _children(obj)
    return level
"def",
"check_iterable_depth",
"(",
"obj",
",",
"max_depth",
"=",
"100",
")",
":",
"def",
"find_iterables",
"(",
"obj",
")",
":",
"iterables",
"=",
"[",
"]",
"for",
"item",
"in",
"obj",
":",
"if",
"isiterable",
"(",
"item",
")",
":",
"iterables",
"+=",... | find the maximum depth of nesting of the iterable
Parameters
----------
obj : iterable
max_depth : int, default: 100
maximum depth beyond which we stop counting
Returns
-------
int | [
"find",
"the",
"maximum",
"depth",
"of",
"nesting",
"of",
"the",
"iterable"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L776-L800 | train | 220,449 |
def flatten(iterable):
    """Recursively flatten any nested iterable.

    example:
        flatten([[[],[4]],[[[5,[6,7, []]]]]])
        >>> [4, 5, 6, 7]

        flatten('hello')
        >>> 'hello'

    Parameters
    ----------
    iterable

    Returns
    -------
    flattened object
    """
    # non-iterables (including strings, per isiterable) pass through
    if not isiterable(iterable):
        return iterable
    result = []
    for element in list(iterable):
        flattened = flatten(element)
        result += flattened if isiterable(flattened) else [flattened]
    return result
"def",
"flatten",
"(",
"iterable",
")",
":",
"if",
"isiterable",
"(",
"iterable",
")",
":",
"flat",
"=",
"[",
"]",
"for",
"item",
"in",
"list",
"(",
"iterable",
")",
":",
"item",
"=",
"flatten",
"(",
"item",
")",
"if",
"not",
"isiterable",
"(",
"it... | convenience tool to flatten any nested iterable
example:
flatten([[[],[4]],[[[5,[6,7, []]]]]])
>>> [4, 5, 6, 7]
flatten('hello')
>>> 'hello'
Parameters
----------
iterable
Returns
-------
flattened object | [
"convenience",
"tool",
"to",
"flatten",
"any",
"nested",
"iterable"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L802-L830 | train | 220,450 |
def tensor_product(a, b, reshape=True):
    """Row-wise tensor (outer) product of two matrices.

    For ``a`` of shape (n, m_a) and ``b`` of shape (n, m_b), row ``i``
    of the result is the outer product of ``a[i]`` and ``b[i]``.

    Parameters
    ---------
    a : array-like of shape (n, m_a)
    b : array-like of shape (n, m_b)
    reshape : bool, default True
        whether to reshape the result to be 2-dimensional ie
        (n, m_a * m_b)
        or return a 3-dimensional tensor ie
        (n, m_a, m_b)

    Returns
    -------
    dense np.ndarray of shape
        (n, m_a * m_b) if reshape = True.
        or
        (n, m_a, m_b) otherwise

    Raises
    ------
    ValueError if the two matrices do not have the same number of rows.
    """
    assert a.ndim == 2, 'matrix a must be 2-dimensional, but found {} dimensions'.format(a.ndim)
    assert b.ndim == 2, 'matrix b must be 2-dimensional, but found {} dimensions'.format(b.ndim)

    n_a, m_a = a.shape
    n_b, m_b = b.shape
    if n_a != n_b:
        raise ValueError('both arguments must have the same number of samples')

    # densify sparse inputs before broadcasting
    if sp.sparse.issparse(a):
        a = a.A
    if sp.sparse.issparse(b):
        b = b.A

    product = a[..., :, None] * b[..., None, :]
    return product.reshape(n_a, m_a * m_b) if reshape else product
"def",
"tensor_product",
"(",
"a",
",",
"b",
",",
"reshape",
"=",
"True",
")",
":",
"assert",
"a",
".",
"ndim",
"==",
"2",
",",
"'matrix a must be 2-dimensional, but found {} dimensions'",
".",
"format",
"(",
"a",
".",
"ndim",
")",
"assert",
"b",
".",
"ndi... | compute the tensor protuct of two matrices a and b
if a is (n, m_a), b is (n, m_b),
then the result is
(n, m_a * m_b) if reshape = True.
or
(n, m_a, m_b) otherwise
Parameters
---------
a : array-like of shape (n, m_a)
b : array-like of shape (n, m_b)
reshape : bool, default True
whether to reshape the result to be 2-dimensional ie
(n, m_a * m_b)
or return a 3-dimensional tensor ie
(n, m_a, m_b)
Returns
-------
dense np.ndarray of shape
(n, m_a * m_b) if reshape = True.
or
(n, m_a, m_b) otherwise | [
"compute",
"the",
"tensor",
"protuct",
"of",
"two",
"matrices",
"a",
"and",
"b"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/utils.py#L833-L882 | train | 220,451 |
def link(self, mu, dist):
    """GLM link function: map the mean *mu* to the linear predictor.

    Parameters
    ----------
    mu : array-like of legth n
    dist : Distribution instance
        must expose a ``levels`` attribute

    Returns
    -------
    lp : np.array of length n
    """
    # log-odds of mu relative to the remaining levels
    log_mu = np.log(mu)
    log_rest = np.log(dist.levels - mu)
    return log_mu - log_rest
"def",
"link",
"(",
"self",
",",
"mu",
",",
"dist",
")",
":",
"return",
"np",
".",
"log",
"(",
"mu",
")",
"-",
"np",
".",
"log",
"(",
"dist",
".",
"levels",
"-",
"mu",
")"
] | glm link function
this is useful for going from mu to the linear prediction
Parameters
----------
mu : array-like of legth n
dist : Distribution instance
Returns
-------
lp : np.array of length n | [
"glm",
"link",
"function",
"this",
"is",
"useful",
"for",
"going",
"from",
"mu",
"to",
"the",
"linear",
"prediction"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/links.py#L103-L117 | train | 220,452 |
def mu(self, lp, dist):
    """GLM mean function (inverse of the link): map the linear
    predictor *lp* back to the mean.

    Parameters
    ----------
    lp : array-like of legth n
    dist : Distribution instance
        must expose a ``levels`` attribute

    Returns
    -------
    mu : np.array of length n
    """
    exp_lp = np.exp(lp)
    # scaled logistic function
    return dist.levels * exp_lp / (1 + exp_lp)
"def",
"mu",
"(",
"self",
",",
"lp",
",",
"dist",
")",
":",
"elp",
"=",
"np",
".",
"exp",
"(",
"lp",
")",
"return",
"dist",
".",
"levels",
"*",
"elp",
"/",
"(",
"elp",
"+",
"1",
")"
] | glm mean function, ie inverse of link function
this is useful for going from the linear prediction to mu
Parameters
----------
lp : array-like of legth n
dist : Distribution instance
Returns
-------
mu : np.array of length n | [
"glm",
"mean",
"function",
"ie",
"inverse",
"of",
"link",
"function",
"this",
"is",
"useful",
"for",
"going",
"from",
"the",
"linear",
"prediction",
"to",
"mu"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/links.py#L119-L134 | train | 220,453 |
def gradient(self, mu, dist):
    """Derivative of the link function with respect to *mu*.

    Parameters
    ----------
    mu : array-like of legth n
    dist : Distribution instance
        must expose a ``levels`` attribute

    Returns
    -------
    grad : np.array of length n
    """
    levels = dist.levels
    return levels / (mu * (levels - mu))
"def",
"gradient",
"(",
"self",
",",
"mu",
",",
"dist",
")",
":",
"return",
"dist",
".",
"levels",
"/",
"(",
"mu",
"*",
"(",
"dist",
".",
"levels",
"-",
"mu",
")",
")"
] | derivative of the link function wrt mu
Parameters
----------
mu : array-like of legth n
dist : Distribution instance
Returns
-------
grad : np.array of length n | [
"derivative",
"of",
"the",
"link",
"function",
"wrt",
"mu"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/links.py#L136-L149 | train | 220,454 |
def derivative(n, coef, derivative=2, periodic=False):
    """
    Builds a penalty matrix for P-Splines with continuous features.
    Penalizes the squared differences between basis coefficients.

    Parameters
    ----------
    n : int
        number of splines
    coef : unused
        for compatibility with constraints
    derivative: int, default: 2
        which derivative do we penalize.
        derivative is 1, we penalize 1st order derivatives,
        derivative is 2, we penalize 2nd order derivatives, etc
    periodic: bool, default: False
        whether to build the difference penalty with wrapped (periodic)
        boundary conditions across the edges of the basis

    Returns
    -------
    penalty matrix : sparse csc matrix of shape (n,n)
    """
    if n == 1:
        # no derivative for constant functions
        return sp.sparse.csc_matrix(0.)
    # difference operator; when periodic, the identity is augmented by
    # `derivative` extra rows/cols on each side so the wrap can be folded in
    D = sparse_diff(sp.sparse.identity(n + 2*derivative*periodic).tocsc(), n=derivative).tolil()
    if periodic:
        # wrap penalty: fold the leading columns onto the columns near the
        # opposite edge; the sign alternates with the difference order
        cols = D[:, :derivative]
        D[:, -2 * derivative:-derivative] += cols * (-1) ** derivative

        # do symmetric operation on lower half of matrix
        n_rows = int((n + 2 * derivative)/2)
        D[-n_rows:] = D[:n_rows][::-1, ::-1]

        # keep only the center of the augmented matrix
        D = D[derivative:-derivative, derivative:-derivative]
    return D.dot(D.T).tocsc()
"def",
"derivative",
"(",
"n",
",",
"coef",
",",
"derivative",
"=",
"2",
",",
"periodic",
"=",
"False",
")",
":",
"if",
"n",
"==",
"1",
":",
"# no derivative for constant functions",
"return",
"sp",
".",
"sparse",
".",
"csc_matrix",
"(",
"0.",
")",
"D",
... | Builds a penalty matrix for P-Splines with continuous features.
Penalizes the squared differences between basis coefficients.
Parameters
----------
n : int
number of splines
coef : unused
for compatibility with constraints
derivative: int, default: 2
which derivative do we penalize.
derivative is 1, we penalize 1st order derivatives,
derivative is 2, we penalize 2nd order derivatives, etc
Returns
-------
penalty matrix : sparse csc matrix of shape (n,n) | [
"Builds",
"a",
"penalty",
"matrix",
"for",
"P",
"-",
"Splines",
"with",
"continuous",
"features",
".",
"Penalizes",
"the",
"squared",
"differences",
"between",
"basis",
"coefficients",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/penalties.py#L9-L47 | train | 220,455 |
def monotonicity_(n, coef, increasing=True):
    """Penalty matrix penalizing violations of monotonicity.

    Builds a penalty matrix for P-Splines with continuous features that
    only activates on neighbouring-coefficient pairs that currently
    violate the requested monotonicity.

    Parameters
    ----------
    n : int
        number of splines
    coef : array-like
        coefficients of the feature function
    increasing : bool, default: True
        whether to enforce monotic increasing, or decreasing functions

    Returns
    -------
    penalty matrix : sparse csc matrix of shape (n,n)

    Raises
    ------
    ValueError if n does not equal the number of coefficients.
    """
    flat = coef.ravel()
    if n != len(flat):
        raise ValueError('dimension mismatch: expected n equals len(coef), '\
                         'but found n = {}, coef.shape = {}.'\
                         .format(n, coef.shape))

    if n == 1:
        # a constant function is trivially monotonic
        return sp.sparse.csc_matrix(0.)

    # select only the neighbouring pairs that violate the constraint
    if increasing:
        violations = np.diff(flat) < 0
    else:
        violations = np.diff(flat) > 0
    mask = sp.sparse.diags(violations.astype(float))

    D = sparse_diff(sp.sparse.identity(n).tocsc(), n=1) * mask
    return D.dot(D.T).tocsc()
"def",
"monotonicity_",
"(",
"n",
",",
"coef",
",",
"increasing",
"=",
"True",
")",
":",
"if",
"n",
"!=",
"len",
"(",
"coef",
".",
"ravel",
"(",
")",
")",
":",
"raise",
"ValueError",
"(",
"'dimension mismatch: expected n equals len(coef), '",
"'but found n = {... | Builds a penalty matrix for P-Splines with continuous features.
Penalizes violation of monotonicity in the feature function.
Parameters
----------
n : int
number of splines
coef : array-like
coefficients of the feature function
increasing : bool, default: True
whether to enforce monotic increasing, or decreasing functions
Returns
-------
penalty matrix : sparse csc matrix of shape (n,n) | [
"Builds",
"a",
"penalty",
"matrix",
"for",
"P",
"-",
"Splines",
"with",
"continuous",
"features",
".",
"Penalizes",
"violation",
"of",
"monotonicity",
"in",
"the",
"feature",
"function",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/penalties.py#L71-L106 | train | 220,456 |
def none(n, coef):
    """
    Build a matrix of zeros for features that should go unpenalized

    Parameters
    ----------
    n : int
        number of splines
    coef : unused
        for compatibility with constraints

    Returns
    -------
    penalty matrix : sparse csc matrix of shape (n,n)
    """
    # construct the empty sparse matrix directly instead of materializing
    # a dense (n, n) array of zeros first and converting it
    return sp.sparse.csc_matrix((n, n))
"def",
"none",
"(",
"n",
",",
"coef",
")",
":",
"return",
"sp",
".",
"sparse",
".",
"csc_matrix",
"(",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"n",
")",
")",
")"
] | Build a matrix of zeros for features that should go unpenalized
Parameters
----------
n : int
number of splines
coef : unused
for compatibility with constraints
Returns
-------
penalty matrix : sparse csc matrix of shape (n,n) | [
"Build",
"a",
"matrix",
"of",
"zeros",
"for",
"features",
"that",
"should",
"go",
"unpenalized"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/penalties.py#L245-L260 | train | 220,457 |
def wrap_penalty(p, fit_linear, linear_penalty=0.):
    """Account for a unity penalty on the linear term of a feature.

    example:
        p = wrap_penalty(derivative, fit_linear=True)(n, coef)

    Parameters
    ----------
    p : callable.
        penalty-matrix-generating function.
    fit_linear : boolean.
        whether the current feature has a linear term or not.
    linear_penalty : float, default: 0.
        penalty on the linear term

    Returns
    -------
    wrapped_p : callable
        modified penalty-matrix-generating function
    """
    def wrapped_p(n, *args):
        if not fit_linear:
            # no linear term: delegate to the original penalty untouched
            return p(n, *args)
        if n == 1:
            # only the linear term remains
            return sp.sparse.block_diag([linear_penalty], format='csc')
        # linear penalty in the top-left corner, spline penalty below
        return sp.sparse.block_diag([linear_penalty, p(n - 1, *args)],
                                    format='csc')
    return wrapped_p
"def",
"wrap_penalty",
"(",
"p",
",",
"fit_linear",
",",
"linear_penalty",
"=",
"0.",
")",
":",
"def",
"wrapped_p",
"(",
"n",
",",
"*",
"args",
")",
":",
"if",
"fit_linear",
":",
"if",
"n",
"==",
"1",
":",
"return",
"sp",
".",
"sparse",
".",
"block... | tool to account for unity penalty on the linear term of any feature.
example:
p = wrap_penalty(derivative, fit_linear=True)(n, coef)
Parameters
----------
p : callable.
penalty-matrix-generating function.
fit_linear : boolean.
whether the current feature has a linear term or not.
linear_penalty : float, default: 0.
penalty on the linear term
Returns
-------
wrapped_p : callable
modified penalty-matrix-generating function | [
"tool",
"to",
"account",
"for",
"unity",
"penalty",
"on",
"the",
"linear",
"term",
"of",
"any",
"feature",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/penalties.py#L262-L291 | train | 220,458 |
dswah/pyGAM | pygam/penalties.py | sparse_diff | def sparse_diff(array, n=1, axis=-1):
"""
A ported sparse version of np.diff.
Uses recursion to compute higher order differences
Parameters
----------
array : sparse array
n : int, default: 1
differencing order
axis : int, default: -1
axis along which differences are computed
Returns
-------
diff_array : sparse array
same shape as input array,
but 'axis' dimension is smaller by 'n'.
"""
if (n < 0) or (int(n) != n):
raise ValueError('Expected order is non-negative integer, '\
'but found: {}'.format(n))
if not sp.sparse.issparse(array):
warnings.warn('Array is not sparse. Consider using numpy.diff')
if n == 0:
return array
nd = array.ndim
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
A = sparse_diff(array, n-1, axis=axis)
return A[slice1] - A[slice2] | python | def sparse_diff(array, n=1, axis=-1):
"""
A ported sparse version of np.diff.
Uses recursion to compute higher order differences
Parameters
----------
array : sparse array
n : int, default: 1
differencing order
axis : int, default: -1
axis along which differences are computed
Returns
-------
diff_array : sparse array
same shape as input array,
but 'axis' dimension is smaller by 'n'.
"""
if (n < 0) or (int(n) != n):
raise ValueError('Expected order is non-negative integer, '\
'but found: {}'.format(n))
if not sp.sparse.issparse(array):
warnings.warn('Array is not sparse. Consider using numpy.diff')
if n == 0:
return array
nd = array.ndim
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
A = sparse_diff(array, n-1, axis=axis)
return A[slice1] - A[slice2] | [
"def",
"sparse_diff",
"(",
"array",
",",
"n",
"=",
"1",
",",
"axis",
"=",
"-",
"1",
")",
":",
"if",
"(",
"n",
"<",
"0",
")",
"or",
"(",
"int",
"(",
"n",
")",
"!=",
"n",
")",
":",
"raise",
"ValueError",
"(",
"'Expected order is non-negative integer,... | A ported sparse version of np.diff.
Uses recursion to compute higher order differences
Parameters
----------
array : sparse array
n : int, default: 1
differencing order
axis : int, default: -1
axis along which differences are computed
Returns
-------
diff_array : sparse array
same shape as input array,
but 'axis' dimension is smaller by 'n'. | [
"A",
"ported",
"sparse",
"version",
"of",
"np",
".",
"diff",
".",
"Uses",
"recursion",
"to",
"compute",
"higher",
"order",
"differences"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/penalties.py#L293-L330 | train | 220,459 |
dswah/pyGAM | pygam/pygam.py | GAM._linear_predictor | def _linear_predictor(self, X=None, modelmat=None, b=None, term=-1):
"""linear predictor
compute the linear predictor portion of the model
ie multiply the model matrix by the spline basis coefficients
Parameters
---------
at least 1 of (X, modelmat)
and
at least 1 of (b, feature)
X : array-like of shape (n_samples, m_features) or None, optional
containing the input dataset
if None, will attempt to use modelmat
modelmat : array-like or None, optional
contains the spline basis for each feature evaluated at the input
values for each feature, ie model matrix
if None, will attempt to construct the model matrix from X
b : array-like or None, optional
contains the spline coefficients
if None, will use current model coefficients
feature : int, optional
feature for which to compute the linear prediction
if -1, will compute for all features
Returns
-------
lp : np.array of shape (n_samples,)
"""
if modelmat is None:
modelmat = self._modelmat(X, term=term)
if b is None:
b = self.coef_[self.terms.get_coef_indices(term)]
return modelmat.dot(b).flatten() | python | def _linear_predictor(self, X=None, modelmat=None, b=None, term=-1):
"""linear predictor
compute the linear predictor portion of the model
ie multiply the model matrix by the spline basis coefficients
Parameters
---------
at least 1 of (X, modelmat)
and
at least 1 of (b, feature)
X : array-like of shape (n_samples, m_features) or None, optional
containing the input dataset
if None, will attempt to use modelmat
modelmat : array-like or None, optional
contains the spline basis for each feature evaluated at the input
values for each feature, ie model matrix
if None, will attempt to construct the model matrix from X
b : array-like or None, optional
contains the spline coefficients
if None, will use current model coefficients
feature : int, optional
feature for which to compute the linear prediction
if -1, will compute for all features
Returns
-------
lp : np.array of shape (n_samples,)
"""
if modelmat is None:
modelmat = self._modelmat(X, term=term)
if b is None:
b = self.coef_[self.terms.get_coef_indices(term)]
return modelmat.dot(b).flatten() | [
"def",
"_linear_predictor",
"(",
"self",
",",
"X",
"=",
"None",
",",
"modelmat",
"=",
"None",
",",
"b",
"=",
"None",
",",
"term",
"=",
"-",
"1",
")",
":",
"if",
"modelmat",
"is",
"None",
":",
"modelmat",
"=",
"self",
".",
"_modelmat",
"(",
"X",
"... | linear predictor
compute the linear predictor portion of the model
ie multiply the model matrix by the spline basis coefficients
Parameters
---------
at least 1 of (X, modelmat)
and
at least 1 of (b, feature)
X : array-like of shape (n_samples, m_features) or None, optional
containing the input dataset
if None, will attempt to use modelmat
modelmat : array-like or None, optional
contains the spline basis for each feature evaluated at the input
values for each feature, ie model matrix
if None, will attempt to construct the model matrix from X
b : array-like or None, optional
contains the spline coefficients
if None, will use current model coefficients
feature : int, optional
feature for which to compute the linear prediction
if -1, will compute for all features
Returns
-------
lp : np.array of shape (n_samples,) | [
"linear",
"predictor",
"compute",
"the",
"linear",
"predictor",
"portion",
"of",
"the",
"model",
"ie",
"multiply",
"the",
"model",
"matrix",
"by",
"the",
"spline",
"basis",
"coefficients"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L357-L393 | train | 220,460 |
dswah/pyGAM | pygam/pygam.py | GAM.predict_mu | def predict_mu(self, X):
"""
preduct expected value of target given model and input X
Parameters
---------
X : array-like of shape (n_samples, m_features),
containing the input dataset
Returns
-------
y : np.array of shape (n_samples,)
containing expected values under the model
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
lp = self._linear_predictor(X)
return self.link.mu(lp, self.distribution) | python | def predict_mu(self, X):
"""
preduct expected value of target given model and input X
Parameters
---------
X : array-like of shape (n_samples, m_features),
containing the input dataset
Returns
-------
y : np.array of shape (n_samples,)
containing expected values under the model
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
lp = self._linear_predictor(X)
return self.link.mu(lp, self.distribution) | [
"def",
"predict_mu",
"(",
"self",
",",
"X",
")",
":",
"if",
"not",
"self",
".",
"_is_fitted",
":",
"raise",
"AttributeError",
"(",
"'GAM has not been fitted. Call fit first.'",
")",
"X",
"=",
"check_X",
"(",
"X",
",",
"n_feats",
"=",
"self",
".",
"statistics... | preduct expected value of target given model and input X
Parameters
---------
X : array-like of shape (n_samples, m_features),
containing the input dataset
Returns
-------
y : np.array of shape (n_samples,)
containing expected values under the model | [
"preduct",
"expected",
"value",
"of",
"target",
"given",
"model",
"and",
"input",
"X"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L395-L417 | train | 220,461 |
dswah/pyGAM | pygam/pygam.py | GAM._modelmat | def _modelmat(self, X, term=-1):
"""
Builds a model matrix, B, out of the spline basis for each feature
B = [B_0, B_1, ..., B_p]
Parameters
---------
X : array-like of shape (n_samples, m_features)
containing the input dataset
term : int, optional
term index for which to compute the model matrix
if -1, will create the model matrix for all features
Returns
-------
modelmat : sparse matrix of len n_samples
containing model matrix of the spline basis for selected features
"""
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
return self.terms.build_columns(X, term=term) | python | def _modelmat(self, X, term=-1):
"""
Builds a model matrix, B, out of the spline basis for each feature
B = [B_0, B_1, ..., B_p]
Parameters
---------
X : array-like of shape (n_samples, m_features)
containing the input dataset
term : int, optional
term index for which to compute the model matrix
if -1, will create the model matrix for all features
Returns
-------
modelmat : sparse matrix of len n_samples
containing model matrix of the spline basis for selected features
"""
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
return self.terms.build_columns(X, term=term) | [
"def",
"_modelmat",
"(",
"self",
",",
"X",
",",
"term",
"=",
"-",
"1",
")",
":",
"X",
"=",
"check_X",
"(",
"X",
",",
"n_feats",
"=",
"self",
".",
"statistics_",
"[",
"'m_features'",
"]",
",",
"edge_knots",
"=",
"self",
".",
"edge_knots_",
",",
"dty... | Builds a model matrix, B, out of the spline basis for each feature
B = [B_0, B_1, ..., B_p]
Parameters
---------
X : array-like of shape (n_samples, m_features)
containing the input dataset
term : int, optional
term index for which to compute the model matrix
if -1, will create the model matrix for all features
Returns
-------
modelmat : sparse matrix of len n_samples
containing model matrix of the spline basis for selected features | [
"Builds",
"a",
"model",
"matrix",
"B",
"out",
"of",
"the",
"spline",
"basis",
"for",
"each",
"feature"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L436-L459 | train | 220,462 |
dswah/pyGAM | pygam/pygam.py | GAM._cholesky | def _cholesky(self, A, **kwargs):
"""
method to handle potential problems with the cholesky decomposition.
will try to increase L2 regularization of the penalty matrix to
do away with non-positive-definite errors
Parameters
----------
A : np.array
Returns
-------
np.array
"""
# create appropriate-size diagonal matrix
if sp.sparse.issparse(A):
diag = sp.sparse.eye(A.shape[0])
else:
diag = np.eye(A.shape[0])
constraint_l2 = self._constraint_l2
while constraint_l2 <= self._constraint_l2_max:
try:
L = cholesky(A, **kwargs)
self._constraint_l2 = constraint_l2
return L
except NotPositiveDefiniteError:
if self.verbose:
warnings.warn('Matrix is not positive definite. \n'\
'Increasing l2 reg by factor of 10.',
stacklevel=2)
A -= constraint_l2 * diag
constraint_l2 *= 10
A += constraint_l2 * diag
raise NotPositiveDefiniteError('Matrix is not positive \n'
'definite.') | python | def _cholesky(self, A, **kwargs):
"""
method to handle potential problems with the cholesky decomposition.
will try to increase L2 regularization of the penalty matrix to
do away with non-positive-definite errors
Parameters
----------
A : np.array
Returns
-------
np.array
"""
# create appropriate-size diagonal matrix
if sp.sparse.issparse(A):
diag = sp.sparse.eye(A.shape[0])
else:
diag = np.eye(A.shape[0])
constraint_l2 = self._constraint_l2
while constraint_l2 <= self._constraint_l2_max:
try:
L = cholesky(A, **kwargs)
self._constraint_l2 = constraint_l2
return L
except NotPositiveDefiniteError:
if self.verbose:
warnings.warn('Matrix is not positive definite. \n'\
'Increasing l2 reg by factor of 10.',
stacklevel=2)
A -= constraint_l2 * diag
constraint_l2 *= 10
A += constraint_l2 * diag
raise NotPositiveDefiniteError('Matrix is not positive \n'
'definite.') | [
"def",
"_cholesky",
"(",
"self",
",",
"A",
",",
"*",
"*",
"kwargs",
")",
":",
"# create appropriate-size diagonal matrix",
"if",
"sp",
".",
"sparse",
".",
"issparse",
"(",
"A",
")",
":",
"diag",
"=",
"sp",
".",
"sparse",
".",
"eye",
"(",
"A",
".",
"s... | method to handle potential problems with the cholesky decomposition.
will try to increase L2 regularization of the penalty matrix to
do away with non-positive-definite errors
Parameters
----------
A : np.array
Returns
-------
np.array | [
"method",
"to",
"handle",
"potential",
"problems",
"with",
"the",
"cholesky",
"decomposition",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L461-L498 | train | 220,463 |
dswah/pyGAM | pygam/pygam.py | GAM._pseudo_data | def _pseudo_data(self, y, lp, mu):
"""
compute the pseudo data for a PIRLS iterations
Parameters
---------
y : array-like of shape (n,)
containing target data
lp : array-like of shape (n,)
containing linear predictions by the model
mu : array-like of shape (n_samples,)
expected value of the targets given the model and inputs
Returns
-------
pseudo_data : np.array of shape (n,)
"""
return lp + (y - mu) * self.link.gradient(mu, self.distribution) | python | def _pseudo_data(self, y, lp, mu):
"""
compute the pseudo data for a PIRLS iterations
Parameters
---------
y : array-like of shape (n,)
containing target data
lp : array-like of shape (n,)
containing linear predictions by the model
mu : array-like of shape (n_samples,)
expected value of the targets given the model and inputs
Returns
-------
pseudo_data : np.array of shape (n,)
"""
return lp + (y - mu) * self.link.gradient(mu, self.distribution) | [
"def",
"_pseudo_data",
"(",
"self",
",",
"y",
",",
"lp",
",",
"mu",
")",
":",
"return",
"lp",
"+",
"(",
"y",
"-",
"mu",
")",
"*",
"self",
".",
"link",
".",
"gradient",
"(",
"mu",
",",
"self",
".",
"distribution",
")"
] | compute the pseudo data for a PIRLS iterations
Parameters
---------
y : array-like of shape (n,)
containing target data
lp : array-like of shape (n,)
containing linear predictions by the model
mu : array-like of shape (n_samples,)
expected value of the targets given the model and inputs
Returns
-------
pseudo_data : np.array of shape (n,) | [
"compute",
"the",
"pseudo",
"data",
"for",
"a",
"PIRLS",
"iterations"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L542-L559 | train | 220,464 |
dswah/pyGAM | pygam/pygam.py | GAM._initial_estimate | def _initial_estimate(self, y, modelmat):
"""
Makes an inital estimate for the model coefficients.
For a LinearGAM we simply initialize to small coefficients.
For other GAMs we transform the problem to the linear space
and solve an unpenalized version.
Parameters
---------
y : array-like of shape (n,)
containing target data
modelmat : sparse matrix of shape (n, m)
containing model matrix of the spline basis
Returns
-------
coef : array of shape (m,) containing the initial estimate for the model
coefficients
Notes
-----
This method implements the suggestions in
Wood, section 2.2.2 Geometry and IRLS convergence, pg 80
"""
# do a simple initialization for LinearGAMs
if isinstance(self, LinearGAM):
n, m = modelmat.shape
return np.ones(m) * np.sqrt(EPS)
# transform the problem to the linear scale
y = deepcopy(y).astype('float64')
y[y == 0] += .01 # edge case for log link, inverse link, and logit link
y[y == 1] -= .01 # edge case for logit link
y_ = self.link.link(y, self.distribution)
y_ = make_2d(y_, verbose=False)
assert np.isfinite(y_).all(), "transformed response values should be well-behaved."
# solve the linear problem
return np.linalg.solve(load_diagonal(modelmat.T.dot(modelmat).A),
modelmat.T.dot(y_)) | python | def _initial_estimate(self, y, modelmat):
"""
Makes an inital estimate for the model coefficients.
For a LinearGAM we simply initialize to small coefficients.
For other GAMs we transform the problem to the linear space
and solve an unpenalized version.
Parameters
---------
y : array-like of shape (n,)
containing target data
modelmat : sparse matrix of shape (n, m)
containing model matrix of the spline basis
Returns
-------
coef : array of shape (m,) containing the initial estimate for the model
coefficients
Notes
-----
This method implements the suggestions in
Wood, section 2.2.2 Geometry and IRLS convergence, pg 80
"""
# do a simple initialization for LinearGAMs
if isinstance(self, LinearGAM):
n, m = modelmat.shape
return np.ones(m) * np.sqrt(EPS)
# transform the problem to the linear scale
y = deepcopy(y).astype('float64')
y[y == 0] += .01 # edge case for log link, inverse link, and logit link
y[y == 1] -= .01 # edge case for logit link
y_ = self.link.link(y, self.distribution)
y_ = make_2d(y_, verbose=False)
assert np.isfinite(y_).all(), "transformed response values should be well-behaved."
# solve the linear problem
return np.linalg.solve(load_diagonal(modelmat.T.dot(modelmat).A),
modelmat.T.dot(y_)) | [
"def",
"_initial_estimate",
"(",
"self",
",",
"y",
",",
"modelmat",
")",
":",
"# do a simple initialization for LinearGAMs",
"if",
"isinstance",
"(",
"self",
",",
"LinearGAM",
")",
":",
"n",
",",
"m",
"=",
"modelmat",
".",
"shape",
"return",
"np",
".",
"ones... | Makes an inital estimate for the model coefficients.
For a LinearGAM we simply initialize to small coefficients.
For other GAMs we transform the problem to the linear space
and solve an unpenalized version.
Parameters
---------
y : array-like of shape (n,)
containing target data
modelmat : sparse matrix of shape (n, m)
containing model matrix of the spline basis
Returns
-------
coef : array of shape (m,) containing the initial estimate for the model
coefficients
Notes
-----
This method implements the suggestions in
Wood, section 2.2.2 Geometry and IRLS convergence, pg 80 | [
"Makes",
"an",
"inital",
"estimate",
"for",
"the",
"model",
"coefficients",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L621-L664 | train | 220,465 |
dswah/pyGAM | pygam/pygam.py | GAM._on_loop_start | def _on_loop_start(self, variables):
"""
performs on-loop-start actions like callbacks
variables contains local namespace variables.
Parameters
---------
variables : dict of available variables
Returns
-------
None
"""
for callback in self.callbacks:
if hasattr(callback, 'on_loop_start'):
self.logs_[str(callback)].append(callback.on_loop_start(**variables)) | python | def _on_loop_start(self, variables):
"""
performs on-loop-start actions like callbacks
variables contains local namespace variables.
Parameters
---------
variables : dict of available variables
Returns
-------
None
"""
for callback in self.callbacks:
if hasattr(callback, 'on_loop_start'):
self.logs_[str(callback)].append(callback.on_loop_start(**variables)) | [
"def",
"_on_loop_start",
"(",
"self",
",",
"variables",
")",
":",
"for",
"callback",
"in",
"self",
".",
"callbacks",
":",
"if",
"hasattr",
"(",
"callback",
",",
"'on_loop_start'",
")",
":",
"self",
".",
"logs_",
"[",
"str",
"(",
"callback",
")",
"]",
"... | performs on-loop-start actions like callbacks
variables contains local namespace variables.
Parameters
---------
variables : dict of available variables
Returns
-------
None | [
"performs",
"on",
"-",
"loop",
"-",
"start",
"actions",
"like",
"callbacks"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L834-L850 | train | 220,466 |
dswah/pyGAM | pygam/pygam.py | GAM._on_loop_end | def _on_loop_end(self, variables):
"""
performs on-loop-end actions like callbacks
variables contains local namespace variables.
Parameters
---------
variables : dict of available variables
Returns
-------
None
"""
for callback in self.callbacks:
if hasattr(callback, 'on_loop_end'):
self.logs_[str(callback)].append(callback.on_loop_end(**variables)) | python | def _on_loop_end(self, variables):
"""
performs on-loop-end actions like callbacks
variables contains local namespace variables.
Parameters
---------
variables : dict of available variables
Returns
-------
None
"""
for callback in self.callbacks:
if hasattr(callback, 'on_loop_end'):
self.logs_[str(callback)].append(callback.on_loop_end(**variables)) | [
"def",
"_on_loop_end",
"(",
"self",
",",
"variables",
")",
":",
"for",
"callback",
"in",
"self",
".",
"callbacks",
":",
"if",
"hasattr",
"(",
"callback",
",",
"'on_loop_end'",
")",
":",
"self",
".",
"logs_",
"[",
"str",
"(",
"callback",
")",
"]",
".",
... | performs on-loop-end actions like callbacks
variables contains local namespace variables.
Parameters
---------
variables : dict of available variables
Returns
-------
None | [
"performs",
"on",
"-",
"loop",
"-",
"end",
"actions",
"like",
"callbacks"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L852-L868 | train | 220,467 |
dswah/pyGAM | pygam/pygam.py | GAM.deviance_residuals | def deviance_residuals(self, X, y, weights=None, scaled=False):
"""
method to compute the deviance residuals of the model
these are analogous to the residuals of an OLS.
Parameters
----------
X : array-like
Input data array of shape (n_saples, m_features)
y : array-like
Output data vector of shape (n_samples,)
weights : array-like shape (n_samples,) or None, optional
Sample weights.
if None, defaults to array of ones
scaled : bool, optional
whether to scale the deviance by the (estimated) distribution scale
Returns
-------
deviance_residuals : np.array
with shape (n_samples,)
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
y = check_y(y, self.link, self.distribution, verbose=self.verbose)
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
check_X_y(X, y)
if weights is not None:
weights = np.array(weights).astype('f').ravel()
weights = check_array(weights, name='sample weights',
ndim=1, verbose=self.verbose)
check_lengths(y, weights)
else:
weights = np.ones_like(y).astype('float64')
mu = self.predict_mu(X)
sign = np.sign(y-mu)
return sign * self.distribution.deviance(y, mu,
weights=weights,
scaled=scaled) ** 0.5 | python | def deviance_residuals(self, X, y, weights=None, scaled=False):
"""
method to compute the deviance residuals of the model
these are analogous to the residuals of an OLS.
Parameters
----------
X : array-like
Input data array of shape (n_saples, m_features)
y : array-like
Output data vector of shape (n_samples,)
weights : array-like shape (n_samples,) or None, optional
Sample weights.
if None, defaults to array of ones
scaled : bool, optional
whether to scale the deviance by the (estimated) distribution scale
Returns
-------
deviance_residuals : np.array
with shape (n_samples,)
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
y = check_y(y, self.link, self.distribution, verbose=self.verbose)
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
check_X_y(X, y)
if weights is not None:
weights = np.array(weights).astype('f').ravel()
weights = check_array(weights, name='sample weights',
ndim=1, verbose=self.verbose)
check_lengths(y, weights)
else:
weights = np.ones_like(y).astype('float64')
mu = self.predict_mu(X)
sign = np.sign(y-mu)
return sign * self.distribution.deviance(y, mu,
weights=weights,
scaled=scaled) ** 0.5 | [
"def",
"deviance_residuals",
"(",
"self",
",",
"X",
",",
"y",
",",
"weights",
"=",
"None",
",",
"scaled",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"_is_fitted",
":",
"raise",
"AttributeError",
"(",
"'GAM has not been fitted. Call fit first.'",
")",
... | method to compute the deviance residuals of the model
these are analogous to the residuals of an OLS.
Parameters
----------
X : array-like
Input data array of shape (n_saples, m_features)
y : array-like
Output data vector of shape (n_samples,)
weights : array-like shape (n_samples,) or None, optional
Sample weights.
if None, defaults to array of ones
scaled : bool, optional
whether to scale the deviance by the (estimated) distribution scale
Returns
-------
deviance_residuals : np.array
with shape (n_samples,) | [
"method",
"to",
"compute",
"the",
"deviance",
"residuals",
"of",
"the",
"model"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L927-L971 | train | 220,468 |
dswah/pyGAM | pygam/pygam.py | GAM._estimate_model_statistics | def _estimate_model_statistics(self, y, modelmat, inner=None, BW=None,
B=None, weights=None, U1=None):
"""
method to compute all of the model statistics
results are stored in the 'statistics_' attribute of the model, as a
dictionary keyed by:
- edof: estimated degrees freedom
- scale: distribution scale, if applicable
- cov: coefficient covariances
- se: standarrd errors
- AIC: Akaike Information Criterion
- AICc: corrected Akaike Information Criterion
- pseudo_r2: dict of Pseudo R-squared metrics
- GCV: generailized cross-validation
or
- UBRE: Un-Biased Risk Estimator
- n_samples: number of samples used in estimation
Parameters
----------
y : array-like
output data vector of shape (n_samples,)
modelmat : array-like, default: None
contains the spline basis for each feature evaluated at the input
inner : array of intermediate computations from naive optimization
BW : array of intermediate computations from either optimization
B : array of intermediate computations from stable optimization
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
U1 : cropped U matrix from SVD.
Returns
-------
None
"""
lp = self._linear_predictor(modelmat=modelmat)
mu = self.link.mu(lp, self.distribution)
self.statistics_['edof_per_coef'] = np.diagonal(U1.dot(U1.T))
self.statistics_['edof'] = self.statistics_['edof_per_coef'].sum()
if not self.distribution._known_scale:
self.distribution.scale = self.distribution.phi(y=y, mu=mu, edof=self.statistics_['edof'], weights=weights)
self.statistics_['scale'] = self.distribution.scale
self.statistics_['cov'] = (B.dot(B.T)) * self.distribution.scale # parameter covariances. no need to remove a W because we are using W^2. Wood pg 184
self.statistics_['se'] = self.statistics_['cov'].diagonal()**0.5
self.statistics_['AIC'] = self._estimate_AIC(y=y, mu=mu, weights=weights)
self.statistics_['AICc'] = self._estimate_AICc(y=y, mu=mu, weights=weights)
self.statistics_['pseudo_r2'] = self._estimate_r2(y=y, mu=mu, weights=weights)
self.statistics_['GCV'], self.statistics_['UBRE'] = self._estimate_GCV_UBRE(modelmat=modelmat, y=y, weights=weights)
self.statistics_['loglikelihood'] = self._loglikelihood(y, mu, weights=weights)
self.statistics_['deviance'] = self.distribution.deviance(y=y, mu=mu, weights=weights).sum()
self.statistics_['p_values'] = self._estimate_p_values() | python | def _estimate_model_statistics(self, y, modelmat, inner=None, BW=None,
B=None, weights=None, U1=None):
"""
method to compute all of the model statistics
results are stored in the 'statistics_' attribute of the model, as a
dictionary keyed by:
- edof: estimated degrees freedom
- scale: distribution scale, if applicable
- cov: coefficient covariances
- se: standarrd errors
- AIC: Akaike Information Criterion
- AICc: corrected Akaike Information Criterion
- pseudo_r2: dict of Pseudo R-squared metrics
- GCV: generailized cross-validation
or
- UBRE: Un-Biased Risk Estimator
- n_samples: number of samples used in estimation
Parameters
----------
y : array-like
output data vector of shape (n_samples,)
modelmat : array-like, default: None
contains the spline basis for each feature evaluated at the input
inner : array of intermediate computations from naive optimization
BW : array of intermediate computations from either optimization
B : array of intermediate computations from stable optimization
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
U1 : cropped U matrix from SVD.
Returns
-------
None
"""
lp = self._linear_predictor(modelmat=modelmat)
mu = self.link.mu(lp, self.distribution)
self.statistics_['edof_per_coef'] = np.diagonal(U1.dot(U1.T))
self.statistics_['edof'] = self.statistics_['edof_per_coef'].sum()
if not self.distribution._known_scale:
self.distribution.scale = self.distribution.phi(y=y, mu=mu, edof=self.statistics_['edof'], weights=weights)
self.statistics_['scale'] = self.distribution.scale
self.statistics_['cov'] = (B.dot(B.T)) * self.distribution.scale # parameter covariances. no need to remove a W because we are using W^2. Wood pg 184
self.statistics_['se'] = self.statistics_['cov'].diagonal()**0.5
self.statistics_['AIC'] = self._estimate_AIC(y=y, mu=mu, weights=weights)
self.statistics_['AICc'] = self._estimate_AICc(y=y, mu=mu, weights=weights)
self.statistics_['pseudo_r2'] = self._estimate_r2(y=y, mu=mu, weights=weights)
self.statistics_['GCV'], self.statistics_['UBRE'] = self._estimate_GCV_UBRE(modelmat=modelmat, y=y, weights=weights)
self.statistics_['loglikelihood'] = self._loglikelihood(y, mu, weights=weights)
self.statistics_['deviance'] = self.distribution.deviance(y=y, mu=mu, weights=weights).sum()
self.statistics_['p_values'] = self._estimate_p_values() | [
"def",
"_estimate_model_statistics",
"(",
"self",
",",
"y",
",",
"modelmat",
",",
"inner",
"=",
"None",
",",
"BW",
"=",
"None",
",",
"B",
"=",
"None",
",",
"weights",
"=",
"None",
",",
"U1",
"=",
"None",
")",
":",
"lp",
"=",
"self",
".",
"_linear_p... | method to compute all of the model statistics
results are stored in the 'statistics_' attribute of the model, as a
dictionary keyed by:
- edof: estimated degrees freedom
- scale: distribution scale, if applicable
- cov: coefficient covariances
- se: standarrd errors
- AIC: Akaike Information Criterion
- AICc: corrected Akaike Information Criterion
- pseudo_r2: dict of Pseudo R-squared metrics
- GCV: generailized cross-validation
or
- UBRE: Un-Biased Risk Estimator
- n_samples: number of samples used in estimation
Parameters
----------
y : array-like
output data vector of shape (n_samples,)
modelmat : array-like, default: None
contains the spline basis for each feature evaluated at the input
inner : array of intermediate computations from naive optimization
BW : array of intermediate computations from either optimization
B : array of intermediate computations from stable optimization
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
U1 : cropped U matrix from SVD.
Returns
-------
None | [
"method",
"to",
"compute",
"all",
"of",
"the",
"model",
"statistics"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L973-L1025 | train | 220,469 |
dswah/pyGAM | pygam/pygam.py | GAM._estimate_AIC | def _estimate_AIC(self, y, mu, weights=None):
"""
estimate the Akaike Information Criterion
Parameters
----------
y : array-like of shape (n_samples,)
output data vector
mu : array-like of shape (n_samples,),
expected value of the targets given the model and inputs
weights : array-like shape (n_samples,) or None, optional
containing sample weights
if None, defaults to array of ones
Returns
-------
None
"""
estimated_scale = not(self.distribution._known_scale) # if we estimate the scale, that adds 2 dof
return -2*self._loglikelihood(y=y, mu=mu, weights=weights) + \
2*self.statistics_['edof'] + 2*estimated_scale | python | def _estimate_AIC(self, y, mu, weights=None):
"""
estimate the Akaike Information Criterion
Parameters
----------
y : array-like of shape (n_samples,)
output data vector
mu : array-like of shape (n_samples,),
expected value of the targets given the model and inputs
weights : array-like shape (n_samples,) or None, optional
containing sample weights
if None, defaults to array of ones
Returns
-------
None
"""
estimated_scale = not(self.distribution._known_scale) # if we estimate the scale, that adds 2 dof
return -2*self._loglikelihood(y=y, mu=mu, weights=weights) + \
2*self.statistics_['edof'] + 2*estimated_scale | [
"def",
"_estimate_AIC",
"(",
"self",
",",
"y",
",",
"mu",
",",
"weights",
"=",
"None",
")",
":",
"estimated_scale",
"=",
"not",
"(",
"self",
".",
"distribution",
".",
"_known_scale",
")",
"# if we estimate the scale, that adds 2 dof",
"return",
"-",
"2",
"*",
... | estimate the Akaike Information Criterion
Parameters
----------
y : array-like of shape (n_samples,)
output data vector
mu : array-like of shape (n_samples,),
expected value of the targets given the model and inputs
weights : array-like shape (n_samples,) or None, optional
containing sample weights
if None, defaults to array of ones
Returns
-------
None | [
"estimate",
"the",
"Akaike",
"Information",
"Criterion"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1027-L1047 | train | 220,470 |
dswah/pyGAM | pygam/pygam.py | GAM._estimate_AICc | def _estimate_AICc(self, y, mu, weights=None):
"""
estimate the corrected Akaike Information Criterion
relies on the estimated degrees of freedom, which must be computed
before.
Parameters
----------
y : array-like of shape (n_samples,)
output data vector
mu : array-like of shape (n_samples,)
expected value of the targets given the model and inputs
weights : array-like shape (n_samples,) or None, optional
containing sample weights
if None, defaults to array of ones
Returns
-------
None
"""
edof = self.statistics_['edof']
if self.statistics_['AIC'] is None:
self.statistics_['AIC'] = self._estimate_AIC(y, mu, weights)
return self.statistics_['AIC'] + 2*(edof + 1)*(edof + 2)/(y.shape[0] - edof -2) | python | def _estimate_AICc(self, y, mu, weights=None):
"""
estimate the corrected Akaike Information Criterion
relies on the estimated degrees of freedom, which must be computed
before.
Parameters
----------
y : array-like of shape (n_samples,)
output data vector
mu : array-like of shape (n_samples,)
expected value of the targets given the model and inputs
weights : array-like shape (n_samples,) or None, optional
containing sample weights
if None, defaults to array of ones
Returns
-------
None
"""
edof = self.statistics_['edof']
if self.statistics_['AIC'] is None:
self.statistics_['AIC'] = self._estimate_AIC(y, mu, weights)
return self.statistics_['AIC'] + 2*(edof + 1)*(edof + 2)/(y.shape[0] - edof -2) | [
"def",
"_estimate_AICc",
"(",
"self",
",",
"y",
",",
"mu",
",",
"weights",
"=",
"None",
")",
":",
"edof",
"=",
"self",
".",
"statistics_",
"[",
"'edof'",
"]",
"if",
"self",
".",
"statistics_",
"[",
"'AIC'",
"]",
"is",
"None",
":",
"self",
".",
"sta... | estimate the corrected Akaike Information Criterion
relies on the estimated degrees of freedom, which must be computed
before.
Parameters
----------
y : array-like of shape (n_samples,)
output data vector
mu : array-like of shape (n_samples,)
expected value of the targets given the model and inputs
weights : array-like shape (n_samples,) or None, optional
containing sample weights
if None, defaults to array of ones
Returns
-------
None | [
"estimate",
"the",
"corrected",
"Akaike",
"Information",
"Criterion"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1049-L1073 | train | 220,471 |
dswah/pyGAM | pygam/pygam.py | GAM._estimate_r2 | def _estimate_r2(self, X=None, y=None, mu=None, weights=None):
"""
estimate some pseudo R^2 values
currently only computes explained deviance.
results are stored
Parameters
----------
y : array-like of shape (n_samples,)
output data vector
mu : array-like of shape (n_samples,)
expected value of the targets given the model and inputs
weights : array-like shape (n_samples,) or None, optional
containing sample weights
if None, defaults to array of ones
Returns
-------
None
"""
if mu is None:
mu = self.predict_mu(X=X)
if weights is None:
weights = np.ones_like(y).astype('float64')
null_mu = y.mean() * np.ones_like(y).astype('float64')
null_d = self.distribution.deviance(y=y, mu=null_mu, weights=weights)
full_d = self.distribution.deviance(y=y, mu=mu, weights=weights)
null_ll = self._loglikelihood(y=y, mu=null_mu, weights=weights)
full_ll = self._loglikelihood(y=y, mu=mu, weights=weights)
r2 = OrderedDict()
r2['explained_deviance'] = 1. - full_d.sum()/null_d.sum()
r2['McFadden'] = full_ll/null_ll
r2['McFadden_adj'] = 1. - (full_ll - self.statistics_['edof'])/null_ll
return r2 | python | def _estimate_r2(self, X=None, y=None, mu=None, weights=None):
"""
estimate some pseudo R^2 values
currently only computes explained deviance.
results are stored
Parameters
----------
y : array-like of shape (n_samples,)
output data vector
mu : array-like of shape (n_samples,)
expected value of the targets given the model and inputs
weights : array-like shape (n_samples,) or None, optional
containing sample weights
if None, defaults to array of ones
Returns
-------
None
"""
if mu is None:
mu = self.predict_mu(X=X)
if weights is None:
weights = np.ones_like(y).astype('float64')
null_mu = y.mean() * np.ones_like(y).astype('float64')
null_d = self.distribution.deviance(y=y, mu=null_mu, weights=weights)
full_d = self.distribution.deviance(y=y, mu=mu, weights=weights)
null_ll = self._loglikelihood(y=y, mu=null_mu, weights=weights)
full_ll = self._loglikelihood(y=y, mu=mu, weights=weights)
r2 = OrderedDict()
r2['explained_deviance'] = 1. - full_d.sum()/null_d.sum()
r2['McFadden'] = full_ll/null_ll
r2['McFadden_adj'] = 1. - (full_ll - self.statistics_['edof'])/null_ll
return r2 | [
"def",
"_estimate_r2",
"(",
"self",
",",
"X",
"=",
"None",
",",
"y",
"=",
"None",
",",
"mu",
"=",
"None",
",",
"weights",
"=",
"None",
")",
":",
"if",
"mu",
"is",
"None",
":",
"mu",
"=",
"self",
".",
"predict_mu",
"(",
"X",
"=",
"X",
")",
"if... | estimate some pseudo R^2 values
currently only computes explained deviance.
results are stored
Parameters
----------
y : array-like of shape (n_samples,)
output data vector
mu : array-like of shape (n_samples,)
expected value of the targets given the model and inputs
weights : array-like shape (n_samples,) or None, optional
containing sample weights
if None, defaults to array of ones
Returns
-------
None | [
"estimate",
"some",
"pseudo",
"R^2",
"values"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1075-L1115 | train | 220,472 |
dswah/pyGAM | pygam/pygam.py | GAM._estimate_GCV_UBRE | def _estimate_GCV_UBRE(self, X=None, y=None, modelmat=None, gamma=1.4,
add_scale=True, weights=None):
"""
Generalized Cross Validation and Un-Biased Risk Estimator.
UBRE is used when the scale parameter is known,
like Poisson and Binomial families.
Parameters
----------
y : array-like of shape (n_samples,)
output data vector
modelmat : array-like, default: None
contains the spline basis for each feature evaluated at the input
gamma : float, default: 1.4
serves as a weighting to increase the impact of the influence matrix
on the score
add_scale : boolean, default: True
UBRE score can be negative because the distribution scale
is subtracted. to keep things positive we can add the scale back.
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
score : float
Either GCV or UBRE, depending on if the scale parameter is known.
Notes
-----
Sometimes the GCV or UBRE selected model is deemed to be too wiggly,
and a smoother model is desired. One way to achieve this, in a
systematic way, is to increase the amount that each model effective
degree of freedom counts, in the GCV or UBRE score, by a factor γ ≥ 1
see Wood 2006 pg. 177-182, 220 for more details.
"""
if gamma < 1:
raise ValueError('gamma scaling should be greater than 1, '\
'but found gamma = {}',format(gamma))
if modelmat is None:
modelmat = self._modelmat(X)
if weights is None:
weights = np.ones_like(y).astype('float64')
lp = self._linear_predictor(modelmat=modelmat)
mu = self.link.mu(lp, self.distribution)
n = y.shape[0]
edof = self.statistics_['edof']
GCV = None
UBRE = None
dev = self.distribution.deviance(mu=mu, y=y, scaled=False, weights=weights).sum()
if self.distribution._known_scale:
# scale is known, use UBRE
scale = self.distribution.scale
UBRE = 1./n * dev - (~add_scale)*(scale) + 2.*gamma/n * edof * scale
else:
# scale unkown, use GCV
GCV = (n * dev) / (n - gamma * edof)**2
return (GCV, UBRE) | python | def _estimate_GCV_UBRE(self, X=None, y=None, modelmat=None, gamma=1.4,
add_scale=True, weights=None):
"""
Generalized Cross Validation and Un-Biased Risk Estimator.
UBRE is used when the scale parameter is known,
like Poisson and Binomial families.
Parameters
----------
y : array-like of shape (n_samples,)
output data vector
modelmat : array-like, default: None
contains the spline basis for each feature evaluated at the input
gamma : float, default: 1.4
serves as a weighting to increase the impact of the influence matrix
on the score
add_scale : boolean, default: True
UBRE score can be negative because the distribution scale
is subtracted. to keep things positive we can add the scale back.
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
score : float
Either GCV or UBRE, depending on if the scale parameter is known.
Notes
-----
Sometimes the GCV or UBRE selected model is deemed to be too wiggly,
and a smoother model is desired. One way to achieve this, in a
systematic way, is to increase the amount that each model effective
degree of freedom counts, in the GCV or UBRE score, by a factor γ ≥ 1
see Wood 2006 pg. 177-182, 220 for more details.
"""
if gamma < 1:
raise ValueError('gamma scaling should be greater than 1, '\
'but found gamma = {}',format(gamma))
if modelmat is None:
modelmat = self._modelmat(X)
if weights is None:
weights = np.ones_like(y).astype('float64')
lp = self._linear_predictor(modelmat=modelmat)
mu = self.link.mu(lp, self.distribution)
n = y.shape[0]
edof = self.statistics_['edof']
GCV = None
UBRE = None
dev = self.distribution.deviance(mu=mu, y=y, scaled=False, weights=weights).sum()
if self.distribution._known_scale:
# scale is known, use UBRE
scale = self.distribution.scale
UBRE = 1./n * dev - (~add_scale)*(scale) + 2.*gamma/n * edof * scale
else:
# scale unkown, use GCV
GCV = (n * dev) / (n - gamma * edof)**2
return (GCV, UBRE) | [
"def",
"_estimate_GCV_UBRE",
"(",
"self",
",",
"X",
"=",
"None",
",",
"y",
"=",
"None",
",",
"modelmat",
"=",
"None",
",",
"gamma",
"=",
"1.4",
",",
"add_scale",
"=",
"True",
",",
"weights",
"=",
"None",
")",
":",
"if",
"gamma",
"<",
"1",
":",
"r... | Generalized Cross Validation and Un-Biased Risk Estimator.
UBRE is used when the scale parameter is known,
like Poisson and Binomial families.
Parameters
----------
y : array-like of shape (n_samples,)
output data vector
modelmat : array-like, default: None
contains the spline basis for each feature evaluated at the input
gamma : float, default: 1.4
serves as a weighting to increase the impact of the influence matrix
on the score
add_scale : boolean, default: True
UBRE score can be negative because the distribution scale
is subtracted. to keep things positive we can add the scale back.
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
score : float
Either GCV or UBRE, depending on if the scale parameter is known.
Notes
-----
Sometimes the GCV or UBRE selected model is deemed to be too wiggly,
and a smoother model is desired. One way to achieve this, in a
systematic way, is to increase the amount that each model effective
degree of freedom counts, in the GCV or UBRE score, by a factor γ ≥ 1
see Wood 2006 pg. 177-182, 220 for more details. | [
"Generalized",
"Cross",
"Validation",
"and",
"Un",
"-",
"Biased",
"Risk",
"Estimator",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1117-L1182 | train | 220,473 |
dswah/pyGAM | pygam/pygam.py | GAM._estimate_p_values | def _estimate_p_values(self):
"""estimate the p-values for all features
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
p_values = []
for term_i in range(len(self.terms)):
p_values.append(self._compute_p_value(term_i))
return p_values | python | def _estimate_p_values(self):
"""estimate the p-values for all features
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
p_values = []
for term_i in range(len(self.terms)):
p_values.append(self._compute_p_value(term_i))
return p_values | [
"def",
"_estimate_p_values",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_is_fitted",
":",
"raise",
"AttributeError",
"(",
"'GAM has not been fitted. Call fit first.'",
")",
"p_values",
"=",
"[",
"]",
"for",
"term_i",
"in",
"range",
"(",
"len",
"(",
"sel... | estimate the p-values for all features | [
"estimate",
"the",
"p",
"-",
"values",
"for",
"all",
"features"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1184-L1194 | train | 220,474 |
dswah/pyGAM | pygam/pygam.py | GAM._compute_p_value | def _compute_p_value(self, term_i):
"""compute the p-value of the desired feature
Arguments
---------
term_i : int
term to select from the data
Returns
-------
p_value : float
Notes
-----
Wood 2006, section 4.8.5:
The p-values, calculated in this manner, behave correctly for un-penalized models,
or models with known smoothing parameters, but when smoothing parameters have
been estimated, the p-values are typically lower than they should be, meaning that
the tests reject the null too readily.
(...)
In practical terms, if these p-values suggest that a term is not needed in a model,
then this is probably true, but if a term is deemed ‘significant’ it is important to be
aware that this significance may be overstated.
based on equations from Wood 2006 section 4.8.5 page 191
and errata https://people.maths.bris.ac.uk/~sw15190/igam/iGAMerrata-12.pdf
the errata shows a correction for the f-statisitc.
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
idxs = self.terms.get_coef_indices(term_i)
cov = self.statistics_['cov'][idxs][:, idxs]
coef = self.coef_[idxs]
# center non-intercept term functions
if isinstance(self.terms[term_i], SplineTerm):
coef -= coef.mean()
inv_cov, rank = sp.linalg.pinv(cov, return_rank=True)
score = coef.T.dot(inv_cov).dot(coef)
# compute p-values
if self.distribution._known_scale:
# for known scale use chi-squared statistic
return 1 - sp.stats.chi2.cdf(x=score, df=rank)
else:
# if scale has been estimated, prefer to use f-statisitc
score = score / rank
return 1 - sp.stats.f.cdf(score, rank, self.statistics_['n_samples'] - self.statistics_['edof']) | python | def _compute_p_value(self, term_i):
"""compute the p-value of the desired feature
Arguments
---------
term_i : int
term to select from the data
Returns
-------
p_value : float
Notes
-----
Wood 2006, section 4.8.5:
The p-values, calculated in this manner, behave correctly for un-penalized models,
or models with known smoothing parameters, but when smoothing parameters have
been estimated, the p-values are typically lower than they should be, meaning that
the tests reject the null too readily.
(...)
In practical terms, if these p-values suggest that a term is not needed in a model,
then this is probably true, but if a term is deemed ‘significant’ it is important to be
aware that this significance may be overstated.
based on equations from Wood 2006 section 4.8.5 page 191
and errata https://people.maths.bris.ac.uk/~sw15190/igam/iGAMerrata-12.pdf
the errata shows a correction for the f-statisitc.
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
idxs = self.terms.get_coef_indices(term_i)
cov = self.statistics_['cov'][idxs][:, idxs]
coef = self.coef_[idxs]
# center non-intercept term functions
if isinstance(self.terms[term_i], SplineTerm):
coef -= coef.mean()
inv_cov, rank = sp.linalg.pinv(cov, return_rank=True)
score = coef.T.dot(inv_cov).dot(coef)
# compute p-values
if self.distribution._known_scale:
# for known scale use chi-squared statistic
return 1 - sp.stats.chi2.cdf(x=score, df=rank)
else:
# if scale has been estimated, prefer to use f-statisitc
score = score / rank
return 1 - sp.stats.f.cdf(score, rank, self.statistics_['n_samples'] - self.statistics_['edof']) | [
"def",
"_compute_p_value",
"(",
"self",
",",
"term_i",
")",
":",
"if",
"not",
"self",
".",
"_is_fitted",
":",
"raise",
"AttributeError",
"(",
"'GAM has not been fitted. Call fit first.'",
")",
"idxs",
"=",
"self",
".",
"terms",
".",
"get_coef_indices",
"(",
"ter... | compute the p-value of the desired feature
Arguments
---------
term_i : int
term to select from the data
Returns
-------
p_value : float
Notes
-----
Wood 2006, section 4.8.5:
The p-values, calculated in this manner, behave correctly for un-penalized models,
or models with known smoothing parameters, but when smoothing parameters have
been estimated, the p-values are typically lower than they should be, meaning that
the tests reject the null too readily.
(...)
In practical terms, if these p-values suggest that a term is not needed in a model,
then this is probably true, but if a term is deemed ‘significant’ it is important to be
aware that this significance may be overstated.
based on equations from Wood 2006 section 4.8.5 page 191
and errata https://people.maths.bris.ac.uk/~sw15190/igam/iGAMerrata-12.pdf
the errata shows a correction for the f-statisitc. | [
"compute",
"the",
"p",
"-",
"value",
"of",
"the",
"desired",
"feature"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1196-L1248 | train | 220,475 |
dswah/pyGAM | pygam/pygam.py | GAM.confidence_intervals | def confidence_intervals(self, X, width=.95, quantiles=None):
"""estimate confidence intervals for the model.
Parameters
----------
X : array-like of shape (n_samples, m_features)
Input data matrix
width : float on [0,1], optional
quantiles : array-like of floats in (0, 1), optional
Instead of specifying the prediciton width, one can specify the
quantiles. So ``width=.95`` is equivalent to ``quantiles=[.025, .975]``
Returns
-------
intervals: np.array of shape (n_samples, 2 or len(quantiles))
Notes
-----
Wood 2006, section 4.9
Confidence intervals based on section 4.8 rely on large sample results to deal with
non-Gaussian distributions, and treat the smoothing parameters as fixed, when in
reality they are estimated from the data.
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
return self._get_quantiles(X, width, quantiles, prediction=False) | python | def confidence_intervals(self, X, width=.95, quantiles=None):
"""estimate confidence intervals for the model.
Parameters
----------
X : array-like of shape (n_samples, m_features)
Input data matrix
width : float on [0,1], optional
quantiles : array-like of floats in (0, 1), optional
Instead of specifying the prediciton width, one can specify the
quantiles. So ``width=.95`` is equivalent to ``quantiles=[.025, .975]``
Returns
-------
intervals: np.array of shape (n_samples, 2 or len(quantiles))
Notes
-----
Wood 2006, section 4.9
Confidence intervals based on section 4.8 rely on large sample results to deal with
non-Gaussian distributions, and treat the smoothing parameters as fixed, when in
reality they are estimated from the data.
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
return self._get_quantiles(X, width, quantiles, prediction=False) | [
"def",
"confidence_intervals",
"(",
"self",
",",
"X",
",",
"width",
"=",
".95",
",",
"quantiles",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_is_fitted",
":",
"raise",
"AttributeError",
"(",
"'GAM has not been fitted. Call fit first.'",
")",
"X",
"=",
... | estimate confidence intervals for the model.
Parameters
----------
X : array-like of shape (n_samples, m_features)
Input data matrix
width : float on [0,1], optional
quantiles : array-like of floats in (0, 1), optional
Instead of specifying the prediciton width, one can specify the
quantiles. So ``width=.95`` is equivalent to ``quantiles=[.025, .975]``
Returns
-------
intervals: np.array of shape (n_samples, 2 or len(quantiles))
Notes
-----
Wood 2006, section 4.9
Confidence intervals based on section 4.8 rely on large sample results to deal with
non-Gaussian distributions, and treat the smoothing parameters as fixed, when in
reality they are estimated from the data. | [
"estimate",
"confidence",
"intervals",
"for",
"the",
"model",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1250-L1281 | train | 220,476 |
dswah/pyGAM | pygam/pygam.py | GAM._get_quantiles | def _get_quantiles(self, X, width, quantiles, modelmat=None, lp=None,
prediction=False, xform=True, term=-1):
"""
estimate prediction intervals for LinearGAM
Parameters
----------
X : array
input data of shape (n_samples, m_features)
width : float on (0, 1)
quantiles : array-like of floats on (0, 1)
instead of specifying the prediciton width, one can specify the
quantiles. so width=.95 is equivalent to quantiles=[.025, .975]
modelmat : array of shape or None, default: None
lp : array or None, default: None
prediction : bool, default: True.
whether to compute prediction intervals (True)
or confidence intervals (False)
xform : bool, default: True,
whether to apply the inverse link function and return values
on the scale of the distribution mean (True),
or to keep on the linear predictor scale (False)
term : int, default: -1
Returns
-------
intervals: np.array of shape (n_samples, 2 or len(quantiles))
Notes
-----
when the scale parameter is known, then we can proceed with a large
sample approximation to the distribution of the model coefficients
where B_hat ~ Normal(B, cov)
when the scale parameter is unknown, then we have to account for
the distribution of the estimated scale parameter, which is Chi-squared.
since we scale our estimate of B_hat by the sqrt of estimated scale,
we get a t distribution: Normal / sqrt(Chi-squared) ~ t
see Simon Wood section 1.3.2, 1.3.3, 1.5.5, 2.1.5
"""
if quantiles is not None:
quantiles = np.atleast_1d(quantiles)
else:
alpha = (1 - width)/2.
quantiles = [alpha, 1 - alpha]
for quantile in quantiles:
if (quantile >= 1) or (quantile <= 0):
raise ValueError('quantiles must be in (0, 1), but found {}'\
.format(quantiles))
if modelmat is None:
modelmat = self._modelmat(X, term=term)
if lp is None:
lp = self._linear_predictor(modelmat=modelmat, term=term)
idxs = self.terms.get_coef_indices(term)
cov = self.statistics_['cov'][idxs][:, idxs]
var = (modelmat.dot(cov) * modelmat.A).sum(axis=1)
if prediction:
var += self.distribution.scale
lines = []
for quantile in quantiles:
if self.distribution._known_scale:
q = sp.stats.norm.ppf(quantile)
else:
q = sp.stats.t.ppf(quantile, df=self.statistics_['n_samples'] -
self.statistics_['edof'])
lines.append(lp + q * var**0.5)
lines = np.vstack(lines).T
if xform:
lines = self.link.mu(lines, self.distribution)
return lines | python | def _get_quantiles(self, X, width, quantiles, modelmat=None, lp=None,
prediction=False, xform=True, term=-1):
"""
estimate prediction intervals for LinearGAM
Parameters
----------
X : array
input data of shape (n_samples, m_features)
width : float on (0, 1)
quantiles : array-like of floats on (0, 1)
instead of specifying the prediciton width, one can specify the
quantiles. so width=.95 is equivalent to quantiles=[.025, .975]
modelmat : array of shape or None, default: None
lp : array or None, default: None
prediction : bool, default: True.
whether to compute prediction intervals (True)
or confidence intervals (False)
xform : bool, default: True,
whether to apply the inverse link function and return values
on the scale of the distribution mean (True),
or to keep on the linear predictor scale (False)
term : int, default: -1
Returns
-------
intervals: np.array of shape (n_samples, 2 or len(quantiles))
Notes
-----
when the scale parameter is known, then we can proceed with a large
sample approximation to the distribution of the model coefficients
where B_hat ~ Normal(B, cov)
when the scale parameter is unknown, then we have to account for
the distribution of the estimated scale parameter, which is Chi-squared.
since we scale our estimate of B_hat by the sqrt of estimated scale,
we get a t distribution: Normal / sqrt(Chi-squared) ~ t
see Simon Wood section 1.3.2, 1.3.3, 1.5.5, 2.1.5
"""
if quantiles is not None:
quantiles = np.atleast_1d(quantiles)
else:
alpha = (1 - width)/2.
quantiles = [alpha, 1 - alpha]
for quantile in quantiles:
if (quantile >= 1) or (quantile <= 0):
raise ValueError('quantiles must be in (0, 1), but found {}'\
.format(quantiles))
if modelmat is None:
modelmat = self._modelmat(X, term=term)
if lp is None:
lp = self._linear_predictor(modelmat=modelmat, term=term)
idxs = self.terms.get_coef_indices(term)
cov = self.statistics_['cov'][idxs][:, idxs]
var = (modelmat.dot(cov) * modelmat.A).sum(axis=1)
if prediction:
var += self.distribution.scale
lines = []
for quantile in quantiles:
if self.distribution._known_scale:
q = sp.stats.norm.ppf(quantile)
else:
q = sp.stats.t.ppf(quantile, df=self.statistics_['n_samples'] -
self.statistics_['edof'])
lines.append(lp + q * var**0.5)
lines = np.vstack(lines).T
if xform:
lines = self.link.mu(lines, self.distribution)
return lines | [
"def",
"_get_quantiles",
"(",
"self",
",",
"X",
",",
"width",
",",
"quantiles",
",",
"modelmat",
"=",
"None",
",",
"lp",
"=",
"None",
",",
"prediction",
"=",
"False",
",",
"xform",
"=",
"True",
",",
"term",
"=",
"-",
"1",
")",
":",
"if",
"quantiles... | estimate prediction intervals for LinearGAM
Parameters
----------
X : array
input data of shape (n_samples, m_features)
width : float on (0, 1)
quantiles : array-like of floats on (0, 1)
instead of specifying the prediciton width, one can specify the
quantiles. so width=.95 is equivalent to quantiles=[.025, .975]
modelmat : array of shape or None, default: None
lp : array or None, default: None
prediction : bool, default: True.
whether to compute prediction intervals (True)
or confidence intervals (False)
xform : bool, default: True,
whether to apply the inverse link function and return values
on the scale of the distribution mean (True),
or to keep on the linear predictor scale (False)
term : int, default: -1
Returns
-------
intervals: np.array of shape (n_samples, 2 or len(quantiles))
Notes
-----
when the scale parameter is known, then we can proceed with a large
sample approximation to the distribution of the model coefficients
where B_hat ~ Normal(B, cov)
when the scale parameter is unknown, then we have to account for
the distribution of the estimated scale parameter, which is Chi-squared.
since we scale our estimate of B_hat by the sqrt of estimated scale,
we get a t distribution: Normal / sqrt(Chi-squared) ~ t
see Simon Wood section 1.3.2, 1.3.3, 1.5.5, 2.1.5 | [
"estimate",
"prediction",
"intervals",
"for",
"LinearGAM"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1283-L1359 | train | 220,477 |
dswah/pyGAM | pygam/pygam.py | GAM._flatten_mesh | def _flatten_mesh(self, Xs, term):
"""flatten the mesh and distribute into a feature matrix"""
n = Xs[0].size
if self.terms[term].istensor:
terms = self.terms[term]
else:
terms = [self.terms[term]]
X = np.zeros((n, self.statistics_['m_features']))
for term_, x in zip(terms, Xs):
X[:, term_.feature] = x.ravel()
return X | python | def _flatten_mesh(self, Xs, term):
"""flatten the mesh and distribute into a feature matrix"""
n = Xs[0].size
if self.terms[term].istensor:
terms = self.terms[term]
else:
terms = [self.terms[term]]
X = np.zeros((n, self.statistics_['m_features']))
for term_, x in zip(terms, Xs):
X[:, term_.feature] = x.ravel()
return X | [
"def",
"_flatten_mesh",
"(",
"self",
",",
"Xs",
",",
"term",
")",
":",
"n",
"=",
"Xs",
"[",
"0",
"]",
".",
"size",
"if",
"self",
".",
"terms",
"[",
"term",
"]",
".",
"istensor",
":",
"terms",
"=",
"self",
".",
"terms",
"[",
"term",
"]",
"else",... | flatten the mesh and distribute into a feature matrix | [
"flatten",
"the",
"mesh",
"and",
"distribute",
"into",
"a",
"feature",
"matrix"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1361-L1373 | train | 220,478 |
dswah/pyGAM | pygam/pygam.py | GAM.generate_X_grid | def generate_X_grid(self, term, n=100, meshgrid=False):
"""create a nice grid of X data
array is sorted by feature and uniformly spaced,
so the marginal and joint distributions are likely wrong
if term is >= 0, we generate n samples per feature,
which results in n^deg samples,
where deg is the degree of the interaction of the term
Parameters
----------
term : int,
Which term to process.
n : int, optional
number of data points to create
meshgrid : bool, optional
Whether to return a meshgrid (useful for 3d plotting)
or a feature matrix (useful for inference like partial predictions)
Returns
-------
if meshgrid is False:
np.array of shape (n, n_features)
where m is the number of
(sub)terms in the requested (tensor)term.
else:
tuple of len m,
where m is the number of (sub)terms in the requested
(tensor)term.
each element in the tuple contains a np.ndarray of size (n)^m
Raises
------
ValueError :
If the term requested is an intercept
since it does not make sense to process the intercept term.
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
# cant do Intercept
if self.terms[term].isintercept:
raise ValueError('cannot create grid for intercept term')
# process each subterm in a TensorTerm
if self.terms[term].istensor:
Xs = []
for term_ in self.terms[term]:
Xs.append(np.linspace(term_.edge_knots_[0],
term_.edge_knots_[1],
num=n))
Xs = np.meshgrid(*Xs, indexing='ij')
if meshgrid:
return tuple(Xs)
else:
return self._flatten_mesh(Xs, term=term)
# all other Terms
elif hasattr(self.terms[term], 'edge_knots_'):
x = np.linspace(self.terms[term].edge_knots_[0],
self.terms[term].edge_knots_[1],
num=n)
if meshgrid:
return (x,)
# fill in feature matrix with only relevant features for this term
X = np.zeros((n, self.statistics_['m_features']))
X[:, self.terms[term].feature] = x
if getattr(self.terms[term], 'by', None) is not None:
X[:, self.terms[term].by] = 1.
return X
# dont know what to do here
else:
raise TypeError('Unexpected term type: {}'.format(self.terms[term])) | python | def generate_X_grid(self, term, n=100, meshgrid=False):
"""create a nice grid of X data
array is sorted by feature and uniformly spaced,
so the marginal and joint distributions are likely wrong
if term is >= 0, we generate n samples per feature,
which results in n^deg samples,
where deg is the degree of the interaction of the term
Parameters
----------
term : int,
Which term to process.
n : int, optional
number of data points to create
meshgrid : bool, optional
Whether to return a meshgrid (useful for 3d plotting)
or a feature matrix (useful for inference like partial predictions)
Returns
-------
if meshgrid is False:
np.array of shape (n, n_features)
where m is the number of
(sub)terms in the requested (tensor)term.
else:
tuple of len m,
where m is the number of (sub)terms in the requested
(tensor)term.
each element in the tuple contains a np.ndarray of size (n)^m
Raises
------
ValueError :
If the term requested is an intercept
since it does not make sense to process the intercept term.
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
# cant do Intercept
if self.terms[term].isintercept:
raise ValueError('cannot create grid for intercept term')
# process each subterm in a TensorTerm
if self.terms[term].istensor:
Xs = []
for term_ in self.terms[term]:
Xs.append(np.linspace(term_.edge_knots_[0],
term_.edge_knots_[1],
num=n))
Xs = np.meshgrid(*Xs, indexing='ij')
if meshgrid:
return tuple(Xs)
else:
return self._flatten_mesh(Xs, term=term)
# all other Terms
elif hasattr(self.terms[term], 'edge_knots_'):
x = np.linspace(self.terms[term].edge_knots_[0],
self.terms[term].edge_knots_[1],
num=n)
if meshgrid:
return (x,)
# fill in feature matrix with only relevant features for this term
X = np.zeros((n, self.statistics_['m_features']))
X[:, self.terms[term].feature] = x
if getattr(self.terms[term], 'by', None) is not None:
X[:, self.terms[term].by] = 1.
return X
# dont know what to do here
else:
raise TypeError('Unexpected term type: {}'.format(self.terms[term])) | [
"def",
"generate_X_grid",
"(",
"self",
",",
"term",
",",
"n",
"=",
"100",
",",
"meshgrid",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"_is_fitted",
":",
"raise",
"AttributeError",
"(",
"'GAM has not been fitted. Call fit first.'",
")",
"# cant do Intercep... | create a nice grid of X data
array is sorted by feature and uniformly spaced,
so the marginal and joint distributions are likely wrong
if term is >= 0, we generate n samples per feature,
which results in n^deg samples,
where deg is the degree of the interaction of the term
Parameters
----------
term : int,
Which term to process.
n : int, optional
number of data points to create
meshgrid : bool, optional
Whether to return a meshgrid (useful for 3d plotting)
or a feature matrix (useful for inference like partial predictions)
Returns
-------
if meshgrid is False:
np.array of shape (n, n_features)
where m is the number of
(sub)terms in the requested (tensor)term.
else:
tuple of len m,
where m is the number of (sub)terms in the requested
(tensor)term.
each element in the tuple contains a np.ndarray of size (n)^m
Raises
------
ValueError :
If the term requested is an intercept
since it does not make sense to process the intercept term. | [
"create",
"a",
"nice",
"grid",
"of",
"X",
"data"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1375-L1456 | train | 220,479 |
dswah/pyGAM | pygam/pygam.py | GAM.partial_dependence | def partial_dependence(self, term, X=None, width=None, quantiles=None,
meshgrid=False):
"""
Computes the term functions for the GAM
and possibly their confidence intervals.
if both width=None and quantiles=None,
then no confidence intervals are computed
Parameters
----------
term : int, optional
Term for which to compute the partial dependence functions.
X : array-like with input data, optional
if `meshgrid=False`, then `X` should be an array-like
of shape (n_samples, m_features).
if `meshgrid=True`, then `X` should be a tuple containing
an array for each feature in the term.
if None, an equally spaced grid of points is generated.
width : float on (0, 1), optional
Width of the confidence interval.
quantiles : array-like of floats on (0, 1), optional
instead of specifying the prediciton width, one can specify the
quantiles. so width=.95 is equivalent to quantiles=[.025, .975].
if None, defaults to width.
meshgrid : bool, whether to return and accept meshgrids.
Useful for creating outputs that are suitable for
3D plotting.
Note, for simple terms with no interactions, the output
of this function will be the same for ``meshgrid=True`` and
``meshgrid=False``, but the inputs will need to be different.
Returns
-------
pdeps : np.array of shape (n_samples,)
conf_intervals : list of length len(term)
containing np.arrays of shape (n_samples, 2 or len(quantiles))
Raises
------
ValueError :
If the term requested is an intercept
since it does not make sense to process the intercept term.
See Also
--------
generate_X_grid : for help creating meshgrids.
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
if not isinstance(term, int):
raise ValueError('term must be an integer, but found term: {}'.format(term))
# ensure term exists
if (term >= len(self.terms)) or (term < -1):
raise ValueError('Term {} out of range for model with {} terms'\
.format(term, len(self.terms)))
# cant do Intercept
if self.terms[term].isintercept:
raise ValueError('cannot create grid for intercept term')
if X is None:
X = self.generate_X_grid(term=term, meshgrid=meshgrid)
if meshgrid:
if not isinstance(X, tuple):
raise ValueError('X must be a tuple of grids if `meshgrid=True`, '\
'but found X: {}'.format(X))
shape = X[0].shape
X = self._flatten_mesh(X, term=term)
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
modelmat = self._modelmat(X, term=term)
pdep = self._linear_predictor(modelmat=modelmat, term=term)
out = [pdep]
compute_quantiles = (width is not None) or (quantiles is not None)
if compute_quantiles:
conf_intervals = self._get_quantiles(X, width=width,
quantiles=quantiles,
modelmat=modelmat,
lp=pdep,
term=term,
xform=False)
out += [conf_intervals]
if meshgrid:
for i, array in enumerate(out):
# add extra dimensions arising from multiple confidence intervals
if array.ndim > 1:
depth = array.shape[-1]
shape += (depth,)
out[i] = np.reshape(array, shape)
if compute_quantiles:
return out
return out[0] | python | def partial_dependence(self, term, X=None, width=None, quantiles=None,
meshgrid=False):
"""
Computes the term functions for the GAM
and possibly their confidence intervals.
if both width=None and quantiles=None,
then no confidence intervals are computed
Parameters
----------
term : int, optional
Term for which to compute the partial dependence functions.
X : array-like with input data, optional
if `meshgrid=False`, then `X` should be an array-like
of shape (n_samples, m_features).
if `meshgrid=True`, then `X` should be a tuple containing
an array for each feature in the term.
if None, an equally spaced grid of points is generated.
width : float on (0, 1), optional
Width of the confidence interval.
quantiles : array-like of floats on (0, 1), optional
instead of specifying the prediciton width, one can specify the
quantiles. so width=.95 is equivalent to quantiles=[.025, .975].
if None, defaults to width.
meshgrid : bool, whether to return and accept meshgrids.
Useful for creating outputs that are suitable for
3D plotting.
Note, for simple terms with no interactions, the output
of this function will be the same for ``meshgrid=True`` and
``meshgrid=False``, but the inputs will need to be different.
Returns
-------
pdeps : np.array of shape (n_samples,)
conf_intervals : list of length len(term)
containing np.arrays of shape (n_samples, 2 or len(quantiles))
Raises
------
ValueError :
If the term requested is an intercept
since it does not make sense to process the intercept term.
See Also
--------
generate_X_grid : for help creating meshgrids.
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
if not isinstance(term, int):
raise ValueError('term must be an integer, but found term: {}'.format(term))
# ensure term exists
if (term >= len(self.terms)) or (term < -1):
raise ValueError('Term {} out of range for model with {} terms'\
.format(term, len(self.terms)))
# cant do Intercept
if self.terms[term].isintercept:
raise ValueError('cannot create grid for intercept term')
if X is None:
X = self.generate_X_grid(term=term, meshgrid=meshgrid)
if meshgrid:
if not isinstance(X, tuple):
raise ValueError('X must be a tuple of grids if `meshgrid=True`, '\
'but found X: {}'.format(X))
shape = X[0].shape
X = self._flatten_mesh(X, term=term)
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
modelmat = self._modelmat(X, term=term)
pdep = self._linear_predictor(modelmat=modelmat, term=term)
out = [pdep]
compute_quantiles = (width is not None) or (quantiles is not None)
if compute_quantiles:
conf_intervals = self._get_quantiles(X, width=width,
quantiles=quantiles,
modelmat=modelmat,
lp=pdep,
term=term,
xform=False)
out += [conf_intervals]
if meshgrid:
for i, array in enumerate(out):
# add extra dimensions arising from multiple confidence intervals
if array.ndim > 1:
depth = array.shape[-1]
shape += (depth,)
out[i] = np.reshape(array, shape)
if compute_quantiles:
return out
return out[0] | [
"def",
"partial_dependence",
"(",
"self",
",",
"term",
",",
"X",
"=",
"None",
",",
"width",
"=",
"None",
",",
"quantiles",
"=",
"None",
",",
"meshgrid",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"_is_fitted",
":",
"raise",
"AttributeError",
"("... | Computes the term functions for the GAM
and possibly their confidence intervals.
if both width=None and quantiles=None,
then no confidence intervals are computed
Parameters
----------
term : int, optional
Term for which to compute the partial dependence functions.
X : array-like with input data, optional
if `meshgrid=False`, then `X` should be an array-like
of shape (n_samples, m_features).
if `meshgrid=True`, then `X` should be a tuple containing
an array for each feature in the term.
if None, an equally spaced grid of points is generated.
width : float on (0, 1), optional
Width of the confidence interval.
quantiles : array-like of floats on (0, 1), optional
instead of specifying the prediciton width, one can specify the
quantiles. so width=.95 is equivalent to quantiles=[.025, .975].
if None, defaults to width.
meshgrid : bool, whether to return and accept meshgrids.
Useful for creating outputs that are suitable for
3D plotting.
Note, for simple terms with no interactions, the output
of this function will be the same for ``meshgrid=True`` and
``meshgrid=False``, but the inputs will need to be different.
Returns
-------
pdeps : np.array of shape (n_samples,)
conf_intervals : list of length len(term)
containing np.arrays of shape (n_samples, 2 or len(quantiles))
Raises
------
ValueError :
If the term requested is an intercept
since it does not make sense to process the intercept term.
See Also
--------
generate_X_grid : for help creating meshgrids. | [
"Computes",
"the",
"term",
"functions",
"for",
"the",
"GAM",
"and",
"possibly",
"their",
"confidence",
"intervals",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1458-L1570 | train | 220,480 |
dswah/pyGAM | pygam/pygam.py | GAM.sample | def sample(self, X, y, quantity='y', sample_at_X=None,
weights=None, n_draws=100, n_bootstraps=5, objective='auto'):
"""Simulate from the posterior of the coefficients and smoothing params.
Samples are drawn from the posterior of the coefficients and smoothing
parameters given the response in an approximate way. The GAM must
already be fitted before calling this method; if the model has not
been fitted, then an exception is raised. Moreover, it is recommended
that the model and its hyperparameters be chosen with `gridsearch`
(with the parameter `keep_best=True`) before calling `sample`, so that
the result of that gridsearch can be used to generate useful response
data and so that the model's coefficients (and their covariance matrix)
can be used as the first bootstrap sample.
These samples are drawn as follows. Details are in the reference below.
1. ``n_bootstraps`` many "bootstrap samples" of the response (``y``) are
simulated by drawing random samples from the model's distribution
evaluated at the expected values (``mu``) for each sample in ``X``.
2. A copy of the model is fitted to each of those bootstrap samples of
the response. The result is an approximation of the distribution over
the smoothing parameter ``lam`` given the response data ``y``.
3. Samples of the coefficients are simulated from a multivariate normal
using the bootstrap samples of the coefficients and their covariance
matrices.
Notes
-----
A ``gridsearch`` is done ``n_bootstraps`` many times, so keep
``n_bootstraps`` small. Make ``n_bootstraps < n_draws`` to take advantage
of the expensive bootstrap samples of the smoothing parameters.
Parameters
-----------
X : array of shape (n_samples, m_features)
empirical input data
y : array of shape (n_samples,)
empirical response vector
quantity : {'y', 'coef', 'mu'}, default: 'y'
What quantity to return pseudorandom samples of.
If `sample_at_X` is not None and `quantity` is either `'y'` or
`'mu'`, then samples are drawn at the values of `X` specified in
`sample_at_X`.
sample_at_X : array of shape (n_samples_to_simulate, m_features) or
None, optional
Input data at which to draw new samples.
Only applies for `quantity` equal to `'y'` or to `'mu`'.
If `None`, then `sample_at_X` is replaced by `X`.
weights : np.array of shape (n_samples,)
sample weights
n_draws : positive int, optional (default=100)
The number of samples to draw from the posterior distribution of
the coefficients and smoothing parameters
n_bootstraps : positive int, optional (default=5)
The number of bootstrap samples to draw from simulations of the
response (from the already fitted model) to estimate the
distribution of the smoothing parameters given the response data.
If `n_bootstraps` is 1, then only the already fitted model's
smoothing parameter is used, and the distribution over the
smoothing parameters is not estimated using bootstrap sampling.
objective : string, optional (default='auto'
metric to optimize in grid search. must be in
['AIC', 'AICc', 'GCV', 'UBRE', 'auto']
if 'auto', then grid search will optimize GCV for models with
unknown scale and UBRE for models with known scale.
Returns
-------
draws : 2D array of length n_draws
Simulations of the given `quantity` using samples from the
posterior distribution of the coefficients and smoothing parameter
given the response data. Each row is a pseudorandom sample.
If `quantity == 'coef'`, then the number of columns of `draws` is
the number of coefficients (`len(self.coef_)`).
Otherwise, the number of columns of `draws` is the number of
rows of `sample_at_X` if `sample_at_X` is not `None` or else
the number of rows of `X`.
References
----------
Simon N. Wood, 2006. Generalized Additive Models: an introduction with
R. Section 4.9.3 (pages 198–199) and Section 5.4.2 (page 256–257).
"""
if quantity not in {'mu', 'coef', 'y'}:
raise ValueError("`quantity` must be one of 'mu', 'coef', 'y';"
" got {}".format(quantity))
coef_draws = self._sample_coef(
X, y, weights=weights, n_draws=n_draws,
n_bootstraps=n_bootstraps, objective=objective)
if quantity == 'coef':
return coef_draws
if sample_at_X is None:
sample_at_X = X
linear_predictor = self._modelmat(sample_at_X).dot(coef_draws.T)
mu_shape_n_draws_by_n_samples = self.link.mu(
linear_predictor, self.distribution).T
if quantity == 'mu':
return mu_shape_n_draws_by_n_samples
else:
return self.distribution.sample(mu_shape_n_draws_by_n_samples) | python | def sample(self, X, y, quantity='y', sample_at_X=None,
weights=None, n_draws=100, n_bootstraps=5, objective='auto'):
"""Simulate from the posterior of the coefficients and smoothing params.
Samples are drawn from the posterior of the coefficients and smoothing
parameters given the response in an approximate way. The GAM must
already be fitted before calling this method; if the model has not
been fitted, then an exception is raised. Moreover, it is recommended
that the model and its hyperparameters be chosen with `gridsearch`
(with the parameter `keep_best=True`) before calling `sample`, so that
the result of that gridsearch can be used to generate useful response
data and so that the model's coefficients (and their covariance matrix)
can be used as the first bootstrap sample.
These samples are drawn as follows. Details are in the reference below.
1. ``n_bootstraps`` many "bootstrap samples" of the response (``y``) are
simulated by drawing random samples from the model's distribution
evaluated at the expected values (``mu``) for each sample in ``X``.
2. A copy of the model is fitted to each of those bootstrap samples of
the response. The result is an approximation of the distribution over
the smoothing parameter ``lam`` given the response data ``y``.
3. Samples of the coefficients are simulated from a multivariate normal
using the bootstrap samples of the coefficients and their covariance
matrices.
Notes
-----
A ``gridsearch`` is done ``n_bootstraps`` many times, so keep
``n_bootstraps`` small. Make ``n_bootstraps < n_draws`` to take advantage
of the expensive bootstrap samples of the smoothing parameters.
Parameters
-----------
X : array of shape (n_samples, m_features)
empirical input data
y : array of shape (n_samples,)
empirical response vector
quantity : {'y', 'coef', 'mu'}, default: 'y'
What quantity to return pseudorandom samples of.
If `sample_at_X` is not None and `quantity` is either `'y'` or
`'mu'`, then samples are drawn at the values of `X` specified in
`sample_at_X`.
sample_at_X : array of shape (n_samples_to_simulate, m_features) or
None, optional
Input data at which to draw new samples.
Only applies for `quantity` equal to `'y'` or to `'mu`'.
If `None`, then `sample_at_X` is replaced by `X`.
weights : np.array of shape (n_samples,)
sample weights
n_draws : positive int, optional (default=100)
The number of samples to draw from the posterior distribution of
the coefficients and smoothing parameters
n_bootstraps : positive int, optional (default=5)
The number of bootstrap samples to draw from simulations of the
response (from the already fitted model) to estimate the
distribution of the smoothing parameters given the response data.
If `n_bootstraps` is 1, then only the already fitted model's
smoothing parameter is used, and the distribution over the
smoothing parameters is not estimated using bootstrap sampling.
objective : string, optional (default='auto'
metric to optimize in grid search. must be in
['AIC', 'AICc', 'GCV', 'UBRE', 'auto']
if 'auto', then grid search will optimize GCV for models with
unknown scale and UBRE for models with known scale.
Returns
-------
draws : 2D array of length n_draws
Simulations of the given `quantity` using samples from the
posterior distribution of the coefficients and smoothing parameter
given the response data. Each row is a pseudorandom sample.
If `quantity == 'coef'`, then the number of columns of `draws` is
the number of coefficients (`len(self.coef_)`).
Otherwise, the number of columns of `draws` is the number of
rows of `sample_at_X` if `sample_at_X` is not `None` or else
the number of rows of `X`.
References
----------
Simon N. Wood, 2006. Generalized Additive Models: an introduction with
R. Section 4.9.3 (pages 198–199) and Section 5.4.2 (page 256–257).
"""
if quantity not in {'mu', 'coef', 'y'}:
raise ValueError("`quantity` must be one of 'mu', 'coef', 'y';"
" got {}".format(quantity))
coef_draws = self._sample_coef(
X, y, weights=weights, n_draws=n_draws,
n_bootstraps=n_bootstraps, objective=objective)
if quantity == 'coef':
return coef_draws
if sample_at_X is None:
sample_at_X = X
linear_predictor = self._modelmat(sample_at_X).dot(coef_draws.T)
mu_shape_n_draws_by_n_samples = self.link.mu(
linear_predictor, self.distribution).T
if quantity == 'mu':
return mu_shape_n_draws_by_n_samples
else:
return self.distribution.sample(mu_shape_n_draws_by_n_samples) | [
"def",
"sample",
"(",
"self",
",",
"X",
",",
"y",
",",
"quantity",
"=",
"'y'",
",",
"sample_at_X",
"=",
"None",
",",
"weights",
"=",
"None",
",",
"n_draws",
"=",
"100",
",",
"n_bootstraps",
"=",
"5",
",",
"objective",
"=",
"'auto'",
")",
":",
"if",... | Simulate from the posterior of the coefficients and smoothing params.
Samples are drawn from the posterior of the coefficients and smoothing
parameters given the response in an approximate way. The GAM must
already be fitted before calling this method; if the model has not
been fitted, then an exception is raised. Moreover, it is recommended
that the model and its hyperparameters be chosen with `gridsearch`
(with the parameter `keep_best=True`) before calling `sample`, so that
the result of that gridsearch can be used to generate useful response
data and so that the model's coefficients (and their covariance matrix)
can be used as the first bootstrap sample.
These samples are drawn as follows. Details are in the reference below.
1. ``n_bootstraps`` many "bootstrap samples" of the response (``y``) are
simulated by drawing random samples from the model's distribution
evaluated at the expected values (``mu``) for each sample in ``X``.
2. A copy of the model is fitted to each of those bootstrap samples of
the response. The result is an approximation of the distribution over
the smoothing parameter ``lam`` given the response data ``y``.
3. Samples of the coefficients are simulated from a multivariate normal
using the bootstrap samples of the coefficients and their covariance
matrices.
Notes
-----
A ``gridsearch`` is done ``n_bootstraps`` many times, so keep
``n_bootstraps`` small. Make ``n_bootstraps < n_draws`` to take advantage
of the expensive bootstrap samples of the smoothing parameters.
Parameters
-----------
X : array of shape (n_samples, m_features)
empirical input data
y : array of shape (n_samples,)
empirical response vector
quantity : {'y', 'coef', 'mu'}, default: 'y'
What quantity to return pseudorandom samples of.
If `sample_at_X` is not None and `quantity` is either `'y'` or
`'mu'`, then samples are drawn at the values of `X` specified in
`sample_at_X`.
sample_at_X : array of shape (n_samples_to_simulate, m_features) or
None, optional
Input data at which to draw new samples.
Only applies for `quantity` equal to `'y'` or to `'mu`'.
If `None`, then `sample_at_X` is replaced by `X`.
weights : np.array of shape (n_samples,)
sample weights
n_draws : positive int, optional (default=100)
The number of samples to draw from the posterior distribution of
the coefficients and smoothing parameters
n_bootstraps : positive int, optional (default=5)
The number of bootstrap samples to draw from simulations of the
response (from the already fitted model) to estimate the
distribution of the smoothing parameters given the response data.
If `n_bootstraps` is 1, then only the already fitted model's
smoothing parameter is used, and the distribution over the
smoothing parameters is not estimated using bootstrap sampling.
objective : string, optional (default='auto'
metric to optimize in grid search. must be in
['AIC', 'AICc', 'GCV', 'UBRE', 'auto']
if 'auto', then grid search will optimize GCV for models with
unknown scale and UBRE for models with known scale.
Returns
-------
draws : 2D array of length n_draws
Simulations of the given `quantity` using samples from the
posterior distribution of the coefficients and smoothing parameter
given the response data. Each row is a pseudorandom sample.
If `quantity == 'coef'`, then the number of columns of `draws` is
the number of coefficients (`len(self.coef_)`).
Otherwise, the number of columns of `draws` is the number of
rows of `sample_at_X` if `sample_at_X` is not `None` or else
the number of rows of `X`.
References
----------
Simon N. Wood, 2006. Generalized Additive Models: an introduction with
R. Section 4.9.3 (pages 198–199) and Section 5.4.2 (page 256–257). | [
"Simulate",
"from",
"the",
"posterior",
"of",
"the",
"coefficients",
"and",
"smoothing",
"params",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L1929-L2044 | train | 220,481 |
dswah/pyGAM | pygam/pygam.py | GAM._sample_coef | def _sample_coef(self, X, y, weights=None, n_draws=100, n_bootstraps=1,
objective='auto'):
"""Simulate from the posterior of the coefficients.
NOTE: A `gridsearch` is done `n_bootstraps` many times, so keep
`n_bootstraps` small. Make `n_bootstraps < n_draws` to take advantage
of the expensive bootstrap samples of the smoothing parameters.
Parameters
-----------
X : array of shape (n_samples, m_features)
input data
y : array of shape (n_samples,)
response vector
weights : np.array of shape (n_samples,)
sample weights
n_draws : positive int, optional (default=100
The number of samples to draw from the posterior distribution of
the coefficients and smoothing parameters
n_bootstraps : positive int, optional (default=1
The number of bootstrap samples to draw from simulations of the
response (from the already fitted model) to estimate the
distribution of the smoothing parameters given the response data.
If `n_bootstraps` is 1, then only the already fitted model's
smoothing parameters is used.
objective : string, optional (default='auto'
metric to optimize in grid search. must be in
['AIC', 'AICc', 'GCV', 'UBRE', 'auto']
if 'auto', then grid search will optimize GCV for models with
unknown scale and UBRE for models with known scale.
Returns
-------
coef_samples : array of shape (n_draws, n_samples)
Approximate simulations of the coefficients drawn from the
posterior distribution of the coefficients and smoothing
parameters given the response data
References
----------
Simon N. Wood, 2006. Generalized Additive Models: an introduction with
R. Section 4.9.3 (pages 198–199) and Section 5.4.2 (page 256–257).
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
if n_bootstraps < 1:
raise ValueError('n_bootstraps must be >= 1;'
' got {}'.format(n_bootstraps))
if n_draws < 1:
raise ValueError('n_draws must be >= 1;'
' got {}'.format(n_draws))
coef_bootstraps, cov_bootstraps = (
self._bootstrap_samples_of_smoothing(X, y, weights=weights,
n_bootstraps=n_bootstraps,
objective=objective))
coef_draws = self._simulate_coef_from_bootstraps(
n_draws, coef_bootstraps, cov_bootstraps)
return coef_draws | python | def _sample_coef(self, X, y, weights=None, n_draws=100, n_bootstraps=1,
objective='auto'):
"""Simulate from the posterior of the coefficients.
NOTE: A `gridsearch` is done `n_bootstraps` many times, so keep
`n_bootstraps` small. Make `n_bootstraps < n_draws` to take advantage
of the expensive bootstrap samples of the smoothing parameters.
Parameters
-----------
X : array of shape (n_samples, m_features)
input data
y : array of shape (n_samples,)
response vector
weights : np.array of shape (n_samples,)
sample weights
n_draws : positive int, optional (default=100
The number of samples to draw from the posterior distribution of
the coefficients and smoothing parameters
n_bootstraps : positive int, optional (default=1
The number of bootstrap samples to draw from simulations of the
response (from the already fitted model) to estimate the
distribution of the smoothing parameters given the response data.
If `n_bootstraps` is 1, then only the already fitted model's
smoothing parameters is used.
objective : string, optional (default='auto'
metric to optimize in grid search. must be in
['AIC', 'AICc', 'GCV', 'UBRE', 'auto']
if 'auto', then grid search will optimize GCV for models with
unknown scale and UBRE for models with known scale.
Returns
-------
coef_samples : array of shape (n_draws, n_samples)
Approximate simulations of the coefficients drawn from the
posterior distribution of the coefficients and smoothing
parameters given the response data
References
----------
Simon N. Wood, 2006. Generalized Additive Models: an introduction with
R. Section 4.9.3 (pages 198–199) and Section 5.4.2 (page 256–257).
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
if n_bootstraps < 1:
raise ValueError('n_bootstraps must be >= 1;'
' got {}'.format(n_bootstraps))
if n_draws < 1:
raise ValueError('n_draws must be >= 1;'
' got {}'.format(n_draws))
coef_bootstraps, cov_bootstraps = (
self._bootstrap_samples_of_smoothing(X, y, weights=weights,
n_bootstraps=n_bootstraps,
objective=objective))
coef_draws = self._simulate_coef_from_bootstraps(
n_draws, coef_bootstraps, cov_bootstraps)
return coef_draws | [
"def",
"_sample_coef",
"(",
"self",
",",
"X",
",",
"y",
",",
"weights",
"=",
"None",
",",
"n_draws",
"=",
"100",
",",
"n_bootstraps",
"=",
"1",
",",
"objective",
"=",
"'auto'",
")",
":",
"if",
"not",
"self",
".",
"_is_fitted",
":",
"raise",
"Attribut... | Simulate from the posterior of the coefficients.
NOTE: A `gridsearch` is done `n_bootstraps` many times, so keep
`n_bootstraps` small. Make `n_bootstraps < n_draws` to take advantage
of the expensive bootstrap samples of the smoothing parameters.
Parameters
-----------
X : array of shape (n_samples, m_features)
input data
y : array of shape (n_samples,)
response vector
weights : np.array of shape (n_samples,)
sample weights
n_draws : positive int, optional (default=100
The number of samples to draw from the posterior distribution of
the coefficients and smoothing parameters
n_bootstraps : positive int, optional (default=1
The number of bootstrap samples to draw from simulations of the
response (from the already fitted model) to estimate the
distribution of the smoothing parameters given the response data.
If `n_bootstraps` is 1, then only the already fitted model's
smoothing parameters is used.
objective : string, optional (default='auto'
metric to optimize in grid search. must be in
['AIC', 'AICc', 'GCV', 'UBRE', 'auto']
if 'auto', then grid search will optimize GCV for models with
unknown scale and UBRE for models with known scale.
Returns
-------
coef_samples : array of shape (n_draws, n_samples)
Approximate simulations of the coefficients drawn from the
posterior distribution of the coefficients and smoothing
parameters given the response data
References
----------
Simon N. Wood, 2006. Generalized Additive Models: an introduction with
R. Section 4.9.3 (pages 198–199) and Section 5.4.2 (page 256–257). | [
"Simulate",
"from",
"the",
"posterior",
"of",
"the",
"coefficients",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L2046-L2110 | train | 220,482 |
dswah/pyGAM | pygam/pygam.py | GAM._bootstrap_samples_of_smoothing | def _bootstrap_samples_of_smoothing(self, X, y, weights=None,
n_bootstraps=1, objective='auto'):
"""Sample the smoothing parameters using simulated response data.
For now, the grid of `lam` values is 11 random points in M-dimensional
space, where M = the number of lam values, ie len(flatten(gam.lam))
all values are in [1e-3, 1e3]
"""
mu = self.predict_mu(X) # Wood pg. 198 step 1
coef_bootstraps = [self.coef_]
cov_bootstraps = [
load_diagonal(self.statistics_['cov'])]
for _ in range(n_bootstraps - 1): # Wood pg. 198 step 2
# generate response data from fitted model (Wood pg. 198 step 3)
y_bootstrap = self.distribution.sample(mu)
# fit smoothing parameters on the bootstrap data
# (Wood pg. 198 step 4)
# TODO: Either enable randomized searches over hyperparameters
# (like in sklearn's RandomizedSearchCV), or draw enough samples of
# `lam` so that each of these bootstrap samples get different
# values of `lam`. Right now, each bootstrap sample uses the exact
# same grid of values for `lam`, so it is not worth setting
# `n_bootstraps > 1`.
gam = deepcopy(self)
gam.set_params(self.get_params())
# create a random search of 11 points in lam space
# with all values in [1e-3, 1e3]
lam_grid = np.random.randn(11, len(flatten(self.lam))) * 6 - 3
lam_grid = np.exp(lam_grid)
gam.gridsearch(X, y_bootstrap, weights=weights, lam=lam_grid,
objective=objective)
lam = gam.lam
# fit coefficients on the original data given the smoothing params
# (Wood pg. 199 step 5)
gam = deepcopy(self)
gam.set_params(self.get_params())
gam.lam = lam
gam.fit(X, y, weights=weights)
coef_bootstraps.append(gam.coef_)
cov = load_diagonal(gam.statistics_['cov'])
cov_bootstraps.append(cov)
return coef_bootstraps, cov_bootstraps | python | def _bootstrap_samples_of_smoothing(self, X, y, weights=None,
n_bootstraps=1, objective='auto'):
"""Sample the smoothing parameters using simulated response data.
For now, the grid of `lam` values is 11 random points in M-dimensional
space, where M = the number of lam values, ie len(flatten(gam.lam))
all values are in [1e-3, 1e3]
"""
mu = self.predict_mu(X) # Wood pg. 198 step 1
coef_bootstraps = [self.coef_]
cov_bootstraps = [
load_diagonal(self.statistics_['cov'])]
for _ in range(n_bootstraps - 1): # Wood pg. 198 step 2
# generate response data from fitted model (Wood pg. 198 step 3)
y_bootstrap = self.distribution.sample(mu)
# fit smoothing parameters on the bootstrap data
# (Wood pg. 198 step 4)
# TODO: Either enable randomized searches over hyperparameters
# (like in sklearn's RandomizedSearchCV), or draw enough samples of
# `lam` so that each of these bootstrap samples get different
# values of `lam`. Right now, each bootstrap sample uses the exact
# same grid of values for `lam`, so it is not worth setting
# `n_bootstraps > 1`.
gam = deepcopy(self)
gam.set_params(self.get_params())
# create a random search of 11 points in lam space
# with all values in [1e-3, 1e3]
lam_grid = np.random.randn(11, len(flatten(self.lam))) * 6 - 3
lam_grid = np.exp(lam_grid)
gam.gridsearch(X, y_bootstrap, weights=weights, lam=lam_grid,
objective=objective)
lam = gam.lam
# fit coefficients on the original data given the smoothing params
# (Wood pg. 199 step 5)
gam = deepcopy(self)
gam.set_params(self.get_params())
gam.lam = lam
gam.fit(X, y, weights=weights)
coef_bootstraps.append(gam.coef_)
cov = load_diagonal(gam.statistics_['cov'])
cov_bootstraps.append(cov)
return coef_bootstraps, cov_bootstraps | [
"def",
"_bootstrap_samples_of_smoothing",
"(",
"self",
",",
"X",
",",
"y",
",",
"weights",
"=",
"None",
",",
"n_bootstraps",
"=",
"1",
",",
"objective",
"=",
"'auto'",
")",
":",
"mu",
"=",
"self",
".",
"predict_mu",
"(",
"X",
")",
"# Wood pg. 198 step 1",
... | Sample the smoothing parameters using simulated response data.
For now, the grid of `lam` values is 11 random points in M-dimensional
space, where M = the number of lam values, ie len(flatten(gam.lam))
all values are in [1e-3, 1e3] | [
"Sample",
"the",
"smoothing",
"parameters",
"using",
"simulated",
"response",
"data",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L2112-L2162 | train | 220,483 |
dswah/pyGAM | pygam/pygam.py | GAM._simulate_coef_from_bootstraps | def _simulate_coef_from_bootstraps(
self, n_draws, coef_bootstraps, cov_bootstraps):
"""Simulate coefficients using bootstrap samples."""
# Sample indices uniformly from {0, ..., n_bootstraps - 1}
# (Wood pg. 199 step 6)
random_bootstrap_indices = np.random.choice(
np.arange(len(coef_bootstraps)), size=n_draws, replace=True)
# Simulate `n_draws` many random coefficient vectors from a
# multivariate normal distribution with mean and covariance given by
# the bootstrap samples (indexed by `random_bootstrap_indices`) of
# `coef_bootstraps` and `cov_bootstraps`. Because it's faster to draw
# many samples from a certain distribution all at once, we make a dict
# mapping bootstrap indices to draw indices and use the `size`
# parameter of `np.random.multivariate_normal` to sample the draws
# needed from that bootstrap sample all at once.
bootstrap_index_to_draw_indices = defaultdict(list)
for draw_index, bootstrap_index in enumerate(random_bootstrap_indices):
bootstrap_index_to_draw_indices[bootstrap_index].append(draw_index)
coef_draws = np.empty((n_draws, len(self.coef_)))
for bootstrap, draw_indices in bootstrap_index_to_draw_indices.items():
coef_draws[draw_indices] = np.random.multivariate_normal(
coef_bootstraps[bootstrap], cov_bootstraps[bootstrap],
size=len(draw_indices))
return coef_draws | python | def _simulate_coef_from_bootstraps(
self, n_draws, coef_bootstraps, cov_bootstraps):
"""Simulate coefficients using bootstrap samples."""
# Sample indices uniformly from {0, ..., n_bootstraps - 1}
# (Wood pg. 199 step 6)
random_bootstrap_indices = np.random.choice(
np.arange(len(coef_bootstraps)), size=n_draws, replace=True)
# Simulate `n_draws` many random coefficient vectors from a
# multivariate normal distribution with mean and covariance given by
# the bootstrap samples (indexed by `random_bootstrap_indices`) of
# `coef_bootstraps` and `cov_bootstraps`. Because it's faster to draw
# many samples from a certain distribution all at once, we make a dict
# mapping bootstrap indices to draw indices and use the `size`
# parameter of `np.random.multivariate_normal` to sample the draws
# needed from that bootstrap sample all at once.
bootstrap_index_to_draw_indices = defaultdict(list)
for draw_index, bootstrap_index in enumerate(random_bootstrap_indices):
bootstrap_index_to_draw_indices[bootstrap_index].append(draw_index)
coef_draws = np.empty((n_draws, len(self.coef_)))
for bootstrap, draw_indices in bootstrap_index_to_draw_indices.items():
coef_draws[draw_indices] = np.random.multivariate_normal(
coef_bootstraps[bootstrap], cov_bootstraps[bootstrap],
size=len(draw_indices))
return coef_draws | [
"def",
"_simulate_coef_from_bootstraps",
"(",
"self",
",",
"n_draws",
",",
"coef_bootstraps",
",",
"cov_bootstraps",
")",
":",
"# Sample indices uniformly from {0, ..., n_bootstraps - 1}",
"# (Wood pg. 199 step 6)",
"random_bootstrap_indices",
"=",
"np",
".",
"random",
".",
"... | Simulate coefficients using bootstrap samples. | [
"Simulate",
"coefficients",
"using",
"bootstrap",
"samples",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L2164-L2191 | train | 220,484 |
dswah/pyGAM | pygam/pygam.py | LogisticGAM.accuracy | def accuracy(self, X=None, y=None, mu=None):
"""
computes the accuracy of the LogisticGAM
Parameters
----------
note: X or mu must be defined. defaults to mu
X : array-like of shape (n_samples, m_features), optional (default=None)
containing input data
y : array-like of shape (n,)
containing target data
mu : array-like of shape (n_samples,), optional (default=None
expected value of the targets given the model and inputs
Returns
-------
float in [0, 1]
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
y = check_y(y, self.link, self.distribution, verbose=self.verbose)
if X is not None:
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
if mu is None:
mu = self.predict_mu(X)
check_X_y(mu, y)
return ((mu > 0.5).astype(int) == y).mean() | python | def accuracy(self, X=None, y=None, mu=None):
"""
computes the accuracy of the LogisticGAM
Parameters
----------
note: X or mu must be defined. defaults to mu
X : array-like of shape (n_samples, m_features), optional (default=None)
containing input data
y : array-like of shape (n,)
containing target data
mu : array-like of shape (n_samples,), optional (default=None
expected value of the targets given the model and inputs
Returns
-------
float in [0, 1]
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
y = check_y(y, self.link, self.distribution, verbose=self.verbose)
if X is not None:
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
if mu is None:
mu = self.predict_mu(X)
check_X_y(mu, y)
return ((mu > 0.5).astype(int) == y).mean() | [
"def",
"accuracy",
"(",
"self",
",",
"X",
"=",
"None",
",",
"y",
"=",
"None",
",",
"mu",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_is_fitted",
":",
"raise",
"AttributeError",
"(",
"'GAM has not been fitted. Call fit first.'",
")",
"y",
"=",
"che... | computes the accuracy of the LogisticGAM
Parameters
----------
note: X or mu must be defined. defaults to mu
X : array-like of shape (n_samples, m_features), optional (default=None)
containing input data
y : array-like of shape (n,)
containing target data
mu : array-like of shape (n_samples,), optional (default=None
expected value of the targets given the model and inputs
Returns
-------
float in [0, 1] | [
"computes",
"the",
"accuracy",
"of",
"the",
"LogisticGAM"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L2395-L2426 | train | 220,485 |
dswah/pyGAM | pygam/pygam.py | PoissonGAM._exposure_to_weights | def _exposure_to_weights(self, y, exposure=None, weights=None):
"""simple tool to create a common API
Parameters
----------
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
y : y normalized by exposure
weights : array-like shape (n_samples,)
"""
y = y.ravel()
if exposure is not None:
exposure = np.array(exposure).astype('f').ravel()
exposure = check_array(exposure, name='sample exposure',
ndim=1, verbose=self.verbose)
else:
exposure = np.ones_like(y.ravel()).astype('float64')
# check data
exposure = exposure.ravel()
check_lengths(y, exposure)
# normalize response
y = y / exposure
if weights is not None:
weights = np.array(weights).astype('f').ravel()
weights = check_array(weights, name='sample weights',
ndim=1, verbose=self.verbose)
else:
weights = np.ones_like(y).astype('float64')
check_lengths(weights, exposure)
# set exposure as the weight
# we do this because we have divided our response
# so if we make an error of 1 now, we need it to count more heavily.
weights = weights * exposure
return y, weights | python | def _exposure_to_weights(self, y, exposure=None, weights=None):
"""simple tool to create a common API
Parameters
----------
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
y : y normalized by exposure
weights : array-like shape (n_samples,)
"""
y = y.ravel()
if exposure is not None:
exposure = np.array(exposure).astype('f').ravel()
exposure = check_array(exposure, name='sample exposure',
ndim=1, verbose=self.verbose)
else:
exposure = np.ones_like(y.ravel()).astype('float64')
# check data
exposure = exposure.ravel()
check_lengths(y, exposure)
# normalize response
y = y / exposure
if weights is not None:
weights = np.array(weights).astype('f').ravel()
weights = check_array(weights, name='sample weights',
ndim=1, verbose=self.verbose)
else:
weights = np.ones_like(y).astype('float64')
check_lengths(weights, exposure)
# set exposure as the weight
# we do this because we have divided our response
# so if we make an error of 1 now, we need it to count more heavily.
weights = weights * exposure
return y, weights | [
"def",
"_exposure_to_weights",
"(",
"self",
",",
"y",
",",
"exposure",
"=",
"None",
",",
"weights",
"=",
"None",
")",
":",
"y",
"=",
"y",
".",
"ravel",
"(",
")",
"if",
"exposure",
"is",
"not",
"None",
":",
"exposure",
"=",
"np",
".",
"array",
"(",
... | simple tool to create a common API
Parameters
----------
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
y : y normalized by exposure
weights : array-like shape (n_samples,) | [
"simple",
"tool",
"to",
"create",
"a",
"common",
"API"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L2604-L2654 | train | 220,486 |
dswah/pyGAM | pygam/pygam.py | PoissonGAM.predict | def predict(self, X, exposure=None):
"""
preduct expected value of target given model and input X
often this is done via expected value of GAM given input X
Parameters
---------
X : array-like of shape (n_samples, m_features), default: None
containing the input dataset
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
Returns
-------
y : np.array of shape (n_samples,)
containing predicted values under the model
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
if exposure is not None:
exposure = np.array(exposure).astype('f')
else:
exposure = np.ones(X.shape[0]).astype('f')
check_lengths(X, exposure)
return self.predict_mu(X) * exposure | python | def predict(self, X, exposure=None):
"""
preduct expected value of target given model and input X
often this is done via expected value of GAM given input X
Parameters
---------
X : array-like of shape (n_samples, m_features), default: None
containing the input dataset
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
Returns
-------
y : np.array of shape (n_samples,)
containing predicted values under the model
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
if exposure is not None:
exposure = np.array(exposure).astype('f')
else:
exposure = np.ones(X.shape[0]).astype('f')
check_lengths(X, exposure)
return self.predict_mu(X) * exposure | [
"def",
"predict",
"(",
"self",
",",
"X",
",",
"exposure",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_is_fitted",
":",
"raise",
"AttributeError",
"(",
"'GAM has not been fitted. Call fit first.'",
")",
"X",
"=",
"check_X",
"(",
"X",
",",
"n_feats",
... | preduct expected value of target given model and input X
often this is done via expected value of GAM given input X
Parameters
---------
X : array-like of shape (n_samples, m_features), default: None
containing the input dataset
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
Returns
-------
y : np.array of shape (n_samples,)
containing predicted values under the model | [
"preduct",
"expected",
"value",
"of",
"target",
"given",
"model",
"and",
"input",
"X",
"often",
"this",
"is",
"done",
"via",
"expected",
"value",
"of",
"GAM",
"given",
"input",
"X"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L2686-L2718 | train | 220,487 |
dswah/pyGAM | pygam/pygam.py | PoissonGAM.gridsearch | def gridsearch(self, X, y, exposure=None, weights=None,
return_scores=False, keep_best=True, objective='auto',
**param_grids):
"""
performs a grid search over a space of parameters for a given objective
NOTE:
gridsearch method is lazy and will not remove useless combinations
from the search space, eg.
>>> n_splines=np.arange(5,10), fit_splines=[True, False]
will result in 10 loops, of which 5 are equivalent because
even though fit_splines==False
it is not recommended to search over a grid that alternates
between known scales and unknown scales, as the scores of the
candidate models will not be comparable.
Parameters
----------
X : array
input data of shape (n_samples, m_features)
y : array
label data of shape (n_samples,)
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
return_scores : boolean, default False
whether to return the hyperpamaters
and score for each element in the grid
keep_best : boolean
whether to keep the best GAM as self.
default: True
objective : string, default: 'auto'
metric to optimize. must be in ['AIC', 'AICc', 'GCV', 'UBRE', 'auto']
if 'auto', then grid search will optimize GCV for models with unknown
scale and UBRE for models with known scale.
**kwargs : dict, default {'lam': np.logspace(-3, 3, 11)}
pairs of parameters and iterables of floats, or
parameters and iterables of iterables of floats.
if iterable of iterables of floats, the outer iterable must have
length m_features.
the method will make a grid of all the combinations of the parameters
and fit a GAM to each combination.
Returns
-------
if return_values == True:
model_scores : dict
Contains each fitted model as keys and corresponding
objective scores as values
else:
self, ie possibly the newly fitted model
"""
y, weights = self._exposure_to_weights(y, exposure, weights)
return super(PoissonGAM, self).gridsearch(X, y,
weights=weights,
return_scores=return_scores,
keep_best=keep_best,
objective=objective,
**param_grids) | python | def gridsearch(self, X, y, exposure=None, weights=None,
return_scores=False, keep_best=True, objective='auto',
**param_grids):
"""
performs a grid search over a space of parameters for a given objective
NOTE:
gridsearch method is lazy and will not remove useless combinations
from the search space, eg.
>>> n_splines=np.arange(5,10), fit_splines=[True, False]
will result in 10 loops, of which 5 are equivalent because
even though fit_splines==False
it is not recommended to search over a grid that alternates
between known scales and unknown scales, as the scores of the
candidate models will not be comparable.
Parameters
----------
X : array
input data of shape (n_samples, m_features)
y : array
label data of shape (n_samples,)
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
return_scores : boolean, default False
whether to return the hyperpamaters
and score for each element in the grid
keep_best : boolean
whether to keep the best GAM as self.
default: True
objective : string, default: 'auto'
metric to optimize. must be in ['AIC', 'AICc', 'GCV', 'UBRE', 'auto']
if 'auto', then grid search will optimize GCV for models with unknown
scale and UBRE for models with known scale.
**kwargs : dict, default {'lam': np.logspace(-3, 3, 11)}
pairs of parameters and iterables of floats, or
parameters and iterables of iterables of floats.
if iterable of iterables of floats, the outer iterable must have
length m_features.
the method will make a grid of all the combinations of the parameters
and fit a GAM to each combination.
Returns
-------
if return_values == True:
model_scores : dict
Contains each fitted model as keys and corresponding
objective scores as values
else:
self, ie possibly the newly fitted model
"""
y, weights = self._exposure_to_weights(y, exposure, weights)
return super(PoissonGAM, self).gridsearch(X, y,
weights=weights,
return_scores=return_scores,
keep_best=keep_best,
objective=objective,
**param_grids) | [
"def",
"gridsearch",
"(",
"self",
",",
"X",
",",
"y",
",",
"exposure",
"=",
"None",
",",
"weights",
"=",
"None",
",",
"return_scores",
"=",
"False",
",",
"keep_best",
"=",
"True",
",",
"objective",
"=",
"'auto'",
",",
"*",
"*",
"param_grids",
")",
":... | performs a grid search over a space of parameters for a given objective
NOTE:
gridsearch method is lazy and will not remove useless combinations
from the search space, eg.
>>> n_splines=np.arange(5,10), fit_splines=[True, False]
will result in 10 loops, of which 5 are equivalent because
even though fit_splines==False
it is not recommended to search over a grid that alternates
between known scales and unknown scales, as the scores of the
candidate models will not be comparable.
Parameters
----------
X : array
input data of shape (n_samples, m_features)
y : array
label data of shape (n_samples,)
exposure : array-like shape (n_samples,) or None, default: None
containing exposures
if None, defaults to array of ones
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
return_scores : boolean, default False
whether to return the hyperpamaters
and score for each element in the grid
keep_best : boolean
whether to keep the best GAM as self.
default: True
objective : string, default: 'auto'
metric to optimize. must be in ['AIC', 'AICc', 'GCV', 'UBRE', 'auto']
if 'auto', then grid search will optimize GCV for models with unknown
scale and UBRE for models with known scale.
**kwargs : dict, default {'lam': np.logspace(-3, 3, 11)}
pairs of parameters and iterables of floats, or
parameters and iterables of iterables of floats.
if iterable of iterables of floats, the outer iterable must have
length m_features.
the method will make a grid of all the combinations of the parameters
and fit a GAM to each combination.
Returns
-------
if return_values == True:
model_scores : dict
Contains each fitted model as keys and corresponding
objective scores as values
else:
self, ie possibly the newly fitted model | [
"performs",
"a",
"grid",
"search",
"over",
"a",
"space",
"of",
"parameters",
"for",
"a",
"given",
"objective"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L2720-L2794 | train | 220,488 |
dswah/pyGAM | pygam/pygam.py | ExpectileGAM._get_quantile_ratio | def _get_quantile_ratio(self, X, y):
"""find the expirical quantile of the model
Parameters
----------
X : array-like, shape (n_samples, m_features)
Training vectors, where n_samples is the number of samples
and m_features is the number of features.
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
Returns
-------
ratio : float on [0, 1]
"""
y_pred = self.predict(X)
return (y_pred > y).mean() | python | def _get_quantile_ratio(self, X, y):
"""find the expirical quantile of the model
Parameters
----------
X : array-like, shape (n_samples, m_features)
Training vectors, where n_samples is the number of samples
and m_features is the number of features.
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
Returns
-------
ratio : float on [0, 1]
"""
y_pred = self.predict(X)
return (y_pred > y).mean() | [
"def",
"_get_quantile_ratio",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"y_pred",
"=",
"self",
".",
"predict",
"(",
"X",
")",
"return",
"(",
"y_pred",
">",
"y",
")",
".",
"mean",
"(",
")"
] | find the expirical quantile of the model
Parameters
----------
X : array-like, shape (n_samples, m_features)
Training vectors, where n_samples is the number of samples
and m_features is the number of features.
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
Returns
-------
ratio : float on [0, 1] | [
"find",
"the",
"expirical",
"quantile",
"of",
"the",
"model"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L3150-L3168 | train | 220,489 |
dswah/pyGAM | pygam/pygam.py | ExpectileGAM.fit_quantile | def fit_quantile(self, X, y, quantile, max_iter=20, tol=0.01, weights=None):
"""fit ExpectileGAM to a desired quantile via binary search
Parameters
----------
X : array-like, shape (n_samples, m_features)
Training vectors, where n_samples is the number of samples
and m_features is the number of features.
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
quantile : float on (0, 1)
desired quantile to fit.
max_iter : int, default: 20
maximum number of binary search iterations to perform
tol : float > 0, default: 0.01
maximum distance between desired quantile and fitted quantile
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
self : fitted GAM object
"""
def _within_tol(a, b, tol):
return np.abs(a - b) <= tol
# validate arguments
if quantile <= 0 or quantile >= 1:
raise ValueError('quantile must be on (0, 1), but found {}'.format(quantile))
if tol <= 0:
raise ValueError('tol must be float > 0 {}'.format(tol))
if max_iter <= 0:
raise ValueError('max_iter must be int > 0 {}'.format(max_iter))
# perform a first fit if necessary
if not self._is_fitted:
self.fit(X, y, weights=weights)
# do binary search
max_ = 1.0
min_ = 0.0
n_iter = 0
while n_iter < max_iter:
ratio = self._get_quantile_ratio(X, y)
if _within_tol(ratio, quantile, tol):
break
if ratio < quantile:
min_ = self.expectile
else:
max_ = self.expectile
expectile = (max_ + min_) / 2.
self.set_params(expectile=expectile)
self.fit(X, y, weights=weights)
n_iter += 1
# print diagnostics
if not _within_tol(ratio, quantile, tol) and self.verbose:
warnings.warn('maximum iterations reached')
return self | python | def fit_quantile(self, X, y, quantile, max_iter=20, tol=0.01, weights=None):
"""fit ExpectileGAM to a desired quantile via binary search
Parameters
----------
X : array-like, shape (n_samples, m_features)
Training vectors, where n_samples is the number of samples
and m_features is the number of features.
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
quantile : float on (0, 1)
desired quantile to fit.
max_iter : int, default: 20
maximum number of binary search iterations to perform
tol : float > 0, default: 0.01
maximum distance between desired quantile and fitted quantile
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
self : fitted GAM object
"""
def _within_tol(a, b, tol):
return np.abs(a - b) <= tol
# validate arguments
if quantile <= 0 or quantile >= 1:
raise ValueError('quantile must be on (0, 1), but found {}'.format(quantile))
if tol <= 0:
raise ValueError('tol must be float > 0 {}'.format(tol))
if max_iter <= 0:
raise ValueError('max_iter must be int > 0 {}'.format(max_iter))
# perform a first fit if necessary
if not self._is_fitted:
self.fit(X, y, weights=weights)
# do binary search
max_ = 1.0
min_ = 0.0
n_iter = 0
while n_iter < max_iter:
ratio = self._get_quantile_ratio(X, y)
if _within_tol(ratio, quantile, tol):
break
if ratio < quantile:
min_ = self.expectile
else:
max_ = self.expectile
expectile = (max_ + min_) / 2.
self.set_params(expectile=expectile)
self.fit(X, y, weights=weights)
n_iter += 1
# print diagnostics
if not _within_tol(ratio, quantile, tol) and self.verbose:
warnings.warn('maximum iterations reached')
return self | [
"def",
"fit_quantile",
"(",
"self",
",",
"X",
",",
"y",
",",
"quantile",
",",
"max_iter",
"=",
"20",
",",
"tol",
"=",
"0.01",
",",
"weights",
"=",
"None",
")",
":",
"def",
"_within_tol",
"(",
"a",
",",
"b",
",",
"tol",
")",
":",
"return",
"np",
... | fit ExpectileGAM to a desired quantile via binary search
Parameters
----------
X : array-like, shape (n_samples, m_features)
Training vectors, where n_samples is the number of samples
and m_features is the number of features.
y : array-like, shape (n_samples,)
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
quantile : float on (0, 1)
desired quantile to fit.
max_iter : int, default: 20
maximum number of binary search iterations to perform
tol : float > 0, default: 0.01
maximum distance between desired quantile and fitted quantile
weights : array-like shape (n_samples,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
self : fitted GAM object | [
"fit",
"ExpectileGAM",
"to",
"a",
"desired",
"quantile",
"via",
"binary",
"search"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/pygam.py#L3170-L3238 | train | 220,490 |
dswah/pyGAM | pygam/core.py | nice_repr | def nice_repr(name, param_kvs, line_width=30, line_offset=5, decimals=3, args=None, flatten_attrs=True):
"""
tool to do a nice repr of a class.
Parameters
----------
name : str
class name
param_kvs : dict
dict containing class parameters names as keys,
and the corresponding values as values
line_width : int
desired maximum line width.
default: 30
line_offset : int
desired offset for new lines
default: 5
decimals : int
number of decimal places to keep for float values
default: 3
Returns
-------
out : str
nicely formatted repr of class instance
"""
if not param_kvs and not args :
# if the object has no params it's easy
return '{}()'.format(name)
# sort keys and values
ks = list(param_kvs.keys())
vs = list(param_kvs.values())
idxs = np.argsort(ks)
param_kvs = [(ks[i],vs[i]) for i in idxs]
if args is not None:
param_kvs = [(None, arg) for arg in args] + param_kvs
param_kvs = param_kvs[::-1]
out = ''
current_line = name + '('
while len(param_kvs) > 0:
# flatten sub-term properties, but not `terms`
k, v = param_kvs.pop()
if flatten_attrs and k is not 'terms':
v = flatten(v)
# round the floats first
if issubclass(v.__class__, (float, np.ndarray)):
v = round_to_n_decimal_places(v, n=decimals)
v = str(v)
else:
v = repr(v)
# handle args
if k is None:
param = '{},'.format(v)
else:
param = '{}={},'.format(k, v)
# print
if len(current_line + param) <= line_width:
current_line += param
else:
out += current_line + '\n'
current_line = ' '*line_offset + param
if len(current_line) < line_width and len(param_kvs) > 0:
current_line += ' '
out += current_line[:-1] # remove trailing comma
out += ')'
return out | python | def nice_repr(name, param_kvs, line_width=30, line_offset=5, decimals=3, args=None, flatten_attrs=True):
"""
tool to do a nice repr of a class.
Parameters
----------
name : str
class name
param_kvs : dict
dict containing class parameters names as keys,
and the corresponding values as values
line_width : int
desired maximum line width.
default: 30
line_offset : int
desired offset for new lines
default: 5
decimals : int
number of decimal places to keep for float values
default: 3
Returns
-------
out : str
nicely formatted repr of class instance
"""
if not param_kvs and not args :
# if the object has no params it's easy
return '{}()'.format(name)
# sort keys and values
ks = list(param_kvs.keys())
vs = list(param_kvs.values())
idxs = np.argsort(ks)
param_kvs = [(ks[i],vs[i]) for i in idxs]
if args is not None:
param_kvs = [(None, arg) for arg in args] + param_kvs
param_kvs = param_kvs[::-1]
out = ''
current_line = name + '('
while len(param_kvs) > 0:
# flatten sub-term properties, but not `terms`
k, v = param_kvs.pop()
if flatten_attrs and k is not 'terms':
v = flatten(v)
# round the floats first
if issubclass(v.__class__, (float, np.ndarray)):
v = round_to_n_decimal_places(v, n=decimals)
v = str(v)
else:
v = repr(v)
# handle args
if k is None:
param = '{},'.format(v)
else:
param = '{}={},'.format(k, v)
# print
if len(current_line + param) <= line_width:
current_line += param
else:
out += current_line + '\n'
current_line = ' '*line_offset + param
if len(current_line) < line_width and len(param_kvs) > 0:
current_line += ' '
out += current_line[:-1] # remove trailing comma
out += ')'
return out | [
"def",
"nice_repr",
"(",
"name",
",",
"param_kvs",
",",
"line_width",
"=",
"30",
",",
"line_offset",
"=",
"5",
",",
"decimals",
"=",
"3",
",",
"args",
"=",
"None",
",",
"flatten_attrs",
"=",
"True",
")",
":",
"if",
"not",
"param_kvs",
"and",
"not",
"... | tool to do a nice repr of a class.
Parameters
----------
name : str
class name
param_kvs : dict
dict containing class parameters names as keys,
and the corresponding values as values
line_width : int
desired maximum line width.
default: 30
line_offset : int
desired offset for new lines
default: 5
decimals : int
number of decimal places to keep for float values
default: 3
Returns
-------
out : str
nicely formatted repr of class instance | [
"tool",
"to",
"do",
"a",
"nice",
"repr",
"of",
"a",
"class",
"."
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/core.py#L11-L85 | train | 220,491 |
dswah/pyGAM | pygam/core.py | Core.get_params | def get_params(self, deep=False):
"""
returns a dict of all of the object's user-facing parameters
Parameters
----------
deep : boolean, default: False
when True, also gets non-user-facing paramters
Returns
-------
dict
"""
attrs = self.__dict__
for attr in self._include:
attrs[attr] = getattr(self, attr)
if deep is True:
return attrs
return dict([(k,v) for k,v in list(attrs.items()) \
if (k[0] != '_') \
and (k[-1] != '_') \
and (k not in self._exclude)]) | python | def get_params(self, deep=False):
"""
returns a dict of all of the object's user-facing parameters
Parameters
----------
deep : boolean, default: False
when True, also gets non-user-facing paramters
Returns
-------
dict
"""
attrs = self.__dict__
for attr in self._include:
attrs[attr] = getattr(self, attr)
if deep is True:
return attrs
return dict([(k,v) for k,v in list(attrs.items()) \
if (k[0] != '_') \
and (k[-1] != '_') \
and (k not in self._exclude)]) | [
"def",
"get_params",
"(",
"self",
",",
"deep",
"=",
"False",
")",
":",
"attrs",
"=",
"self",
".",
"__dict__",
"for",
"attr",
"in",
"self",
".",
"_include",
":",
"attrs",
"[",
"attr",
"]",
"=",
"getattr",
"(",
"self",
",",
"attr",
")",
"if",
"deep",... | returns a dict of all of the object's user-facing parameters
Parameters
----------
deep : boolean, default: False
when True, also gets non-user-facing paramters
Returns
-------
dict | [
"returns",
"a",
"dict",
"of",
"all",
"of",
"the",
"object",
"s",
"user",
"-",
"facing",
"parameters"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/core.py#L132-L154 | train | 220,492 |
dswah/pyGAM | pygam/core.py | Core.set_params | def set_params(self, deep=False, force=False, **parameters):
"""
sets an object's paramters
Parameters
----------
deep : boolean, default: False
when True, also sets non-user-facing paramters
force : boolean, default: False
when True, also sets parameters that the object does not already
have
**parameters : paramters to set
Returns
------
self
"""
param_names = self.get_params(deep=deep).keys()
for parameter, value in parameters.items():
if (parameter in param_names
or force
or (hasattr(self, parameter) and parameter == parameter.strip('_'))):
setattr(self, parameter, value)
return self | python | def set_params(self, deep=False, force=False, **parameters):
"""
sets an object's paramters
Parameters
----------
deep : boolean, default: False
when True, also sets non-user-facing paramters
force : boolean, default: False
when True, also sets parameters that the object does not already
have
**parameters : paramters to set
Returns
------
self
"""
param_names = self.get_params(deep=deep).keys()
for parameter, value in parameters.items():
if (parameter in param_names
or force
or (hasattr(self, parameter) and parameter == parameter.strip('_'))):
setattr(self, parameter, value)
return self | [
"def",
"set_params",
"(",
"self",
",",
"deep",
"=",
"False",
",",
"force",
"=",
"False",
",",
"*",
"*",
"parameters",
")",
":",
"param_names",
"=",
"self",
".",
"get_params",
"(",
"deep",
"=",
"deep",
")",
".",
"keys",
"(",
")",
"for",
"parameter",
... | sets an object's paramters
Parameters
----------
deep : boolean, default: False
when True, also sets non-user-facing paramters
force : boolean, default: False
when True, also sets parameters that the object does not already
have
**parameters : paramters to set
Returns
------
self | [
"sets",
"an",
"object",
"s",
"paramters"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/core.py#L156-L179 | train | 220,493 |
dswah/pyGAM | pygam/terms.py | Term.build_from_info | def build_from_info(cls, info):
"""build a Term instance from a dict
Parameters
----------
cls : class
info : dict
contains all information needed to build the term
Return
------
Term instance
"""
info = deepcopy(info)
if 'term_type' in info:
cls_ = TERMS[info.pop('term_type')]
if issubclass(cls_, MetaTermMixin):
return cls_.build_from_info(info)
else:
cls_ = cls
return cls_(**info) | python | def build_from_info(cls, info):
"""build a Term instance from a dict
Parameters
----------
cls : class
info : dict
contains all information needed to build the term
Return
------
Term instance
"""
info = deepcopy(info)
if 'term_type' in info:
cls_ = TERMS[info.pop('term_type')]
if issubclass(cls_, MetaTermMixin):
return cls_.build_from_info(info)
else:
cls_ = cls
return cls_(**info) | [
"def",
"build_from_info",
"(",
"cls",
",",
"info",
")",
":",
"info",
"=",
"deepcopy",
"(",
"info",
")",
"if",
"'term_type'",
"in",
"info",
":",
"cls_",
"=",
"TERMS",
"[",
"info",
".",
"pop",
"(",
"'term_type'",
")",
"]",
"if",
"issubclass",
"(",
"cls... | build a Term instance from a dict
Parameters
----------
cls : class
info : dict
contains all information needed to build the term
Return
------
Term instance | [
"build",
"a",
"Term",
"instance",
"from",
"a",
"dict"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/terms.py#L216-L238 | train | 220,494 |
dswah/pyGAM | pygam/terms.py | MetaTermMixin._has_terms | def _has_terms(self):
"""bool, whether the instance has any sub-terms
"""
loc = self._super_get('_term_location')
return self._super_has(loc) \
and isiterable(self._super_get(loc)) \
and len(self._super_get(loc)) > 0 \
and all([isinstance(term, Term) for term in self._super_get(loc)]) | python | def _has_terms(self):
"""bool, whether the instance has any sub-terms
"""
loc = self._super_get('_term_location')
return self._super_has(loc) \
and isiterable(self._super_get(loc)) \
and len(self._super_get(loc)) > 0 \
and all([isinstance(term, Term) for term in self._super_get(loc)]) | [
"def",
"_has_terms",
"(",
"self",
")",
":",
"loc",
"=",
"self",
".",
"_super_get",
"(",
"'_term_location'",
")",
"return",
"self",
".",
"_super_has",
"(",
"loc",
")",
"and",
"isiterable",
"(",
"self",
".",
"_super_get",
"(",
"loc",
")",
")",
"and",
"le... | bool, whether the instance has any sub-terms | [
"bool",
"whether",
"the",
"instance",
"has",
"any",
"sub",
"-",
"terms"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/terms.py#L957-L964 | train | 220,495 |
dswah/pyGAM | pygam/terms.py | TensorTerm.build_from_info | def build_from_info(cls, info):
"""build a TensorTerm instance from a dict
Parameters
----------
cls : class
info : dict
contains all information needed to build the term
Return
------
TensorTerm instance
"""
terms = []
for term_info in info['terms']:
terms.append(SplineTerm.build_from_info(term_info))
return cls(*terms) | python | def build_from_info(cls, info):
"""build a TensorTerm instance from a dict
Parameters
----------
cls : class
info : dict
contains all information needed to build the term
Return
------
TensorTerm instance
"""
terms = []
for term_info in info['terms']:
terms.append(SplineTerm.build_from_info(term_info))
return cls(*terms) | [
"def",
"build_from_info",
"(",
"cls",
",",
"info",
")",
":",
"terms",
"=",
"[",
"]",
"for",
"term_info",
"in",
"info",
"[",
"'terms'",
"]",
":",
"terms",
".",
"append",
"(",
"SplineTerm",
".",
"build_from_info",
"(",
"term_info",
")",
")",
"return",
"c... | build a TensorTerm instance from a dict
Parameters
----------
cls : class
info : dict
contains all information needed to build the term
Return
------
TensorTerm instance | [
"build",
"a",
"TensorTerm",
"instance",
"from",
"a",
"dict"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/terms.py#L1217-L1234 | train | 220,496 |
dswah/pyGAM | pygam/terms.py | TensorTerm.hasconstraint | def hasconstraint(self):
"""bool, whether the term has any constraints
"""
constrained = False
for term in self._terms:
constrained = constrained or term.hasconstraint
return constrained | python | def hasconstraint(self):
"""bool, whether the term has any constraints
"""
constrained = False
for term in self._terms:
constrained = constrained or term.hasconstraint
return constrained | [
"def",
"hasconstraint",
"(",
"self",
")",
":",
"constrained",
"=",
"False",
"for",
"term",
"in",
"self",
".",
"_terms",
":",
"constrained",
"=",
"constrained",
"or",
"term",
".",
"hasconstraint",
"return",
"constrained"
] | bool, whether the term has any constraints | [
"bool",
"whether",
"the",
"term",
"has",
"any",
"constraints"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/terms.py#L1237-L1243 | train | 220,497 |
dswah/pyGAM | pygam/terms.py | TensorTerm._build_marginal_constraints | def _build_marginal_constraints(self, i, coef, constraint_lam, constraint_l2):
"""builds a constraint matrix for a marginal term in the tensor term
takes a tensor's coef vector, and slices it into pieces corresponding
to term i, then builds a constraint matrix for each piece of the coef vector,
and assembles them into a composite constraint matrix
Parameters
----------
i : int,
index of the marginal term for which to build a constraint matrix
coefs : array-like containing the coefficients of the tensor term
constraint_lam : float,
penalty to impose on the constraint.
typically this is a very large number.
constraint_l2 : float,
loading to improve the numerical conditioning of the constraint
matrix.
typically this is a very small number.
Returns
-------
C : sparse CSC matrix containing the model constraints in quadratic form
"""
composite_C = np.zeros((len(coef), len(coef)))
for slice_ in self._iterate_marginal_coef_slices(i):
# get the slice of coefficient vector
coef_slice = coef[slice_]
# build the constraint matrix for that slice
slice_C = self._terms[i].build_constraints(coef_slice, constraint_lam, constraint_l2)
# now enter it into the composite
composite_C[tuple(np.meshgrid(slice_, slice_))] = slice_C.A
return sp.sparse.csc_matrix(composite_C) | python | def _build_marginal_constraints(self, i, coef, constraint_lam, constraint_l2):
"""builds a constraint matrix for a marginal term in the tensor term
takes a tensor's coef vector, and slices it into pieces corresponding
to term i, then builds a constraint matrix for each piece of the coef vector,
and assembles them into a composite constraint matrix
Parameters
----------
i : int,
index of the marginal term for which to build a constraint matrix
coefs : array-like containing the coefficients of the tensor term
constraint_lam : float,
penalty to impose on the constraint.
typically this is a very large number.
constraint_l2 : float,
loading to improve the numerical conditioning of the constraint
matrix.
typically this is a very small number.
Returns
-------
C : sparse CSC matrix containing the model constraints in quadratic form
"""
composite_C = np.zeros((len(coef), len(coef)))
for slice_ in self._iterate_marginal_coef_slices(i):
# get the slice of coefficient vector
coef_slice = coef[slice_]
# build the constraint matrix for that slice
slice_C = self._terms[i].build_constraints(coef_slice, constraint_lam, constraint_l2)
# now enter it into the composite
composite_C[tuple(np.meshgrid(slice_, slice_))] = slice_C.A
return sp.sparse.csc_matrix(composite_C) | [
"def",
"_build_marginal_constraints",
"(",
"self",
",",
"i",
",",
"coef",
",",
"constraint_lam",
",",
"constraint_l2",
")",
":",
"composite_C",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"coef",
")",
",",
"len",
"(",
"coef",
")",
")",
")",
"for",
... | builds a constraint matrix for a marginal term in the tensor term
takes a tensor's coef vector, and slices it into pieces corresponding
to term i, then builds a constraint matrix for each piece of the coef vector,
and assembles them into a composite constraint matrix
Parameters
----------
i : int,
index of the marginal term for which to build a constraint matrix
coefs : array-like containing the coefficients of the tensor term
constraint_lam : float,
penalty to impose on the constraint.
typically this is a very large number.
constraint_l2 : float,
loading to improve the numerical conditioning of the constraint
matrix.
typically this is a very small number.
Returns
-------
C : sparse CSC matrix containing the model constraints in quadratic form | [
"builds",
"a",
"constraint",
"matrix",
"for",
"a",
"marginal",
"term",
"in",
"the",
"tensor",
"term"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/terms.py#L1370-L1412 | train | 220,498 |
dswah/pyGAM | pygam/terms.py | TensorTerm._iterate_marginal_coef_slices | def _iterate_marginal_coef_slices(self, i):
"""iterator of indices into tensor's coef vector for marginal term i's coefs
takes a tensor_term and returns an iterator of indices
that chop up the tensor's coef vector into slices belonging to term i
Parameters
----------
i : int,
index of marginal term
Yields
------
np.ndarray of ints
"""
dims = [term_.n_coefs for term_ in self]
# make all linear indices
idxs = np.arange(np.prod(dims))
# reshape indices to a Nd matrix
idxs = idxs.reshape(dims)
# reshape to a 2d matrix, where we can loop over rows
idxs = np.moveaxis(idxs, i, 0).reshape(idxs.shape[i], int(idxs.size/idxs.shape[i]))
# loop over rows
for slice_ in idxs.T:
yield slice_ | python | def _iterate_marginal_coef_slices(self, i):
"""iterator of indices into tensor's coef vector for marginal term i's coefs
takes a tensor_term and returns an iterator of indices
that chop up the tensor's coef vector into slices belonging to term i
Parameters
----------
i : int,
index of marginal term
Yields
------
np.ndarray of ints
"""
dims = [term_.n_coefs for term_ in self]
# make all linear indices
idxs = np.arange(np.prod(dims))
# reshape indices to a Nd matrix
idxs = idxs.reshape(dims)
# reshape to a 2d matrix, where we can loop over rows
idxs = np.moveaxis(idxs, i, 0).reshape(idxs.shape[i], int(idxs.size/idxs.shape[i]))
# loop over rows
for slice_ in idxs.T:
yield slice_ | [
"def",
"_iterate_marginal_coef_slices",
"(",
"self",
",",
"i",
")",
":",
"dims",
"=",
"[",
"term_",
".",
"n_coefs",
"for",
"term_",
"in",
"self",
"]",
"# make all linear indices",
"idxs",
"=",
"np",
".",
"arange",
"(",
"np",
".",
"prod",
"(",
"dims",
")"... | iterator of indices into tensor's coef vector for marginal term i's coefs
takes a tensor_term and returns an iterator of indices
that chop up the tensor's coef vector into slices belonging to term i
Parameters
----------
i : int,
index of marginal term
Yields
------
np.ndarray of ints | [
"iterator",
"of",
"indices",
"into",
"tensor",
"s",
"coef",
"vector",
"for",
"marginal",
"term",
"i",
"s",
"coefs"
] | b3e5c3cd580f0a3ad69f9372861624f67760c325 | https://github.com/dswah/pyGAM/blob/b3e5c3cd580f0a3ad69f9372861624f67760c325/pygam/terms.py#L1414-L1442 | train | 220,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.