repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
Kautenja/nes-py | nes_py/nes_env.py | NESEnv.reset | python | def reset(self):
# call the before reset callback
self._will_reset()
# reset the emulator
if self._has_backup:
self._restore()
else:
_LIB.Reset(self._env)
# call the after reset callback
self._did_reset()
# set the done flag to false
self.done = False
# return the screen from the emulator
return self.screen | Reset the state of the environment and returns an initial observation.
Returns:
state (np.ndarray): next frame as a result of the given action | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/nes_env.py#L245-L265 | null | class NESEnv(gym.Env):
"""An NES environment based on the LaiNES emulator."""
# relevant meta-data about the environment
metadata = {
'render.modes': ['rgb_array', 'human'],
'video.frames_per_second': 60
}
# the legal range for rewards for this environment
reward_range = (-float('inf'), float('inf'))
# observation space for the environment is static across all instances
observation_space = Box(
low=0,
high=255,
shape=SCREEN_SHAPE_24_BIT,
dtype=np.uint8
)
# action space is a bitmap of button press values for the 8 NES buttons
action_space = Discrete(256)
def __init__(self, rom_path):
"""
Create a new NES environment.
Args:
rom_path (str): the path to the ROM for the environment
Returns:
None
"""
# create a ROM file from the ROM path
rom = ROM(rom_path)
# check that there is PRG ROM
if rom.prg_rom_size == 0:
raise ValueError('ROM has no PRG-ROM banks.')
# ensure that there is no trainer
if rom.has_trainer:
raise ValueError('ROM has trainer. trainer is not supported.')
# try to read the PRG ROM and raise a value error if it fails
_ = rom.prg_rom
# try to read the CHR ROM and raise a value error if it fails
_ = rom.chr_rom
# check the TV system
if rom.is_pal:
raise ValueError('ROM is PAL. PAL is not supported.')
# check that the mapper is implemented
elif rom.mapper not in {0, 1, 2, 3}:
msg = 'ROM has an unsupported mapper number {}.'
raise ValueError(msg.format(rom.mapper))
# create a dedicated random number generator for the environment
self.np_random = np.random.RandomState()
# store the ROM path
self._rom_path = rom_path
# initialize the C++ object for running the environment
self._env = _LIB.Initialize(self._rom_path)
# setup a placeholder for a 'human' render mode viewer
self.viewer = None
# setup a placeholder for a pointer to a backup state
self._has_backup = False
# setup a done flag
self.done = True
# setup the controllers, screen, and RAM buffers
self.controllers = [self._controller_buffer(port) for port in range(2)]
self.screen = self._screen_buffer()
self.ram = self._ram_buffer()
def _screen_buffer(self):
"""Setup the screen buffer from the C++ code."""
# get the address of the screen
address = _LIB.Screen(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(SCREEN_TENSOR)).contents
# create a NumPy array from the buffer
screen = np.frombuffer(buffer_, dtype='uint8')
# reshape the screen from a column vector to a tensor
screen = screen.reshape(SCREEN_SHAPE_32_BIT)
# flip the bytes if the machine is little-endian (which it likely is)
if sys.byteorder == 'little':
# invert the little-endian BGRx channels to big-endian xRGB
screen = screen[:, :, ::-1]
# remove the 0th axis (padding from storing colors in 32 bit)
return screen[:, :, 1:]
def _ram_buffer(self):
"""Setup the RAM buffer from the C++ code."""
# get the address of the RAM
address = _LIB.Memory(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(RAM_VECTOR)).contents
# create a NumPy array from the buffer
return np.frombuffer(buffer_, dtype='uint8')
def _controller_buffer(self, port):
"""
Find the pointer to a controller and setup a NumPy buffer.
Args:
port: the port of the controller to setup
Returns:
a NumPy buffer with the controller's binary data
"""
# get the address of the controller
address = _LIB.Controller(self._env, port)
# create a memory buffer using the ctypes pointer for this vector
buffer_ = ctypes.cast(address, ctypes.POINTER(CONTROLLER_VECTOR)).contents
# create a NumPy buffer from the binary data and return it
return np.frombuffer(buffer_, dtype='uint8')
def _frame_advance(self, action):
"""
Advance a frame in the emulator with an action.
Args:
action (byte): the action to press on the joy-pad
Returns:
None
"""
# set the action on the controller
self.controllers[0][:] = action
# perform a step on the emulator
_LIB.Step(self._env)
def _backup(self):
"""Backup the NES state in the emulator."""
_LIB.Backup(self._env)
self._has_backup = True
def _restore(self):
"""Restore the backup state into the NES emulator."""
_LIB.Restore(self._env)
def _will_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def seed(self, seed=None):
"""
Set the seed for this environment's random number generator.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
# if there is no seed, return an empty list
if seed is None:
return []
# set the random number seed for the NumPy random number generator
self.np_random.seed(seed)
# return the list of seeds used by RNG(s) in the environment
return [seed]
def _did_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def step(self, action):
"""
Run one frame of the NES and return the relevant observation data.
Args:
action (byte): the bitmap determining which buttons to press
Returns:
a tuple of:
- state (np.ndarray): next frame as a result of the given action
- reward (float) : amount of reward returned after given action
- done (boolean): whether the episode has ended
- info (dict): contains auxiliary diagnostic information
"""
# if the environment is done, raise an error
if self.done:
raise ValueError('cannot step in a done environment! call `reset`')
# set the action on the controller
self.controllers[0][:] = action
# pass the action to the emulator as an unsigned byte
_LIB.Step(self._env)
# get the reward for this step
reward = self._get_reward()
# get the done flag for this step
self.done = self._get_done()
# get the info for this step
info = self._get_info()
# call the after step callback
self._did_step(self.done)
# bound the reward in [min, max]
if reward < self.reward_range[0]:
reward = self.reward_range[0]
elif reward > self.reward_range[1]:
reward = self.reward_range[1]
# return the screen from the emulator and other relevant data
return self.screen, reward, self.done, info
def _get_reward(self):
"""Return the reward after a step occurs."""
return 0
def _get_done(self):
"""Return True if the episode is over, False otherwise."""
return False
def _get_info(self):
"""Return the info after a step occurs."""
return {}
def _did_step(self, done):
"""
Handle any RAM hacking after a step occurs.
Args:
done (bool): whether the done flag is set to true
Returns:
None
"""
pass
def close(self):
"""Close the environment."""
# make sure the environment hasn't already been closed
if self._env is None:
raise ValueError('env has already been closed.')
# purge the environment from C++ memory
_LIB.Close(self._env)
# deallocate the object locally
self._env = None
# if there is an image viewer open, delete it
if self.viewer is not None:
self.viewer.close()
def render(self, mode='human'):
"""
Render the environment.
Args:
mode (str): the mode to render with:
- human: render to the current display
- rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image
Returns:
a numpy array if mode is 'rgb_array', None otherwise
"""
if mode == 'human':
# if the viewer isn't setup, import it and create one
if self.viewer is None:
from ._image_viewer import ImageViewer
# get the caption for the ImageViewer
if self.spec is None:
# if there is no spec, just use the .nes filename
caption = self._rom_path.split('/')[-1]
else:
# set the caption to the OpenAI Gym id
caption = self.spec.id
# create the ImageViewer to display frames
self.viewer = ImageViewer(
caption=caption,
height=SCREEN_HEIGHT,
width=SCREEN_WIDTH,
)
# show the screen on the image viewer
self.viewer.show(self.screen)
elif mode == 'rgb_array':
return self.screen
else:
# unpack the modes as comma delineated strings ('a', 'b', ...)
render_modes = [repr(x) for x in self.metadata['render.modes']]
msg = 'valid render modes are: {}'.format(', '.join(render_modes))
raise NotImplementedError(msg)
def get_keys_to_action(self):
"""Return the dictionary of keyboard keys to actions."""
# keyboard keys in an array ordered by their byte order in the bitmap
# i.e. right = 7, left = 6, ..., B = 1, A = 0
buttons = np.array([
ord('d'), # right
ord('a'), # left
ord('s'), # down
ord('w'), # up
ord('\r'), # start
ord(' '), # select
ord('p'), # B
ord('o'), # A
])
# the dictionary of key presses to controller codes
keys_to_action = {}
# the combination map of values for the controller
values = 8 * [[0, 1]]
# iterate over all the combinations
for combination in itertools.product(*values):
# unpack the tuple of bits into an integer
byte = int(''.join(map(str, combination)), 2)
# unwrap the pressed buttons based on the bitmap
pressed = buttons[list(map(bool, combination))]
# assign the pressed buttons to the output byte
keys_to_action[tuple(sorted(pressed))] = byte
return keys_to_action
def get_action_meanings(self):
"""Return a list of actions meanings."""
return ['NOOP']
|
Kautenja/nes-py | nes_py/nes_env.py | NESEnv.step | python | def step(self, action):
# if the environment is done, raise an error
if self.done:
raise ValueError('cannot step in a done environment! call `reset`')
# set the action on the controller
self.controllers[0][:] = action
# pass the action to the emulator as an unsigned byte
_LIB.Step(self._env)
# get the reward for this step
reward = self._get_reward()
# get the done flag for this step
self.done = self._get_done()
# get the info for this step
info = self._get_info()
# call the after step callback
self._did_step(self.done)
# bound the reward in [min, max]
if reward < self.reward_range[0]:
reward = self.reward_range[0]
elif reward > self.reward_range[1]:
reward = self.reward_range[1]
# return the screen from the emulator and other relevant data
return self.screen, reward, self.done, info | Run one frame of the NES and return the relevant observation data.
Args:
action (byte): the bitmap determining which buttons to press
Returns:
a tuple of:
- state (np.ndarray): next frame as a result of the given action
- reward (float) : amount of reward returned after given action
- done (boolean): whether the episode has ended
- info (dict): contains auxiliary diagnostic information | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/nes_env.py#L271-L307 | null | class NESEnv(gym.Env):
"""An NES environment based on the LaiNES emulator."""
# relevant meta-data about the environment
metadata = {
'render.modes': ['rgb_array', 'human'],
'video.frames_per_second': 60
}
# the legal range for rewards for this environment
reward_range = (-float('inf'), float('inf'))
# observation space for the environment is static across all instances
observation_space = Box(
low=0,
high=255,
shape=SCREEN_SHAPE_24_BIT,
dtype=np.uint8
)
# action space is a bitmap of button press values for the 8 NES buttons
action_space = Discrete(256)
def __init__(self, rom_path):
"""
Create a new NES environment.
Args:
rom_path (str): the path to the ROM for the environment
Returns:
None
"""
# create a ROM file from the ROM path
rom = ROM(rom_path)
# check that there is PRG ROM
if rom.prg_rom_size == 0:
raise ValueError('ROM has no PRG-ROM banks.')
# ensure that there is no trainer
if rom.has_trainer:
raise ValueError('ROM has trainer. trainer is not supported.')
# try to read the PRG ROM and raise a value error if it fails
_ = rom.prg_rom
# try to read the CHR ROM and raise a value error if it fails
_ = rom.chr_rom
# check the TV system
if rom.is_pal:
raise ValueError('ROM is PAL. PAL is not supported.')
# check that the mapper is implemented
elif rom.mapper not in {0, 1, 2, 3}:
msg = 'ROM has an unsupported mapper number {}.'
raise ValueError(msg.format(rom.mapper))
# create a dedicated random number generator for the environment
self.np_random = np.random.RandomState()
# store the ROM path
self._rom_path = rom_path
# initialize the C++ object for running the environment
self._env = _LIB.Initialize(self._rom_path)
# setup a placeholder for a 'human' render mode viewer
self.viewer = None
# setup a placeholder for a pointer to a backup state
self._has_backup = False
# setup a done flag
self.done = True
# setup the controllers, screen, and RAM buffers
self.controllers = [self._controller_buffer(port) for port in range(2)]
self.screen = self._screen_buffer()
self.ram = self._ram_buffer()
def _screen_buffer(self):
"""Setup the screen buffer from the C++ code."""
# get the address of the screen
address = _LIB.Screen(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(SCREEN_TENSOR)).contents
# create a NumPy array from the buffer
screen = np.frombuffer(buffer_, dtype='uint8')
# reshape the screen from a column vector to a tensor
screen = screen.reshape(SCREEN_SHAPE_32_BIT)
# flip the bytes if the machine is little-endian (which it likely is)
if sys.byteorder == 'little':
# invert the little-endian BGRx channels to big-endian xRGB
screen = screen[:, :, ::-1]
# remove the 0th axis (padding from storing colors in 32 bit)
return screen[:, :, 1:]
def _ram_buffer(self):
"""Setup the RAM buffer from the C++ code."""
# get the address of the RAM
address = _LIB.Memory(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(RAM_VECTOR)).contents
# create a NumPy array from the buffer
return np.frombuffer(buffer_, dtype='uint8')
def _controller_buffer(self, port):
"""
Find the pointer to a controller and setup a NumPy buffer.
Args:
port: the port of the controller to setup
Returns:
a NumPy buffer with the controller's binary data
"""
# get the address of the controller
address = _LIB.Controller(self._env, port)
# create a memory buffer using the ctypes pointer for this vector
buffer_ = ctypes.cast(address, ctypes.POINTER(CONTROLLER_VECTOR)).contents
# create a NumPy buffer from the binary data and return it
return np.frombuffer(buffer_, dtype='uint8')
def _frame_advance(self, action):
"""
Advance a frame in the emulator with an action.
Args:
action (byte): the action to press on the joy-pad
Returns:
None
"""
# set the action on the controller
self.controllers[0][:] = action
# perform a step on the emulator
_LIB.Step(self._env)
def _backup(self):
"""Backup the NES state in the emulator."""
_LIB.Backup(self._env)
self._has_backup = True
def _restore(self):
"""Restore the backup state into the NES emulator."""
_LIB.Restore(self._env)
def _will_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def seed(self, seed=None):
"""
Set the seed for this environment's random number generator.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
# if there is no seed, return an empty list
if seed is None:
return []
# set the random number seed for the NumPy random number generator
self.np_random.seed(seed)
# return the list of seeds used by RNG(s) in the environment
return [seed]
def reset(self):
"""
Reset the state of the environment and returns an initial observation.
Returns:
state (np.ndarray): next frame as a result of the given action
"""
# call the before reset callback
self._will_reset()
# reset the emulator
if self._has_backup:
self._restore()
else:
_LIB.Reset(self._env)
# call the after reset callback
self._did_reset()
# set the done flag to false
self.done = False
# return the screen from the emulator
return self.screen
def _did_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def _get_reward(self):
"""Return the reward after a step occurs."""
return 0
def _get_done(self):
"""Return True if the episode is over, False otherwise."""
return False
def _get_info(self):
"""Return the info after a step occurs."""
return {}
def _did_step(self, done):
"""
Handle any RAM hacking after a step occurs.
Args:
done (bool): whether the done flag is set to true
Returns:
None
"""
pass
def close(self):
"""Close the environment."""
# make sure the environment hasn't already been closed
if self._env is None:
raise ValueError('env has already been closed.')
# purge the environment from C++ memory
_LIB.Close(self._env)
# deallocate the object locally
self._env = None
# if there is an image viewer open, delete it
if self.viewer is not None:
self.viewer.close()
def render(self, mode='human'):
"""
Render the environment.
Args:
mode (str): the mode to render with:
- human: render to the current display
- rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image
Returns:
a numpy array if mode is 'rgb_array', None otherwise
"""
if mode == 'human':
# if the viewer isn't setup, import it and create one
if self.viewer is None:
from ._image_viewer import ImageViewer
# get the caption for the ImageViewer
if self.spec is None:
# if there is no spec, just use the .nes filename
caption = self._rom_path.split('/')[-1]
else:
# set the caption to the OpenAI Gym id
caption = self.spec.id
# create the ImageViewer to display frames
self.viewer = ImageViewer(
caption=caption,
height=SCREEN_HEIGHT,
width=SCREEN_WIDTH,
)
# show the screen on the image viewer
self.viewer.show(self.screen)
elif mode == 'rgb_array':
return self.screen
else:
# unpack the modes as comma delineated strings ('a', 'b', ...)
render_modes = [repr(x) for x in self.metadata['render.modes']]
msg = 'valid render modes are: {}'.format(', '.join(render_modes))
raise NotImplementedError(msg)
def get_keys_to_action(self):
"""Return the dictionary of keyboard keys to actions."""
# keyboard keys in an array ordered by their byte order in the bitmap
# i.e. right = 7, left = 6, ..., B = 1, A = 0
buttons = np.array([
ord('d'), # right
ord('a'), # left
ord('s'), # down
ord('w'), # up
ord('\r'), # start
ord(' '), # select
ord('p'), # B
ord('o'), # A
])
# the dictionary of key presses to controller codes
keys_to_action = {}
# the combination map of values for the controller
values = 8 * [[0, 1]]
# iterate over all the combinations
for combination in itertools.product(*values):
# unpack the tuple of bits into an integer
byte = int(''.join(map(str, combination)), 2)
# unwrap the pressed buttons based on the bitmap
pressed = buttons[list(map(bool, combination))]
# assign the pressed buttons to the output byte
keys_to_action[tuple(sorted(pressed))] = byte
return keys_to_action
def get_action_meanings(self):
"""Return a list of actions meanings."""
return ['NOOP']
|
Kautenja/nes-py | nes_py/nes_env.py | NESEnv.close | python | def close(self):
# make sure the environment hasn't already been closed
if self._env is None:
raise ValueError('env has already been closed.')
# purge the environment from C++ memory
_LIB.Close(self._env)
# deallocate the object locally
self._env = None
# if there is an image viewer open, delete it
if self.viewer is not None:
self.viewer.close() | Close the environment. | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/nes_env.py#L334-L345 | null | class NESEnv(gym.Env):
"""An NES environment based on the LaiNES emulator."""
# relevant meta-data about the environment
metadata = {
'render.modes': ['rgb_array', 'human'],
'video.frames_per_second': 60
}
# the legal range for rewards for this environment
reward_range = (-float('inf'), float('inf'))
# observation space for the environment is static across all instances
observation_space = Box(
low=0,
high=255,
shape=SCREEN_SHAPE_24_BIT,
dtype=np.uint8
)
# action space is a bitmap of button press values for the 8 NES buttons
action_space = Discrete(256)
def __init__(self, rom_path):
"""
Create a new NES environment.
Args:
rom_path (str): the path to the ROM for the environment
Returns:
None
"""
# create a ROM file from the ROM path
rom = ROM(rom_path)
# check that there is PRG ROM
if rom.prg_rom_size == 0:
raise ValueError('ROM has no PRG-ROM banks.')
# ensure that there is no trainer
if rom.has_trainer:
raise ValueError('ROM has trainer. trainer is not supported.')
# try to read the PRG ROM and raise a value error if it fails
_ = rom.prg_rom
# try to read the CHR ROM and raise a value error if it fails
_ = rom.chr_rom
# check the TV system
if rom.is_pal:
raise ValueError('ROM is PAL. PAL is not supported.')
# check that the mapper is implemented
elif rom.mapper not in {0, 1, 2, 3}:
msg = 'ROM has an unsupported mapper number {}.'
raise ValueError(msg.format(rom.mapper))
# create a dedicated random number generator for the environment
self.np_random = np.random.RandomState()
# store the ROM path
self._rom_path = rom_path
# initialize the C++ object for running the environment
self._env = _LIB.Initialize(self._rom_path)
# setup a placeholder for a 'human' render mode viewer
self.viewer = None
# setup a placeholder for a pointer to a backup state
self._has_backup = False
# setup a done flag
self.done = True
# setup the controllers, screen, and RAM buffers
self.controllers = [self._controller_buffer(port) for port in range(2)]
self.screen = self._screen_buffer()
self.ram = self._ram_buffer()
def _screen_buffer(self):
"""Setup the screen buffer from the C++ code."""
# get the address of the screen
address = _LIB.Screen(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(SCREEN_TENSOR)).contents
# create a NumPy array from the buffer
screen = np.frombuffer(buffer_, dtype='uint8')
# reshape the screen from a column vector to a tensor
screen = screen.reshape(SCREEN_SHAPE_32_BIT)
# flip the bytes if the machine is little-endian (which it likely is)
if sys.byteorder == 'little':
# invert the little-endian BGRx channels to big-endian xRGB
screen = screen[:, :, ::-1]
# remove the 0th axis (padding from storing colors in 32 bit)
return screen[:, :, 1:]
def _ram_buffer(self):
"""Setup the RAM buffer from the C++ code."""
# get the address of the RAM
address = _LIB.Memory(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(RAM_VECTOR)).contents
# create a NumPy array from the buffer
return np.frombuffer(buffer_, dtype='uint8')
def _controller_buffer(self, port):
"""
Find the pointer to a controller and setup a NumPy buffer.
Args:
port: the port of the controller to setup
Returns:
a NumPy buffer with the controller's binary data
"""
# get the address of the controller
address = _LIB.Controller(self._env, port)
# create a memory buffer using the ctypes pointer for this vector
buffer_ = ctypes.cast(address, ctypes.POINTER(CONTROLLER_VECTOR)).contents
# create a NumPy buffer from the binary data and return it
return np.frombuffer(buffer_, dtype='uint8')
def _frame_advance(self, action):
"""
Advance a frame in the emulator with an action.
Args:
action (byte): the action to press on the joy-pad
Returns:
None
"""
# set the action on the controller
self.controllers[0][:] = action
# perform a step on the emulator
_LIB.Step(self._env)
def _backup(self):
"""Backup the NES state in the emulator."""
_LIB.Backup(self._env)
self._has_backup = True
def _restore(self):
"""Restore the backup state into the NES emulator."""
_LIB.Restore(self._env)
def _will_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def seed(self, seed=None):
"""
Set the seed for this environment's random number generator.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
# if there is no seed, return an empty list
if seed is None:
return []
# set the random number seed for the NumPy random number generator
self.np_random.seed(seed)
# return the list of seeds used by RNG(s) in the environment
return [seed]
def reset(self):
"""
Reset the state of the environment and returns an initial observation.
Returns:
state (np.ndarray): next frame as a result of the given action
"""
# call the before reset callback
self._will_reset()
# reset the emulator
if self._has_backup:
self._restore()
else:
_LIB.Reset(self._env)
# call the after reset callback
self._did_reset()
# set the done flag to false
self.done = False
# return the screen from the emulator
return self.screen
def _did_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def step(self, action):
"""
Run one frame of the NES and return the relevant observation data.
Args:
action (byte): the bitmap determining which buttons to press
Returns:
a tuple of:
- state (np.ndarray): next frame as a result of the given action
- reward (float) : amount of reward returned after given action
- done (boolean): whether the episode has ended
- info (dict): contains auxiliary diagnostic information
"""
# if the environment is done, raise an error
if self.done:
raise ValueError('cannot step in a done environment! call `reset`')
# set the action on the controller
self.controllers[0][:] = action
# pass the action to the emulator as an unsigned byte
_LIB.Step(self._env)
# get the reward for this step
reward = self._get_reward()
# get the done flag for this step
self.done = self._get_done()
# get the info for this step
info = self._get_info()
# call the after step callback
self._did_step(self.done)
# bound the reward in [min, max]
if reward < self.reward_range[0]:
reward = self.reward_range[0]
elif reward > self.reward_range[1]:
reward = self.reward_range[1]
# return the screen from the emulator and other relevant data
return self.screen, reward, self.done, info
def _get_reward(self):
"""Return the reward after a step occurs."""
return 0
def _get_done(self):
"""Return True if the episode is over, False otherwise."""
return False
def _get_info(self):
"""Return the info after a step occurs."""
return {}
def _did_step(self, done):
"""
Handle any RAM hacking after a step occurs.
Args:
done (bool): whether the done flag is set to true
Returns:
None
"""
pass
def render(self, mode='human'):
"""
Render the environment.
Args:
mode (str): the mode to render with:
- human: render to the current display
- rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image
Returns:
a numpy array if mode is 'rgb_array', None otherwise
"""
if mode == 'human':
# if the viewer isn't setup, import it and create one
if self.viewer is None:
from ._image_viewer import ImageViewer
# get the caption for the ImageViewer
if self.spec is None:
# if there is no spec, just use the .nes filename
caption = self._rom_path.split('/')[-1]
else:
# set the caption to the OpenAI Gym id
caption = self.spec.id
# create the ImageViewer to display frames
self.viewer = ImageViewer(
caption=caption,
height=SCREEN_HEIGHT,
width=SCREEN_WIDTH,
)
# show the screen on the image viewer
self.viewer.show(self.screen)
elif mode == 'rgb_array':
return self.screen
else:
# unpack the modes as comma delineated strings ('a', 'b', ...)
render_modes = [repr(x) for x in self.metadata['render.modes']]
msg = 'valid render modes are: {}'.format(', '.join(render_modes))
raise NotImplementedError(msg)
def get_keys_to_action(self):
"""Return the dictionary of keyboard keys to actions."""
# keyboard keys in an array ordered by their byte order in the bitmap
# i.e. right = 7, left = 6, ..., B = 1, A = 0
buttons = np.array([
ord('d'), # right
ord('a'), # left
ord('s'), # down
ord('w'), # up
ord('\r'), # start
ord(' '), # select
ord('p'), # B
ord('o'), # A
])
# the dictionary of key presses to controller codes
keys_to_action = {}
# the combination map of values for the controller
values = 8 * [[0, 1]]
# iterate over all the combinations
for combination in itertools.product(*values):
# unpack the tuple of bits into an integer
byte = int(''.join(map(str, combination)), 2)
# unwrap the pressed buttons based on the bitmap
pressed = buttons[list(map(bool, combination))]
# assign the pressed buttons to the output byte
keys_to_action[tuple(sorted(pressed))] = byte
return keys_to_action
def get_action_meanings(self):
"""Return a list of actions meanings."""
return ['NOOP']
|
Kautenja/nes-py | nes_py/nes_env.py | NESEnv.render | python | def render(self, mode='human'):
if mode == 'human':
# if the viewer isn't setup, import it and create one
if self.viewer is None:
from ._image_viewer import ImageViewer
# get the caption for the ImageViewer
if self.spec is None:
# if there is no spec, just use the .nes filename
caption = self._rom_path.split('/')[-1]
else:
# set the caption to the OpenAI Gym id
caption = self.spec.id
# create the ImageViewer to display frames
self.viewer = ImageViewer(
caption=caption,
height=SCREEN_HEIGHT,
width=SCREEN_WIDTH,
)
# show the screen on the image viewer
self.viewer.show(self.screen)
elif mode == 'rgb_array':
return self.screen
else:
# unpack the modes as comma delineated strings ('a', 'b', ...)
render_modes = [repr(x) for x in self.metadata['render.modes']]
msg = 'valid render modes are: {}'.format(', '.join(render_modes))
raise NotImplementedError(msg) | Render the environment.
Args:
mode (str): the mode to render with:
- human: render to the current display
- rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image
Returns:
a numpy array if mode is 'rgb_array', None otherwise | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/nes_env.py#L347-L386 | [
"def show(self, frame):\n \"\"\"\n Show an array of pixels on the window.\n\n Args:\n frame (numpy.ndarray): the frame to show on the window\n\n Returns:\n None\n \"\"\"\n # check that the frame has the correct dimensions\n if len(frame.shape) != 3:\n raise ValueError('frame should have shape with only 3 dimensions')\n # open the window if it isn't open already\n if not self.is_open:\n self.open()\n # prepare the window for the next frame\n self._window.clear()\n self._window.switch_to()\n self._window.dispatch_events()\n # create an image data object\n image = ImageData(\n frame.shape[1],\n frame.shape[0],\n 'RGB',\n frame.tobytes(),\n pitch=frame.shape[1]*-3\n )\n # send the image to the window\n image.blit(0, 0, width=self._window.width, height=self._window.height)\n self._window.flip()\n"
] | class NESEnv(gym.Env):
"""An NES environment based on the LaiNES emulator."""
# relevant meta-data about the environment
metadata = {
'render.modes': ['rgb_array', 'human'],
'video.frames_per_second': 60
}
# the legal range for rewards for this environment
reward_range = (-float('inf'), float('inf'))
# observation space for the environment is static across all instances
observation_space = Box(
low=0,
high=255,
shape=SCREEN_SHAPE_24_BIT,
dtype=np.uint8
)
# action space is a bitmap of button press values for the 8 NES buttons
action_space = Discrete(256)
def __init__(self, rom_path):
"""
Create a new NES environment.
Args:
rom_path (str): the path to the ROM for the environment
Returns:
None
"""
# create a ROM file from the ROM path
rom = ROM(rom_path)
# check that there is PRG ROM
if rom.prg_rom_size == 0:
raise ValueError('ROM has no PRG-ROM banks.')
# ensure that there is no trainer
if rom.has_trainer:
raise ValueError('ROM has trainer. trainer is not supported.')
# try to read the PRG ROM and raise a value error if it fails
_ = rom.prg_rom
# try to read the CHR ROM and raise a value error if it fails
_ = rom.chr_rom
# check the TV system
if rom.is_pal:
raise ValueError('ROM is PAL. PAL is not supported.')
# check that the mapper is implemented
elif rom.mapper not in {0, 1, 2, 3}:
msg = 'ROM has an unsupported mapper number {}.'
raise ValueError(msg.format(rom.mapper))
# create a dedicated random number generator for the environment
self.np_random = np.random.RandomState()
# store the ROM path
self._rom_path = rom_path
# initialize the C++ object for running the environment
self._env = _LIB.Initialize(self._rom_path)
# setup a placeholder for a 'human' render mode viewer
self.viewer = None
# setup a placeholder for a pointer to a backup state
self._has_backup = False
# setup a done flag
self.done = True
# setup the controllers, screen, and RAM buffers
self.controllers = [self._controller_buffer(port) for port in range(2)]
self.screen = self._screen_buffer()
self.ram = self._ram_buffer()
def _screen_buffer(self):
"""Setup the screen buffer from the C++ code."""
# get the address of the screen
address = _LIB.Screen(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(SCREEN_TENSOR)).contents
# create a NumPy array from the buffer
screen = np.frombuffer(buffer_, dtype='uint8')
# reshape the screen from a column vector to a tensor
screen = screen.reshape(SCREEN_SHAPE_32_BIT)
# flip the bytes if the machine is little-endian (which it likely is)
if sys.byteorder == 'little':
# invert the little-endian BGRx channels to big-endian xRGB
screen = screen[:, :, ::-1]
# remove the 0th axis (padding from storing colors in 32 bit)
return screen[:, :, 1:]
def _ram_buffer(self):
"""Setup the RAM buffer from the C++ code."""
# get the address of the RAM
address = _LIB.Memory(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(RAM_VECTOR)).contents
# create a NumPy array from the buffer
return np.frombuffer(buffer_, dtype='uint8')
def _controller_buffer(self, port):
"""
Find the pointer to a controller and setup a NumPy buffer.
Args:
port: the port of the controller to setup
Returns:
a NumPy buffer with the controller's binary data
"""
# get the address of the controller
address = _LIB.Controller(self._env, port)
# create a memory buffer using the ctypes pointer for this vector
buffer_ = ctypes.cast(address, ctypes.POINTER(CONTROLLER_VECTOR)).contents
# create a NumPy buffer from the binary data and return it
return np.frombuffer(buffer_, dtype='uint8')
def _frame_advance(self, action):
"""
Advance a frame in the emulator with an action.
Args:
action (byte): the action to press on the joy-pad
Returns:
None
"""
# set the action on the controller
self.controllers[0][:] = action
# perform a step on the emulator
_LIB.Step(self._env)
def _backup(self):
"""Backup the NES state in the emulator."""
_LIB.Backup(self._env)
self._has_backup = True
def _restore(self):
"""Restore the backup state into the NES emulator."""
_LIB.Restore(self._env)
def _will_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def seed(self, seed=None):
"""
Set the seed for this environment's random number generator.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
# if there is no seed, return an empty list
if seed is None:
return []
# set the random number seed for the NumPy random number generator
self.np_random.seed(seed)
# return the list of seeds used by RNG(s) in the environment
return [seed]
def reset(self):
"""
Reset the state of the environment and returns an initial observation.
Returns:
state (np.ndarray): next frame as a result of the given action
"""
# call the before reset callback
self._will_reset()
# reset the emulator
if self._has_backup:
self._restore()
else:
_LIB.Reset(self._env)
# call the after reset callback
self._did_reset()
# set the done flag to false
self.done = False
# return the screen from the emulator
return self.screen
def _did_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def step(self, action):
"""
Run one frame of the NES and return the relevant observation data.
Args:
action (byte): the bitmap determining which buttons to press
Returns:
a tuple of:
- state (np.ndarray): next frame as a result of the given action
- reward (float) : amount of reward returned after given action
- done (boolean): whether the episode has ended
- info (dict): contains auxiliary diagnostic information
"""
# if the environment is done, raise an error
if self.done:
raise ValueError('cannot step in a done environment! call `reset`')
# set the action on the controller
self.controllers[0][:] = action
# pass the action to the emulator as an unsigned byte
_LIB.Step(self._env)
# get the reward for this step
reward = self._get_reward()
# get the done flag for this step
self.done = self._get_done()
# get the info for this step
info = self._get_info()
# call the after step callback
self._did_step(self.done)
# bound the reward in [min, max]
if reward < self.reward_range[0]:
reward = self.reward_range[0]
elif reward > self.reward_range[1]:
reward = self.reward_range[1]
# return the screen from the emulator and other relevant data
return self.screen, reward, self.done, info
def _get_reward(self):
"""Return the reward after a step occurs."""
return 0
def _get_done(self):
"""Return True if the episode is over, False otherwise."""
return False
def _get_info(self):
"""Return the info after a step occurs."""
return {}
def _did_step(self, done):
"""
Handle any RAM hacking after a step occurs.
Args:
done (bool): whether the done flag is set to true
Returns:
None
"""
pass
def close(self):
"""Close the environment."""
# make sure the environment hasn't already been closed
if self._env is None:
raise ValueError('env has already been closed.')
# purge the environment from C++ memory
_LIB.Close(self._env)
# deallocate the object locally
self._env = None
# if there is an image viewer open, delete it
if self.viewer is not None:
self.viewer.close()
def get_keys_to_action(self):
"""Return the dictionary of keyboard keys to actions."""
# keyboard keys in an array ordered by their byte order in the bitmap
# i.e. right = 7, left = 6, ..., B = 1, A = 0
buttons = np.array([
ord('d'), # right
ord('a'), # left
ord('s'), # down
ord('w'), # up
ord('\r'), # start
ord(' '), # select
ord('p'), # B
ord('o'), # A
])
# the dictionary of key presses to controller codes
keys_to_action = {}
# the combination map of values for the controller
values = 8 * [[0, 1]]
# iterate over all the combinations
for combination in itertools.product(*values):
# unpack the tuple of bits into an integer
byte = int(''.join(map(str, combination)), 2)
# unwrap the pressed buttons based on the bitmap
pressed = buttons[list(map(bool, combination))]
# assign the pressed buttons to the output byte
keys_to_action[tuple(sorted(pressed))] = byte
return keys_to_action
def get_action_meanings(self):
"""Return a list of actions meanings."""
return ['NOOP']
|
Kautenja/nes-py | nes_py/nes_env.py | NESEnv.get_keys_to_action | python | def get_keys_to_action(self):
# keyboard keys in an array ordered by their byte order in the bitmap
# i.e. right = 7, left = 6, ..., B = 1, A = 0
buttons = np.array([
ord('d'), # right
ord('a'), # left
ord('s'), # down
ord('w'), # up
ord('\r'), # start
ord(' '), # select
ord('p'), # B
ord('o'), # A
])
# the dictionary of key presses to controller codes
keys_to_action = {}
# the combination map of values for the controller
values = 8 * [[0, 1]]
# iterate over all the combinations
for combination in itertools.product(*values):
# unpack the tuple of bits into an integer
byte = int(''.join(map(str, combination)), 2)
# unwrap the pressed buttons based on the bitmap
pressed = buttons[list(map(bool, combination))]
# assign the pressed buttons to the output byte
keys_to_action[tuple(sorted(pressed))] = byte
return keys_to_action | Return the dictionary of keyboard keys to actions. | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/nes_env.py#L388-L415 | null | class NESEnv(gym.Env):
"""An NES environment based on the LaiNES emulator."""
# relevant meta-data about the environment
metadata = {
'render.modes': ['rgb_array', 'human'],
'video.frames_per_second': 60
}
# the legal range for rewards for this environment
reward_range = (-float('inf'), float('inf'))
# observation space for the environment is static across all instances
observation_space = Box(
low=0,
high=255,
shape=SCREEN_SHAPE_24_BIT,
dtype=np.uint8
)
# action space is a bitmap of button press values for the 8 NES buttons
action_space = Discrete(256)
def __init__(self, rom_path):
"""
Create a new NES environment.
Args:
rom_path (str): the path to the ROM for the environment
Returns:
None
"""
# create a ROM file from the ROM path
rom = ROM(rom_path)
# check that there is PRG ROM
if rom.prg_rom_size == 0:
raise ValueError('ROM has no PRG-ROM banks.')
# ensure that there is no trainer
if rom.has_trainer:
raise ValueError('ROM has trainer. trainer is not supported.')
# try to read the PRG ROM and raise a value error if it fails
_ = rom.prg_rom
# try to read the CHR ROM and raise a value error if it fails
_ = rom.chr_rom
# check the TV system
if rom.is_pal:
raise ValueError('ROM is PAL. PAL is not supported.')
# check that the mapper is implemented
elif rom.mapper not in {0, 1, 2, 3}:
msg = 'ROM has an unsupported mapper number {}.'
raise ValueError(msg.format(rom.mapper))
# create a dedicated random number generator for the environment
self.np_random = np.random.RandomState()
# store the ROM path
self._rom_path = rom_path
# initialize the C++ object for running the environment
self._env = _LIB.Initialize(self._rom_path)
# setup a placeholder for a 'human' render mode viewer
self.viewer = None
# setup a placeholder for a pointer to a backup state
self._has_backup = False
# setup a done flag
self.done = True
# setup the controllers, screen, and RAM buffers
self.controllers = [self._controller_buffer(port) for port in range(2)]
self.screen = self._screen_buffer()
self.ram = self._ram_buffer()
def _screen_buffer(self):
"""Setup the screen buffer from the C++ code."""
# get the address of the screen
address = _LIB.Screen(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(SCREEN_TENSOR)).contents
# create a NumPy array from the buffer
screen = np.frombuffer(buffer_, dtype='uint8')
# reshape the screen from a column vector to a tensor
screen = screen.reshape(SCREEN_SHAPE_32_BIT)
# flip the bytes if the machine is little-endian (which it likely is)
if sys.byteorder == 'little':
# invert the little-endian BGRx channels to big-endian xRGB
screen = screen[:, :, ::-1]
# remove the 0th axis (padding from storing colors in 32 bit)
return screen[:, :, 1:]
def _ram_buffer(self):
"""Setup the RAM buffer from the C++ code."""
# get the address of the RAM
address = _LIB.Memory(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(RAM_VECTOR)).contents
# create a NumPy array from the buffer
return np.frombuffer(buffer_, dtype='uint8')
def _controller_buffer(self, port):
"""
Find the pointer to a controller and setup a NumPy buffer.
Args:
port: the port of the controller to setup
Returns:
a NumPy buffer with the controller's binary data
"""
# get the address of the controller
address = _LIB.Controller(self._env, port)
# create a memory buffer using the ctypes pointer for this vector
buffer_ = ctypes.cast(address, ctypes.POINTER(CONTROLLER_VECTOR)).contents
# create a NumPy buffer from the binary data and return it
return np.frombuffer(buffer_, dtype='uint8')
def _frame_advance(self, action):
"""
Advance a frame in the emulator with an action.
Args:
action (byte): the action to press on the joy-pad
Returns:
None
"""
# set the action on the controller
self.controllers[0][:] = action
# perform a step on the emulator
_LIB.Step(self._env)
def _backup(self):
"""Backup the NES state in the emulator."""
_LIB.Backup(self._env)
self._has_backup = True
def _restore(self):
"""Restore the backup state into the NES emulator."""
_LIB.Restore(self._env)
def _will_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def seed(self, seed=None):
"""
Set the seed for this environment's random number generator.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
# if there is no seed, return an empty list
if seed is None:
return []
# set the random number seed for the NumPy random number generator
self.np_random.seed(seed)
# return the list of seeds used by RNG(s) in the environment
return [seed]
def reset(self):
"""
Reset the state of the environment and returns an initial observation.
Returns:
state (np.ndarray): next frame as a result of the given action
"""
# call the before reset callback
self._will_reset()
# reset the emulator
if self._has_backup:
self._restore()
else:
_LIB.Reset(self._env)
# call the after reset callback
self._did_reset()
# set the done flag to false
self.done = False
# return the screen from the emulator
return self.screen
def _did_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def step(self, action):
"""
Run one frame of the NES and return the relevant observation data.
Args:
action (byte): the bitmap determining which buttons to press
Returns:
a tuple of:
- state (np.ndarray): next frame as a result of the given action
- reward (float) : amount of reward returned after given action
- done (boolean): whether the episode has ended
- info (dict): contains auxiliary diagnostic information
"""
# if the environment is done, raise an error
if self.done:
raise ValueError('cannot step in a done environment! call `reset`')
# set the action on the controller
self.controllers[0][:] = action
# pass the action to the emulator as an unsigned byte
_LIB.Step(self._env)
# get the reward for this step
reward = self._get_reward()
# get the done flag for this step
self.done = self._get_done()
# get the info for this step
info = self._get_info()
# call the after step callback
self._did_step(self.done)
# bound the reward in [min, max]
if reward < self.reward_range[0]:
reward = self.reward_range[0]
elif reward > self.reward_range[1]:
reward = self.reward_range[1]
# return the screen from the emulator and other relevant data
return self.screen, reward, self.done, info
def _get_reward(self):
"""Return the reward after a step occurs."""
return 0
def _get_done(self):
"""Return True if the episode is over, False otherwise."""
return False
def _get_info(self):
"""Return the info after a step occurs."""
return {}
def _did_step(self, done):
"""
Handle any RAM hacking after a step occurs.
Args:
done (bool): whether the done flag is set to true
Returns:
None
"""
pass
def close(self):
"""Close the environment."""
# make sure the environment hasn't already been closed
if self._env is None:
raise ValueError('env has already been closed.')
# purge the environment from C++ memory
_LIB.Close(self._env)
# deallocate the object locally
self._env = None
# if there is an image viewer open, delete it
if self.viewer is not None:
self.viewer.close()
def render(self, mode='human'):
"""
Render the environment.
Args:
mode (str): the mode to render with:
- human: render to the current display
- rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image
Returns:
a numpy array if mode is 'rgb_array', None otherwise
"""
if mode == 'human':
# if the viewer isn't setup, import it and create one
if self.viewer is None:
from ._image_viewer import ImageViewer
# get the caption for the ImageViewer
if self.spec is None:
# if there is no spec, just use the .nes filename
caption = self._rom_path.split('/')[-1]
else:
# set the caption to the OpenAI Gym id
caption = self.spec.id
# create the ImageViewer to display frames
self.viewer = ImageViewer(
caption=caption,
height=SCREEN_HEIGHT,
width=SCREEN_WIDTH,
)
# show the screen on the image viewer
self.viewer.show(self.screen)
elif mode == 'rgb_array':
return self.screen
else:
# unpack the modes as comma delineated strings ('a', 'b', ...)
render_modes = [repr(x) for x in self.metadata['render.modes']]
msg = 'valid render modes are: {}'.format(', '.join(render_modes))
raise NotImplementedError(msg)
def get_action_meanings(self):
"""Return a list of actions meanings."""
return ['NOOP']
|
Kautenja/nes-py | nes_py/app/cli.py | _get_args | python | def _get_args():
parser = argparse.ArgumentParser(description=__doc__)
# add the argument for the Super Mario Bros environment to run
parser.add_argument('--rom', '-r',
type=str,
help='The path to the ROM to play.',
required=True,
)
# add the argument for the mode of execution as either human or random
parser.add_argument('--mode', '-m',
type=str,
default='human',
choices=['human', 'random'],
help='The execution mode for the emulation.',
)
# add the argument for the number of steps to take in random mode
parser.add_argument('--steps', '-s',
type=int,
default=500,
help='The number of random steps to take.',
)
return parser.parse_args() | Parse arguments from the command line and return them. | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/app/cli.py#L8-L30 | null | """Command line interface to nes-py NES emulator."""
import argparse
from .play_human import play_human
from .play_random import play_random
from ..nes_env import NESEnv
def main():
"""The main entry point for the command line interface."""
# get arguments from the command line
args = _get_args()
# create the environment
env = NESEnv(args.rom)
# play the environment with the given mode
if args.mode == 'human':
play_human(env)
else:
play_random(env, args.steps)
# explicitly define the outward facing API of this module
__all__ = [main.__name__]
|
Kautenja/nes-py | nes_py/app/cli.py | main | python | def main():
# get arguments from the command line
args = _get_args()
# create the environment
env = NESEnv(args.rom)
# play the environment with the given mode
if args.mode == 'human':
play_human(env)
else:
play_random(env, args.steps) | The main entry point for the command line interface. | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/app/cli.py#L33-L43 | [
"def _get_args():\n \"\"\"Parse arguments from the command line and return them.\"\"\"\n parser = argparse.ArgumentParser(description=__doc__)\n # add the argument for the Super Mario Bros environment to run\n parser.add_argument('--rom', '-r',\n type=str,\n help='The path to the ROM to play.',\n required=True,\n )\n # add the argument for the mode of execution as either human or random\n parser.add_argument('--mode', '-m',\n type=str,\n default='human',\n choices=['human', 'random'],\n help='The execution mode for the emulation.',\n )\n # add the argument for the number of steps to take in random mode\n parser.add_argument('--steps', '-s',\n type=int,\n default=500,\n help='The number of random steps to take.',\n )\n return parser.parse_args()\n",
"def play_human(env):\n \"\"\"\n Play the environment using keyboard as a human.\n\n Args:\n env (gym.Env): the initialized gym environment to play\n\n Returns:\n None\n\n \"\"\"\n # play the game and catch a potential keyboard interrupt\n try:\n play(env, fps=env.metadata['video.frames_per_second'])\n except KeyboardInterrupt:\n pass\n # reset and close the environment\n env.close()\n",
"def play_random(env, steps):\n \"\"\"\n Play the environment making uniformly random decisions.\n\n Args:\n env (gym.Env): the initialized gym environment to play\n steps (int): the number of random steps to take\n\n Returns:\n None\n\n \"\"\"\n try:\n done = True\n progress = tqdm(range(steps))\n for _ in progress:\n if done:\n _ = env.reset()\n action = env.action_space.sample()\n _, reward, done, info = env.step(action)\n progress.set_postfix(reward=reward, info=info)\n env.render()\n except KeyboardInterrupt:\n pass\n # close the environment\n env.close()\n"
] | """Command line interface to nes-py NES emulator."""
import argparse
from .play_human import play_human
from .play_random import play_random
from ..nes_env import NESEnv
def _get_args():
"""Parse arguments from the command line and return them."""
parser = argparse.ArgumentParser(description=__doc__)
# add the argument for the Super Mario Bros environment to run
parser.add_argument('--rom', '-r',
type=str,
help='The path to the ROM to play.',
required=True,
)
# add the argument for the mode of execution as either human or random
parser.add_argument('--mode', '-m',
type=str,
default='human',
choices=['human', 'random'],
help='The execution mode for the emulation.',
)
# add the argument for the number of steps to take in random mode
parser.add_argument('--steps', '-s',
type=int,
default=500,
help='The number of random steps to take.',
)
return parser.parse_args()
# explicitly define the outward facing API of this module
__all__ = [main.__name__]
|
Kautenja/nes-py | nes_py/app/play_random.py | play_random | python | def play_random(env, steps):
try:
done = True
progress = tqdm(range(steps))
for _ in progress:
if done:
_ = env.reset()
action = env.action_space.sample()
_, reward, done, info = env.step(action)
progress.set_postfix(reward=reward, info=info)
env.render()
except KeyboardInterrupt:
pass
# close the environment
env.close() | Play the environment making uniformly random decisions.
Args:
env (gym.Env): the initialized gym environment to play
steps (int): the number of random steps to take
Returns:
None | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/app/play_random.py#L5-L30 | null | """Methods for playing the game randomly, or as a human."""
from tqdm import tqdm
# explicitly define the outward facing API of this module
__all__ = [play_random.__name__]
|
pyannote/pyannote-metrics | pyannote/metrics/utils.py | UEMSupportMixin.extrude | python | def extrude(self, uem, reference, collar=0.0, skip_overlap=False):
if collar == 0. and not skip_overlap:
return uem
collars, overlap_regions = [], []
# build list of collars if needed
if collar > 0.:
# iterate over all segments in reference
for segment in reference.itersegments():
# add collar centered on start time
t = segment.start
collars.append(Segment(t - .5 * collar, t + .5 * collar))
# add collar centered on end time
t = segment.end
collars.append(Segment(t - .5 * collar, t + .5 * collar))
# build list of overlap regions if needed
if skip_overlap:
# iterate over pair of intersecting segments
for (segment1, track1), (segment2, track2) in reference.co_iter(reference):
if segment1 == segment2 and track1 == track2:
continue
# add their intersection
overlap_regions.append(segment1 & segment2)
segments = collars + overlap_regions
return Timeline(segments=segments).support().gaps(support=uem) | Extrude reference boundary collars from uem
reference |----| |--------------| |-------------|
uem |---------------------| |-------------------------------|
extruded |--| |--| |---| |-----| |-| |-----| |-----------| |-----|
Parameters
----------
uem : Timeline
Evaluation map.
reference : Annotation
Reference annotation.
collar : float, optional
When provided, set the duration of collars centered around
reference segment boundaries that are extruded from both reference
and hypothesis. Defaults to 0. (i.e. no collar).
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
Returns
-------
extruded_uem : Timeline | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/utils.py#L38-L93 | null | class UEMSupportMixin:
"""Provides 'uemify' method with optional (à la NIST) collar"""
def common_timeline(self, reference, hypothesis):
"""Return timeline common to both reference and hypothesis
reference |--------| |------------| |---------| |----|
hypothesis |--------------| |------| |----------------|
timeline |--|-----|----|---|-|------| |-|---------|----| |----|
Parameters
----------
reference : Annotation
hypothesis : Annotation
Returns
-------
timeline : Timeline
"""
timeline = reference.get_timeline(copy=True)
timeline.update(hypothesis.get_timeline(copy=False))
return timeline.segmentation()
def project(self, annotation, timeline):
"""Project annotation onto timeline segments
reference |__A__| |__B__|
|____C____|
timeline |---|---|---| |---|
projection |_A_|_A_|_C_| |_B_|
|_C_|
Parameters
----------
annotation : Annotation
timeline : Timeline
Returns
-------
projection : Annotation
"""
projection = annotation.empty()
timeline_ = annotation.get_timeline(copy=False)
for segment_, segment in timeline_.co_iter(timeline):
for track_ in annotation.get_tracks(segment_):
track = projection.new_track(segment, candidate=track_)
projection[segment, track] = annotation[segment_, track_]
return projection
def uemify(self, reference, hypothesis, uem=None, collar=0.,
skip_overlap=False, returns_uem=False, returns_timeline=False):
"""Crop 'reference' and 'hypothesis' to 'uem' support
Parameters
----------
reference, hypothesis : Annotation
Reference and hypothesis annotations.
uem : Timeline, optional
Evaluation map.
collar : float, optional
When provided, set the duration of collars centered around
reference segment boundaries that are extruded from both reference
and hypothesis. Defaults to 0. (i.e. no collar).
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
returns_uem : bool, optional
Set to True to return extruded uem as well.
Defaults to False (i.e. only return reference and hypothesis)
returns_timeline : bool, optional
Set to True to oversegment reference and hypothesis so that they
share the same internal timeline.
Returns
-------
reference, hypothesis : Annotation
Extruded reference and hypothesis annotations
uem : Timeline
Extruded uem (returned only when 'returns_uem' is True)
timeline : Timeline:
Common timeline (returned only when 'returns_timeline' is True)
"""
# when uem is not provided, use the union of reference and hypothesis
# extents -- and warn the user about that.
if uem is None:
r_extent = reference.get_timeline().extent()
h_extent = hypothesis.get_timeline().extent()
extent = r_extent | h_extent
uem = Timeline(segments=[extent] if extent else [],
uri=reference.uri)
warnings.warn(
"'uem' was approximated by the union of 'reference' "
"and 'hypothesis' extents.")
# extrude collars (and overlap regions) from uem
uem = self.extrude(uem, reference, collar=collar,
skip_overlap=skip_overlap)
# extrude regions outside of uem
reference = reference.crop(uem, mode='intersection')
hypothesis = hypothesis.crop(uem, mode='intersection')
# project reference and hypothesis on common timeline
if returns_timeline:
timeline = self.common_timeline(reference, hypothesis)
reference = self.project(reference, timeline)
hypothesis = self.project(hypothesis, timeline)
result = (reference, hypothesis)
if returns_uem:
result += (uem, )
if returns_timeline:
result += (timeline, )
return result
|
pyannote/pyannote-metrics | pyannote/metrics/utils.py | UEMSupportMixin.common_timeline | python | def common_timeline(self, reference, hypothesis):
timeline = reference.get_timeline(copy=True)
timeline.update(hypothesis.get_timeline(copy=False))
return timeline.segmentation() | Return timeline common to both reference and hypothesis
reference |--------| |------------| |---------| |----|
hypothesis |--------------| |------| |----------------|
timeline |--|-----|----|---|-|------| |-|---------|----| |----|
Parameters
----------
reference : Annotation
hypothesis : Annotation
Returns
-------
timeline : Timeline | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/utils.py#L95-L113 | null | class UEMSupportMixin:
"""Provides 'uemify' method with optional (à la NIST) collar"""
def extrude(self, uem, reference, collar=0.0, skip_overlap=False):
"""Extrude reference boundary collars from uem
reference |----| |--------------| |-------------|
uem |---------------------| |-------------------------------|
extruded |--| |--| |---| |-----| |-| |-----| |-----------| |-----|
Parameters
----------
uem : Timeline
Evaluation map.
reference : Annotation
Reference annotation.
collar : float, optional
When provided, set the duration of collars centered around
reference segment boundaries that are extruded from both reference
and hypothesis. Defaults to 0. (i.e. no collar).
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
Returns
-------
extruded_uem : Timeline
"""
if collar == 0. and not skip_overlap:
return uem
collars, overlap_regions = [], []
# build list of collars if needed
if collar > 0.:
# iterate over all segments in reference
for segment in reference.itersegments():
# add collar centered on start time
t = segment.start
collars.append(Segment(t - .5 * collar, t + .5 * collar))
# add collar centered on end time
t = segment.end
collars.append(Segment(t - .5 * collar, t + .5 * collar))
# build list of overlap regions if needed
if skip_overlap:
# iterate over pair of intersecting segments
for (segment1, track1), (segment2, track2) in reference.co_iter(reference):
if segment1 == segment2 and track1 == track2:
continue
# add their intersection
overlap_regions.append(segment1 & segment2)
segments = collars + overlap_regions
return Timeline(segments=segments).support().gaps(support=uem)
def project(self, annotation, timeline):
"""Project annotation onto timeline segments
reference |__A__| |__B__|
|____C____|
timeline |---|---|---| |---|
projection |_A_|_A_|_C_| |_B_|
|_C_|
Parameters
----------
annotation : Annotation
timeline : Timeline
Returns
-------
projection : Annotation
"""
projection = annotation.empty()
timeline_ = annotation.get_timeline(copy=False)
for segment_, segment in timeline_.co_iter(timeline):
for track_ in annotation.get_tracks(segment_):
track = projection.new_track(segment, candidate=track_)
projection[segment, track] = annotation[segment_, track_]
return projection
def uemify(self, reference, hypothesis, uem=None, collar=0.,
           skip_overlap=False, returns_uem=False, returns_timeline=False):
    """Crop 'reference' and 'hypothesis' to 'uem' support

    Parameters
    ----------
    reference, hypothesis : Annotation
        Reference and hypothesis annotations.
    uem : Timeline, optional
        Evaluation map.
    collar : float, optional
        When provided, set the duration of collars centered around
        reference segment boundaries that are extruded from both reference
        and hypothesis. Defaults to 0. (i.e. no collar).
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).
    returns_uem : bool, optional
        Set to True to return extruded uem as well.
        Defaults to False (i.e. only return reference and hypothesis)
    returns_timeline : bool, optional
        Set to True to oversegment reference and hypothesis so that they
        share the same internal timeline.

    Returns
    -------
    reference, hypothesis : Annotation
        Extruded reference and hypothesis annotations
    uem : Timeline
        Extruded uem (returned only when 'returns_uem' is True)
    timeline : Timeline:
        Common timeline (returned only when 'returns_timeline' is True)
    """
    # when uem is not provided, use the union of reference and hypothesis
    # extents -- and warn the user about that.
    if uem is None:
        r_extent = reference.get_timeline().extent()
        h_extent = hypothesis.get_timeline().extent()
        extent = r_extent | h_extent
        uem = Timeline(segments=[extent] if extent else [],
                       uri=reference.uri)
        warnings.warn(
            "'uem' was approximated by the union of 'reference' "
            "and 'hypothesis' extents.")
    # extrude collars (and overlap regions) from uem
    uem = self.extrude(uem, reference, collar=collar,
                       skip_overlap=skip_overlap)
    # extrude regions outside of uem
    reference = reference.crop(uem, mode='intersection')
    hypothesis = hypothesis.crop(uem, mode='intersection')
    # project reference and hypothesis on common timeline
    if returns_timeline:
        timeline = self.common_timeline(reference, hypothesis)
        reference = self.project(reference, timeline)
        hypothesis = self.project(hypothesis, timeline)
    # build output tuple, appending optional extras in a fixed order
    result = (reference, hypothesis)
    if returns_uem:
        result += (uem, )
    if returns_timeline:
        result += (timeline, )
    return result
|
pyannote/pyannote-metrics | pyannote/metrics/utils.py | UEMSupportMixin.project | python | def project(self, annotation, timeline):
projection = annotation.empty()
timeline_ = annotation.get_timeline(copy=False)
for segment_, segment in timeline_.co_iter(timeline):
for track_ in annotation.get_tracks(segment_):
track = projection.new_track(segment, candidate=track_)
projection[segment, track] = annotation[segment_, track_]
return projection | Project annotation onto timeline segments
reference |__A__| |__B__|
|____C____|
timeline |---|---|---| |---|
projection |_A_|_A_|_C_| |_B_|
|_C_|
Parameters
----------
annotation : Annotation
timeline : Timeline
Returns
-------
projection : Annotation | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/utils.py#L115-L141 | null | class UEMSupportMixin:
"""Provides 'uemify' method with optional (à la NIST) collar"""
def extrude(self, uem, reference, collar=0.0, skip_overlap=False):
    """Extrude reference boundary collars from uem

    reference |----|     |--------------|       |-------------|
    uem       |---------------------|    |-------------------------------|
    extruded  |--| |--| |---| |-----|    |-| |-----| |-----------| |-----|

    Parameters
    ----------
    uem : Timeline
        Evaluation map.
    reference : Annotation
        Reference annotation.
    collar : float, optional
        When provided, set the duration of collars centered around
        reference segment boundaries that are extruded from both reference
        and hypothesis. Defaults to 0. (i.e. no collar).
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).

    Returns
    -------
    extruded_uem : Timeline
    """
    # fast path: nothing to extrude
    if collar == 0. and not skip_overlap:
        return uem
    collars, overlap_regions = [], []
    # build list of collars if needed
    if collar > 0.:
        # iterate over all segments in reference
        for segment in reference.itersegments():
            # add collar centered on start time
            t = segment.start
            collars.append(Segment(t - .5 * collar, t + .5 * collar))
            # add collar centered on end time
            t = segment.end
            collars.append(Segment(t - .5 * collar, t + .5 * collar))
    # build list of overlap regions if needed
    if skip_overlap:
        # iterate over pair of intersecting segments
        for (segment1, track1), (segment2, track2) in reference.co_iter(reference):
            # skip a segment/track compared with itself
            if segment1 == segment2 and track1 == track2:
                continue
            # add their intersection
            overlap_regions.append(segment1 & segment2)
    segments = collars + overlap_regions
    # merge all extruded regions, then return their complement within uem
    return Timeline(segments=segments).support().gaps(support=uem)
def common_timeline(self, reference, hypothesis):
    """Return timeline common to both reference and hypothesis.

    reference  |--------| |------------|   |---------|  |----|
    hypothesis |--------------| |------|      |----------------|
    timeline   |--|-----|----|---|-|------| |-|---------|----| |----|

    Parameters
    ----------
    reference : Annotation
    hypothesis : Annotation

    Returns
    -------
    timeline : Timeline
    """
    # start from a copy of the reference timeline...
    merged = reference.get_timeline(copy=True)
    # ...add every hypothesis segment into it...
    merged.update(hypothesis.get_timeline(copy=False))
    # ...and cut the union into non-overlapping segments
    return merged.segmentation()
def uemify(self, reference, hypothesis, uem=None, collar=0.,
           skip_overlap=False, returns_uem=False, returns_timeline=False):
    """Crop 'reference' and 'hypothesis' to 'uem' support

    Parameters
    ----------
    reference, hypothesis : Annotation
        Reference and hypothesis annotations.
    uem : Timeline, optional
        Evaluation map.
    collar : float, optional
        When provided, set the duration of collars centered around
        reference segment boundaries that are extruded from both reference
        and hypothesis. Defaults to 0. (i.e. no collar).
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).
    returns_uem : bool, optional
        Set to True to return extruded uem as well.
        Defaults to False (i.e. only return reference and hypothesis)
    returns_timeline : bool, optional
        Set to True to oversegment reference and hypothesis so that they
        share the same internal timeline.

    Returns
    -------
    reference, hypothesis : Annotation
        Extruded reference and hypothesis annotations
    uem : Timeline
        Extruded uem (returned only when 'returns_uem' is True)
    timeline : Timeline:
        Common timeline (returned only when 'returns_timeline' is True)
    """
    # when uem is not provided, use the union of reference and hypothesis
    # extents -- and warn the user about that.
    if uem is None:
        r_extent = reference.get_timeline().extent()
        h_extent = hypothesis.get_timeline().extent()
        extent = r_extent | h_extent
        uem = Timeline(segments=[extent] if extent else [],
                       uri=reference.uri)
        warnings.warn(
            "'uem' was approximated by the union of 'reference' "
            "and 'hypothesis' extents.")
    # extrude collars (and overlap regions) from uem
    uem = self.extrude(uem, reference, collar=collar,
                       skip_overlap=skip_overlap)
    # extrude regions outside of uem
    reference = reference.crop(uem, mode='intersection')
    hypothesis = hypothesis.crop(uem, mode='intersection')
    # project reference and hypothesis on common timeline
    if returns_timeline:
        timeline = self.common_timeline(reference, hypothesis)
        reference = self.project(reference, timeline)
        hypothesis = self.project(hypothesis, timeline)
    # build output tuple, appending optional extras in a fixed order
    result = (reference, hypothesis)
    if returns_uem:
        result += (uem, )
    if returns_timeline:
        result += (timeline, )
    return result
|
pyannote/pyannote-metrics | pyannote/metrics/utils.py | UEMSupportMixin.uemify | python | def uemify(self, reference, hypothesis, uem=None, collar=0.,
skip_overlap=False, returns_uem=False, returns_timeline=False):
# when uem is not provided, use the union of reference and hypothesis
# extents -- and warn the user about that.
if uem is None:
r_extent = reference.get_timeline().extent()
h_extent = hypothesis.get_timeline().extent()
extent = r_extent | h_extent
uem = Timeline(segments=[extent] if extent else [],
uri=reference.uri)
warnings.warn(
"'uem' was approximated by the union of 'reference' "
"and 'hypothesis' extents.")
# extrude collars (and overlap regions) from uem
uem = self.extrude(uem, reference, collar=collar,
skip_overlap=skip_overlap)
# extrude regions outside of uem
reference = reference.crop(uem, mode='intersection')
hypothesis = hypothesis.crop(uem, mode='intersection')
# project reference and hypothesis on common timeline
if returns_timeline:
timeline = self.common_timeline(reference, hypothesis)
reference = self.project(reference, timeline)
hypothesis = self.project(hypothesis, timeline)
result = (reference, hypothesis)
if returns_uem:
result += (uem, )
if returns_timeline:
result += (timeline, )
return result | Crop 'reference' and 'hypothesis' to 'uem' support
Parameters
----------
reference, hypothesis : Annotation
Reference and hypothesis annotations.
uem : Timeline, optional
Evaluation map.
collar : float, optional
When provided, set the duration of collars centered around
reference segment boundaries that are extruded from both reference
and hypothesis. Defaults to 0. (i.e. no collar).
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
returns_uem : bool, optional
Set to True to return extruded uem as well.
Defaults to False (i.e. only return reference and hypothesis)
returns_timeline : bool, optional
Set to True to oversegment reference and hypothesis so that they
share the same internal timeline.
Returns
-------
reference, hypothesis : Annotation
Extruded reference and hypothesis annotations
uem : Timeline
Extruded uem (returned only when 'returns_uem' is True)
timeline : Timeline:
Common timeline (returned only when 'returns_timeline' is True) | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/utils.py#L143-L210 | [
"def extrude(self, uem, reference, collar=0.0, skip_overlap=False):\n \"\"\"Extrude reference boundary collars from uem\n\n reference |----| |--------------| |-------------|\n uem |---------------------| |-------------------------------|\n extruded |--| |--| |---| |-----| |-| |-----| |-----------| |-----|\n\n Parameters\n ----------\n uem : Timeline\n Evaluation map.\n reference : Annotation\n Reference annotation.\n collar : float, optional\n When provided, set the duration of collars centered around\n reference segment boundaries that are extruded from both reference\n and hypothesis. Defaults to 0. (i.e. no collar).\n skip_overlap : bool, optional\n Set to True to not evaluate overlap regions.\n Defaults to False (i.e. keep overlap regions).\n\n Returns\n -------\n extruded_uem : Timeline\n \"\"\"\n\n if collar == 0. and not skip_overlap:\n return uem\n\n collars, overlap_regions = [], []\n\n # build list of collars if needed\n if collar > 0.:\n # iterate over all segments in reference\n for segment in reference.itersegments():\n\n # add collar centered on start time\n t = segment.start\n collars.append(Segment(t - .5 * collar, t + .5 * collar))\n\n # add collar centered on end time\n t = segment.end\n collars.append(Segment(t - .5 * collar, t + .5 * collar))\n\n # build list of overlap regions if needed\n if skip_overlap:\n # iterate over pair of intersecting segments\n for (segment1, track1), (segment2, track2) in reference.co_iter(reference):\n if segment1 == segment2 and track1 == track2:\n continue\n # add their intersection\n overlap_regions.append(segment1 & segment2)\n\n segments = collars + overlap_regions\n\n return Timeline(segments=segments).support().gaps(support=uem)\n",
"def common_timeline(self, reference, hypothesis):\n \"\"\"Return timeline common to both reference and hypothesis\n\n reference |--------| |------------| |---------| |----|\n hypothesis |--------------| |------| |----------------|\n timeline |--|-----|----|---|-|------| |-|---------|----| |----|\n\n Parameters\n ----------\n reference : Annotation\n hypothesis : Annotation\n\n Returns\n -------\n timeline : Timeline\n \"\"\"\n timeline = reference.get_timeline(copy=True)\n timeline.update(hypothesis.get_timeline(copy=False))\n return timeline.segmentation()\n",
"def project(self, annotation, timeline):\n \"\"\"Project annotation onto timeline segments\n\n reference |__A__| |__B__|\n |____C____|\n\n timeline |---|---|---| |---|\n\n projection |_A_|_A_|_C_| |_B_|\n |_C_|\n\n Parameters\n ----------\n annotation : Annotation\n timeline : Timeline\n\n Returns\n -------\n projection : Annotation\n \"\"\"\n projection = annotation.empty()\n timeline_ = annotation.get_timeline(copy=False)\n for segment_, segment in timeline_.co_iter(timeline):\n for track_ in annotation.get_tracks(segment_):\n track = projection.new_track(segment, candidate=track_)\n projection[segment, track] = annotation[segment_, track_]\n return projection\n"
] | class UEMSupportMixin:
"""Provides 'uemify' method with optional (à la NIST) collar"""
def extrude(self, uem, reference, collar=0.0, skip_overlap=False):
    """Extrude reference boundary collars from uem

    reference |----|     |--------------|       |-------------|
    uem       |---------------------|    |-------------------------------|
    extruded  |--| |--| |---| |-----|    |-| |-----| |-----------| |-----|

    Parameters
    ----------
    uem : Timeline
        Evaluation map.
    reference : Annotation
        Reference annotation.
    collar : float, optional
        When provided, set the duration of collars centered around
        reference segment boundaries that are extruded from both reference
        and hypothesis. Defaults to 0. (i.e. no collar).
    skip_overlap : bool, optional
        Set to True to not evaluate overlap regions.
        Defaults to False (i.e. keep overlap regions).

    Returns
    -------
    extruded_uem : Timeline
    """
    # fast path: nothing to extrude
    if collar == 0. and not skip_overlap:
        return uem
    collars, overlap_regions = [], []
    # build list of collars if needed
    if collar > 0.:
        # iterate over all segments in reference
        for segment in reference.itersegments():
            # add collar centered on start time
            t = segment.start
            collars.append(Segment(t - .5 * collar, t + .5 * collar))
            # add collar centered on end time
            t = segment.end
            collars.append(Segment(t - .5 * collar, t + .5 * collar))
    # build list of overlap regions if needed
    if skip_overlap:
        # iterate over pair of intersecting segments
        for (segment1, track1), (segment2, track2) in reference.co_iter(reference):
            # skip a segment/track compared with itself
            if segment1 == segment2 and track1 == track2:
                continue
            # add their intersection
            overlap_regions.append(segment1 & segment2)
    segments = collars + overlap_regions
    # merge all extruded regions, then return their complement within uem
    return Timeline(segments=segments).support().gaps(support=uem)
def common_timeline(self, reference, hypothesis):
    """Return timeline common to both reference and hypothesis.

    reference  |--------| |------------|   |---------|  |----|
    hypothesis |--------------| |------|      |----------------|
    timeline   |--|-----|----|---|-|------| |-|---------|----| |----|

    Parameters
    ----------
    reference : Annotation
    hypothesis : Annotation

    Returns
    -------
    timeline : Timeline
    """
    # start from a copy of the reference timeline...
    merged = reference.get_timeline(copy=True)
    # ...add every hypothesis segment into it...
    merged.update(hypothesis.get_timeline(copy=False))
    # ...and cut the union into non-overlapping segments
    return merged.segmentation()
def project(self, annotation, timeline):
    """Project annotation onto timeline segments.

    reference  |__A__|    |__B__|
                  |____C____|
    timeline   |---|---|---|  |---|
    projection |_A_|_A_|_C_|  |_B_|
                   |_C_|

    Parameters
    ----------
    annotation : Annotation
    timeline : Timeline

    Returns
    -------
    projection : Annotation
    """
    result = annotation.empty()
    source_timeline = annotation.get_timeline(copy=False)
    # for every pair of intersecting (source, destination) segments,
    # copy each source track onto the destination segment
    for src_segment, dst_segment in source_timeline.co_iter(timeline):
        for src_track in annotation.get_tracks(src_segment):
            dst_track = result.new_track(dst_segment, candidate=src_track)
            result[dst_segment, dst_track] = annotation[src_segment, src_track]
    return result
|
def det_curve(self, cost_miss=100, cost_fa=1, prior_target=0.01,
              return_latency=False):
    """DET curve

    Parameters
    ----------
    cost_miss : float, optional
        Cost of missed detections. Defaults to 100.
    cost_fa : float, optional
        Cost of false alarms. Defaults to 1.
    prior_target : float, optional
        Target trial prior. Defaults to 0.01.
    return_latency : bool, optional
        Set to True to return latency.
        Has no effect when latencies are given at initialization time.

    Returns
    -------
    thresholds : numpy array
        Detection thresholds
    fpr : numpy array
        False alarm rate
    fnr : numpy array
        False rejection rate
    eer : float
        Equal error rate
    cdet : numpy array
        Cdet cost function
    speaker_latency, absolute_latency : numpy array
        Speaker and absolute latency (only when return_latency is True).

    In fixed-latency mode, returns instead a nested dict
    result[key][latency] = (thresholds, fpr, fnr, eer, cdet)
    with key in {'speaker', 'absolute'}.
    """
    if self.latencies is None:
        # variable-latency mode: one global score per trial
        y_true = np.array([trial['target'] for _, trial in self])
        scores = np.array([trial['score'] for _, trial in self])
        # NOTE: this calls the module-level `det_curve` helper
        # (pyannote.metrics.binary_classification), not this method
        fpr, fnr, thresholds, eer = det_curve(y_true, scores, distances=False)
        # reorder by increasing threshold
        fpr, fnr, thresholds = fpr[::-1], fnr[::-1], thresholds[::-1]
        cdet = cost_miss * fnr * prior_target + \
               cost_fa * fpr * (1. - prior_target)
        if return_latency:
            # needed to align the thresholds used in the DET curve
            # with (self.)thresholds used to compute latencies.
            indices = np.searchsorted(thresholds, self.thresholds, side='left')
            thresholds = np.take(thresholds, indices, mode='clip')
            fpr = np.take(fpr, indices, mode='clip')
            fnr = np.take(fnr, indices, mode='clip')
            cdet = np.take(cdet, indices, mode='clip')
            return thresholds, fpr, fnr, eer, cdet, \
                self.speaker_latency, self.absolute_latency
        else:
            return thresholds, fpr, fnr, eer, cdet
    else:
        # fixed-latency mode: one DET curve per latency, for both the
        # speaker-latency and absolute-latency score definitions
        y_true = np.array([trial['target'] for _, trial in self])
        spk_scores = np.array([trial['spk_score'] for _, trial in self])
        abs_scores = np.array([trial['abs_score'] for _, trial in self])
        result = {}
        for key, scores in {'speaker': spk_scores,
                            'absolute': abs_scores}.items():
            result[key] = {}
            for i, latency in enumerate(self.latencies):
                fpr, fnr, theta, eer = det_curve(y_true, scores[:, i],
                                                 distances=False)
                # reorder by increasing threshold
                fpr, fnr, theta = fpr[::-1], fnr[::-1], theta[::-1]
                cdet = cost_miss * fnr * prior_target + \
                       cost_fa * fpr * (1. - prior_target)
                result[key][latency] = theta, fpr, fnr, eer, cdet
        return result
Parameters
----------
cost_miss : float, optional
Cost of missed detections. Defaults to 100.
cost_fa : float, optional
Cost of false alarms. Defaults to 1.
prior_target : float, optional
Target trial prior. Defaults to 0.5.
return_latency : bool, optional
Set to True to return latency.
Has no effect when latencies are given at initialization time.
Returns
-------
thresholds : numpy array
Detection thresholds
fpr : numpy array
False alarm rate
fnr : numpy array
False rejection rate
eer : float
Equal error rate
cdet : numpy array
Cdet cost function
speaker_latency : numpy array
absolute_latency : numpy array
Speaker and absolute latency when return_latency is set to True. | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/spotting.py#L238-L315 | [
"def det_curve(y_true, scores, distances=False):\n \"\"\"DET curve\n\n Parameters\n ----------\n y_true : (n_samples, ) array-like\n Boolean reference.\n scores : (n_samples, ) array-like\n Predicted score.\n distances : boolean, optional\n When True, indicate that `scores` are actually `distances`\n\n Returns\n -------\n fpr : numpy array\n False alarm rate\n fnr : numpy array\n False rejection rate\n thresholds : numpy array\n Corresponding thresholds\n eer : float\n Equal error rate\n \"\"\"\n\n if distances:\n scores = -scores\n\n # compute false positive and false negative rates\n # (a.k.a. false alarm and false rejection rates)\n fpr, tpr, thresholds = sklearn.metrics.roc_curve(\n y_true, scores, pos_label=True)\n fnr = 1 - tpr\n if distances:\n thresholds = -thresholds\n\n # estimate equal error rate\n eer_index = np.where(fpr > fnr)[0][0]\n eer = .25 * (fpr[eer_index-1] + fpr[eer_index] +\n fnr[eer_index-1] + fnr[eer_index])\n\n return fpr, fnr, thresholds, eer\n"
] | class LowLatencySpeakerSpotting(BaseMetric):
"""Evaluation of low-latency speaker spotting (LLSS) systems
LLSS systems can be evaluated in two ways: with fixed or variable latency.
* When latency is fixed a priori (default), only scores reported by the
system within the requested latency range are considered. Varying the
detection threshold has no impact on the actual latency of the system. It
only impacts the detection performance.
* In variable latency mode, the whole stream of scores is considered.
Varying the detection threshold will impact both the detection performance
and the detection latency. Each trial will result in the alarm being
triggered with a different latency. In case the alarm is not triggered at
all (missed detection), the latency is arbitrarily set to the value one
would obtain if it were triggered at the end of the last target speech
turn. The reported latency is the average latency over all target trials.
Parameters
----------
latencies : float iterable, optional
Switch to fixed latency mode, using provided `latencies`.
Defaults to [1, 5, 10, 30, 60] (in seconds).
thresholds : float iterable, optional
Switch to variable latency mode, using provided detection `thresholds`.
Defaults to fixed latency mode.
"""
@classmethod
def metric_name(cls):
    """Return human-readable name of the metric."""
    return "Low-latency speaker spotting"
@classmethod
def metric_components(cls):
    """Return metric components, initialized to zero.

    NOTE(review): the exact accumulation semantics of the 'target'
    component are defined by the BaseMetric machinery -- confirm there.
    """
    return {'target': 0.}
def __init__(self, thresholds=None, latencies=None):
    """Initialize in either fixed-latency or variable-latency mode.

    Parameters
    ----------
    thresholds : float iterable, optional
        Switch to variable latency mode, using these detection thresholds.
    latencies : float iterable, optional
        Switch to fixed latency mode, using these latencies (in seconds).
        Defaults to [1, 5, 10, 30, 60] when neither argument is provided.

    Raises
    ------
    ValueError
        If both `thresholds` and `latencies` are provided.
    """
    super(LowLatencySpeakerSpotting, self).__init__()
    # the two modes are mutually exclusive
    if thresholds is not None and latencies is not None:
        raise ValueError(
            'One must choose between fixed and variable latency.')
    # default to fixed-latency mode
    if thresholds is None and latencies is None:
        latencies = [1, 5, 10, 30, 60]
    if thresholds is not None:
        self.thresholds = np.sort(thresholds)
    # keep latencies sorted; None means variable-latency mode
    self.latencies = None if latencies is None else np.sort(latencies)
def compute_metric(self, detail):
    # Trials are accumulated and analyzed a posteriori (see `det_curve`):
    # there is no meaningful single-trial metric value to return.
    return None
def _fixed_latency(self, reference, timestamps, scores):
    """Evaluate one trial in fixed-latency mode.

    Parameters
    ----------
    reference : Timeline
        Target speech turns; falsy for non-target trials.
        (assumes an indexable Timeline here -- see `compute_components`;
        TODO confirm Annotation inputs are converted upstream)
    timestamps : iterable of float
        Times at which scores were emitted (assumed sorted -- TODO confirm).
    scores : iterable of float
        Detection scores, aligned with `timestamps`.

    Returns
    -------
    detail : dict
        'target' flag, per-latency 'spk_score' / 'abs_score' columns, and
        the (fixed) 'speaker_latency' / 'absolute_latency' values.
    """
    if not reference:
        # non-target trial: use the overall maximum score for every latency
        target_trial = False
        spk_score = np.max(scores) * np.ones((len(self.latencies), 1))
        abs_score = spk_score
    else:
        target_trial = True
        # cumulative target speech duration after each speech turn
        total = np.cumsum([segment.duration for segment in reference])
        # maximum score in timerange [0, t]
        # where t is when latency is reached
        spk_score = []
        abs_score = []
        # index of speech turn when given latency is reached
        for i, latency in zip(np.searchsorted(total, self.latencies),
                              self.latencies):
            # maximum score in timerange [0, t]
            # where t is when (speaker) latency is reached
            try:
                t = reference[i].end - (total[i] - latency)
                up_to = np.searchsorted(timestamps, t)
                if up_to < 1:
                    # no score was emitted before t
                    s = -sys.float_info.max
                else:
                    s = np.max(scores[:up_to])
            except IndexError as e:
                # latency exceeds total target speech: use overall maximum
                s = np.max(scores)
            spk_score.append(s)
            # maximum score in timerange [0, t + latency]
            # where t is when target speaker starts speaking
            t = reference[0].start + latency
            up_to = np.searchsorted(timestamps, t)
            if up_to < 1:
                # no score was emitted before t + latency
                s = -sys.float_info.max
            else:
                s = np.max(scores[:up_to])
            abs_score.append(s)
        # shape (n_latencies, 1) columns, as consumed by `det_curve`
        spk_score = np.array(spk_score).reshape((-1, 1))
        abs_score = np.array(abs_score).reshape((-1, 1))
    return {
        'target': target_trial,
        'speaker_latency': self.latencies,
        'spk_score': spk_score,
        'absolute_latency': self.latencies,
        'abs_score': abs_score,
    }
def _variable_latency(self, reference, timestamps, scores, **kwargs):
    """Evaluate one trial in variable-latency mode.

    Parameters
    ----------
    reference : Timeline or Annotation
        Target speech turns; falsy for non-target trials.
    timestamps : iterable of float
        Times at which scores were emitted (assumed sorted -- TODO confirm).
    scores : iterable of float
        Detection scores, aligned with `timestamps`.

    Returns
    -------
    detail : dict
        'target' flag, per-threshold 'absolute_latency' and
        'speaker_latency' arrays (NaN for non-target trials), and the
        trial-level 'score' (overall maximum).
    """
    # pre-compute latencies
    # NOTE: np.NAN was removed in NumPy 2.0 -- use the np.nan spelling,
    # which is valid on every NumPy version.
    speaker_latency = np.nan * np.ones((len(timestamps), 1))
    absolute_latency = np.nan * np.ones((len(timestamps), 1))
    if isinstance(reference, Annotation):
        reference = reference.get_timeline(copy=False)
    if reference:
        first_time = reference[0].start
        for i, t in enumerate(timestamps):
            so_far = Segment(first_time, t)
            speaker_latency[i] = reference.crop(so_far).duration()
            absolute_latency[i] = max(0, so_far.duration)
    # TODO | speed up latency pre-computation
    # for every threshold, compute when (if ever) alarm is triggered
    maxcum = (np.maximum.accumulate(scores)).reshape((-1, 1))
    triggered = maxcum > self.thresholds
    indices = np.array([np.searchsorted(triggered[:, i], True)
                        for i, _ in enumerate(self.thresholds)])
    if reference:
        target_trial = True
        absolute_latency = np.take(absolute_latency, indices, mode='clip')
        speaker_latency = np.take(speaker_latency, indices, mode='clip')
        # is alarm triggered at all?
        positive = triggered[-1, :]
        # in case alarm is not triggered, set absolute latency to duration
        # between first and last speech turn of the target speaker...
        absolute_latency[~positive] = reference.extent().duration
        # ...and set speaker latency to target's total speech duration
        speaker_latency[~positive] = reference.duration()
    else:
        target_trial = False
        # the notion of "latency" is not applicable to non-target trials
        absolute_latency = np.nan
        speaker_latency = np.nan
    return {
        'target': target_trial,
        'absolute_latency': absolute_latency,
        'speaker_latency': speaker_latency,
        'score': np.max(scores)
    }
def compute_components(self, reference, hypothesis, **kwargs):
    """Evaluate one trial.

    Parameters
    ----------
    reference : Timeline or Annotation
        Target speech turns (empty/falsy for non-target trials).
    hypothesis : SlidingWindowFeature or (time, score) iterable
        Stream of detection scores.

    Returns
    -------
    detail : dict
        Trial details, built by `_variable_latency` or `_fixed_latency`
        depending on the mode selected at initialization time.
    """
    if isinstance(hypothesis, SlidingWindowFeature):
        # one (end-of-window, score) pair per sliding window position
        hypothesis = [(window.end, value) for window, value in hypothesis]
    timestamps, scores = zip(*hypothesis)
    # self.latencies is None in variable-latency mode (see __init__)
    if self.latencies is None:
        return self._variable_latency(reference, timestamps, scores)
    else:
        return self._fixed_latency(reference, timestamps, scores)
@property
def absolute_latency(self):
    """Average absolute latency over all target trials (NaN-aware mean)."""
    per_trial = [trial['absolute_latency']
                 for _, trial in self
                 if trial['target']]
    return np.nanmean(per_trial, axis=0)
@property
def speaker_latency(self):
    """Average speaker latency over all target trials (NaN-aware mean)."""
    per_trial = [trial['speaker_latency']
                 for _, trial in self
                 if trial['target']]
    return np.nanmean(per_trial, axis=0)
|
def get_hypothesis(hypotheses, current_file):
    """Get hypothesis for given file

    Parameters
    ----------
    hypotheses : `dict`
        Speaker diarization hypothesis provided by `load_rttm`.
    current_file : `dict`
        File description as given by pyannote.database protocols.

    Returns
    -------
    hypothesis : `pyannote.core.Annotation`
        Hypothesis corresponding to `current_file`.

    Raises
    ------
    ValueError
        If more than one hypothesis uri is a substring of the file uri.
    """
    uri = current_file['uri']

    if uri in hypotheses:
        return hypotheses[uri]

    # if the exact 'uri' is not available in hypothesis,
    # look for matching substring
    tmp_uri = [u for u in hypotheses if u in uri]

    # no matching speech turns. return empty annotation
    if len(tmp_uri) == 0:
        msg = f'Could not find hypothesis for file "{uri}"; assuming empty file.'
        warnings.warn(msg)
        return Annotation(uri=uri, modality='speaker')

    # exactly one matching file. return it
    if len(tmp_uri) == 1:
        hypothesis = hypotheses[tmp_uri[0]]
        hypothesis.uri = uri
        return hypothesis

    # more than one matching file. error.
    # BUG FIX: the original built an f-string referencing an undefined
    # name `uris` (NameError at runtime) and then called .format() on the
    # already-formatted string; build the message in one step instead.
    msg = f'Found too many hypotheses matching file "{uri}" ({tmp_uri}).'
    raise ValueError(msg)
Parameters
----------
hypotheses : `dict`
Speaker diarization hypothesis provided by `load_rttm`.
current_file : `dict`
File description as given by pyannote.database protocols.
Returns
-------
hypothesis : `pyannote.core.Annotation`
Hypothesis corresponding to `current_file`. | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/scripts/pyannote-metrics.py#L142-L181 | null | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017-2019 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Herve BREDIN - http://herve.niderb.fr
"""
Evaluation
Usage:
pyannote-metrics.py detection [--subset=<subset> --collar=<seconds> --skip-overlap] <database.task.protocol> <hypothesis.rttm>
pyannote-metrics.py segmentation [--subset=<subset> --tolerance=<seconds>] <database.task.protocol> <hypothesis.rttm>
pyannote-metrics.py diarization [--subset=<subset> --greedy --collar=<seconds> --skip-overlap] <database.task.protocol> <hypothesis.rttm>
pyannote-metrics.py identification [--subset=<subset> --collar=<seconds> --skip-overlap] <database.task.protocol> <hypothesis.rttm>
pyannote-metrics.py spotting [--subset=<subset> --latency=<seconds>... --filter=<expression>...] <database.task.protocol> <hypothesis.json>
pyannote-metrics.py -h | --help
pyannote-metrics.py --version
Options:
<database.task.protocol> Set evaluation protocol (e.g. "Etape.SpeakerDiarization.TV")
--subset=<subset> Evaluated subset (train|development|test) [default: test]
--collar=<seconds> Collar, in seconds [default: 0.0].
--skip-overlap Do not evaluate overlap regions.
--tolerance=<seconds> Tolerance, in seconds [default: 0.5].
--greedy Use greedy diarization error rate.
--latency=<seconds> Evaluate with fixed latency.
--filter=<expression> Filter out target trials that do not match the
expression; e.g. use --filter="speech>10" to skip
target trials with less than 10s of speech from
the target.
-h --help Show this screen.
--version Show version.
All modes but "spotting" expect hypothesis using the RTTM file format.
RTTM files contain one line per speech turn, using the following convention:
SPEAKER {uri} 1 {start_time} {duration} <NA> <NA> {speaker_id} <NA> <NA>
* uri: file identifier (as given by pyannote.database protocols)
* start_time: speech turn start time in seconds
* duration: speech turn duration in seconds
* speaker_id: speaker identifier
"spotting" mode expects hypothesis using the following JSON file format.
It should contain a list of trial hypothesis, using the same trial order as
pyannote.database speaker spotting protocols (e.g. protocol.test_trial())
[
{'uri': '<uri>', 'model_id': '<model_id>', 'scores': [[<t1>, <v1>], [<t2>, <v2>], ... [<tn>, <vn>]]},
{'uri': '<uri>', 'model_id': '<model_id>', 'scores': [[<t1>, <v1>], [<t2>, <v2>], ... [<tn>, <vn>]]},
{'uri': '<uri>', 'model_id': '<model_id>', 'scores': [[<t1>, <v1>], [<t2>, <v2>], ... [<tn>, <vn>]]},
...
{'uri': '<uri>', 'model_id': '<model_id>', 'scores': [[<t1>, <v1>], [<t2>, <v2>], ... [<tn>, <vn>]]},
]
* uri: file identifier (as given by pyannote.database protocols)
* model_id: target identifier (as given by pyannote.database protocols)
* [ti, vi]: [time, value] pair indicating that the system has output the
score vi at time ti (e.g. [10.2, 0.2] means that the system
gave a score of 0.2 at time 10.2s).
Calling "spotting" mode will create a bunch of files.
* <hypothesis.det.txt> contains DET curve using the following raw file format:
<threshold> <fpr> <fnr>
* <hypothesis.lcy.txt> contains latency curves using this format:
<threshold> <fpr> <fnr> <speaker_latency> <absolute_latency>
"""
# command line parsing
from docopt import docopt
import sys
import json
import warnings
import functools
import numpy as np
import pandas as pd
from tabulate import tabulate
# import multiprocessing as mp
from pyannote.core import Annotation
from pyannote.database.util import load_rttm
# evaluation protocols
from pyannote.database import get_protocol
from pyannote.database.util import get_annotated
from pyannote.metrics.detection import DetectionErrorRate
from pyannote.metrics.detection import DetectionAccuracy
from pyannote.metrics.detection import DetectionRecall
from pyannote.metrics.detection import DetectionPrecision
from pyannote.metrics.segmentation import SegmentationPurity
from pyannote.metrics.segmentation import SegmentationCoverage
from pyannote.metrics.segmentation import SegmentationPrecision
from pyannote.metrics.segmentation import SegmentationRecall
from pyannote.metrics.diarization import GreedyDiarizationErrorRate
from pyannote.metrics.diarization import DiarizationErrorRate
from pyannote.metrics.diarization import DiarizationPurity
from pyannote.metrics.diarization import DiarizationCoverage
from pyannote.metrics.identification import IdentificationErrorRate
from pyannote.metrics.identification import IdentificationPrecision
from pyannote.metrics.identification import IdentificationRecall
from pyannote.metrics.spotting import LowLatencySpeakerSpotting
showwarning_orig = warnings.showwarning
def showwarning(message, category, *args, **kwargs):
    """Replacement for `warnings.showwarning` that prints a compact
    "Category: message" line instead of the default file/lineno format.

    Extra positional/keyword arguments (filename, lineno, file, line)
    are accepted for signature compatibility but ignored.
    """
    # NOTE: the original body imported `sys` here without using it; removed.
    print(category.__name__ + ':', str(message))
warnings.showwarning = showwarning
def process_one(item, hypotheses=None, metrics=None):
    """Evaluate every metric on a single file; return {metric_name: value}."""
    reference = item['annotation']
    hypothesis = get_hypothesis(hypotheses, item)
    uem = get_annotated(item)
    results = {}
    for name, metric in metrics.items():
        results[name] = metric(reference, hypothesis, uem=uem)
    return results
def get_reports(protocol, subset, hypotheses, metrics):
    """Run every metric over all files of `subset` and return their reports.

    Parameters
    ----------
    protocol : pyannote.database protocol
    subset : str
        One of 'train', 'development', 'test'.
    hypotheses : dict
        Hypotheses indexed by file URI.
    metrics : dict
        Metric instances indexed by name; values are accumulated in place
        as a side effect of processing each file.

    Returns
    -------
    dict : per-metric pandas report, indexed by metric name.
    """
    process = functools.partial(process_one,
                                hypotheses=hypotheses,
                                metrics=metrics)
    # get items and their number
    # (the protocol progress bar is disabled while materializing the list)
    progress = protocol.progress
    protocol.progress = False
    items = list(getattr(protocol, subset)())
    protocol.progress = progress
    n_items = len(items)
    for item in items:
        process(item)
    # HB. 2018-02-05: parallel processing was removed because it is not clear
    # how to handle the case where the same 'uri' is processed several times
    # in a possibly different order for each sub-metric...
    # # heuristic to estimate the optimal number of processes
    # chunksize = 20
    # processes = max(1, min(mp.cpu_count(), n_items // chunksize))
    # pool = mp.Pool(processes)
    # _ = pool.map(process, items, chunksize=chunksize)
    return {key: metric.report(display=False)
            for key, metric in metrics.items()}
def reindex(report):
    """Return `report` with its 'TOTAL' row moved to the last position."""
    labels = list(report.index)
    pos = labels.index('TOTAL')
    reordered = labels[:pos] + labels[pos + 1:] + ['TOTAL']
    return report.reindex(reordered)
def detection(protocol, subset, hypotheses, collar=0.0, skip_overlap=False):
    """Evaluate speech activity detection and print a per-file report.

    Computes detection error rate, accuracy, precision and recall over
    every file of `subset`, then prints one table with one row per file
    plus a final 'TOTAL' row.
    """
    options = {'collar': collar,
               'skip_overlap': skip_overlap,
               'parallel': True}
    metrics = {
        'error': DetectionErrorRate(**options),
        'accuracy': DetectionAccuracy(**options),
        'precision': DetectionPrecision(**options),
        'recall': DetectionRecall(**options)}
    # accumulates every metric over the whole subset (side effect);
    # the returned per-metric reports are recomputed below instead
    reports = get_reports(protocol, subset, hypotheses, metrics)
    report = metrics['error'].report(display=False)
    accuracy = metrics['accuracy'].report(display=False)
    precision = metrics['precision'].report(display=False)
    recall = metrics['recall'].report(display=False)
    # graft the '%' column of each secondary metric onto the error report
    report['accuracy', '%'] = accuracy[metrics['accuracy'].name, '%']
    report['precision', '%'] = precision[metrics['precision'].name, '%']
    report['recall', '%'] = recall[metrics['recall'].name, '%']
    report = reindex(report)
    # move the three freshly appended columns right after the first one
    columns = list(report.columns)
    report = report[[columns[0]] + columns[-3:] + columns[1:-3]]
    summary = 'Detection (collar = {0:g} ms{1})'.format(
        1000*collar, ', no overlap' if skip_overlap else '')
    headers = [summary] + \
              [report.columns[i][0] for i in range(4)] + \
              ['%' if c[1] == '%' else c[0] for c in report.columns[4:]]
    print(tabulate(report, headers=headers, tablefmt="simple",
                   floatfmt=".2f", numalign="decimal", stralign="left",
                   missingval="", showindex="default", disable_numparse=False))
def segmentation(protocol, subset, hypotheses, tolerance=0.5):
    """Evaluate segmentation (coverage, purity, precision, recall).

    Prints one table with one row per file plus a final 'TOTAL' row.
    """
    options = {'tolerance': tolerance, 'parallel': True}
    metrics = {'coverage': SegmentationCoverage(**options),
               'purity': SegmentationPurity(**options),
               'precision': SegmentationPrecision(**options),
               'recall': SegmentationRecall(**options)}
    # accumulates every metric over the whole subset (side effect);
    # the returned reports are unused
    reports = get_reports(protocol, subset, hypotheses, metrics)
    coverage = metrics['coverage'].report(display=False)
    purity = metrics['purity'].report(display=False)
    precision = metrics['precision'].report(display=False)
    recall = metrics['recall'].report(display=False)
    # keep only the value column of each per-metric report
    coverage = coverage[metrics['coverage'].name]
    purity = purity[metrics['purity'].name]
    precision = precision[metrics['precision'].name]
    recall = recall[metrics['recall'].name]
    report = pd.concat([coverage, purity, precision, recall], axis=1)
    report = reindex(report)
    headers = ['Segmentation (tolerance = {0:g} ms)'.format(1000*tolerance),
               'coverage', 'purity', 'precision', 'recall']
    print(tabulate(report, headers=headers, tablefmt="simple",
                   floatfmt=".2f", numalign="decimal", stralign="left",
                   missingval="", showindex="default", disable_numparse=False))
def diarization(protocol, subset, hypotheses, greedy=False,
                collar=0.0, skip_overlap=False):
    """Evaluate diarization (error rate, purity, coverage) and print a table.

    When `greedy` is True, uses the greedy (rather than optimal) mapping
    between reference and hypothesis labels.
    """
    options = {'collar': collar,
               'skip_overlap': skip_overlap,
               'parallel': True}
    metrics = {
        'purity': DiarizationPurity(**options),
        'coverage': DiarizationCoverage(**options)}
    if greedy:
        metrics['error'] = GreedyDiarizationErrorRate(**options)
    else:
        metrics['error'] = DiarizationErrorRate(**options)
    # accumulates every metric over the whole subset (side effect);
    # the returned per-metric reports are recomputed below instead
    reports = get_reports(protocol, subset, hypotheses, metrics)
    report = metrics['error'].report(display=False)
    purity = metrics['purity'].report(display=False)
    coverage = metrics['coverage'].report(display=False)
    # graft the '%' column of purity/coverage onto the error report
    report['purity', '%'] = purity[metrics['purity'].name, '%']
    report['coverage', '%'] = coverage[metrics['coverage'].name, '%']
    # move the two freshly appended columns right after the first one
    columns = list(report.columns)
    report = report[[columns[0]] + columns[-2:] + columns[1:-2]]
    report = reindex(report)
    summary = 'Diarization ({0:s}collar = {1:g} ms{2})'.format(
        'greedy, ' if greedy else '',
        1000 * collar,
        ', no overlap' if skip_overlap else '')
    headers = [summary] + \
              [report.columns[i][0] for i in range(3)] + \
              ['%' if c[1] == '%' else c[0] for c in report.columns[3:]]
    print(tabulate(report, headers=headers, tablefmt="simple",
                   floatfmt=".2f", numalign="decimal", stralign="left",
                   missingval="", showindex="default", disable_numparse=False))
def identification(protocol, subset, hypotheses,
                   collar=0.0, skip_overlap=False):
    """Evaluate identification (error rate, precision, recall) and print a table.

    Prints one table with one row per file plus a final 'TOTAL' row.
    """
    options = {'collar': collar,
               'skip_overlap': skip_overlap,
               'parallel': True}
    metrics = {
        'error': IdentificationErrorRate(**options),
        'precision': IdentificationPrecision(**options),
        'recall': IdentificationRecall(**options)}
    # accumulates every metric over the whole subset (side effect);
    # the returned per-metric reports are recomputed below instead
    reports = get_reports(protocol, subset, hypotheses, metrics)
    report = metrics['error'].report(display=False)
    precision = metrics['precision'].report(display=False)
    recall = metrics['recall'].report(display=False)
    # graft the '%' column of precision/recall onto the error report
    report['precision', '%'] = precision[metrics['precision'].name, '%']
    report['recall', '%'] = recall[metrics['recall'].name, '%']
    # move the two freshly appended columns right after the first one
    columns = list(report.columns)
    report = report[[columns[0]] + columns[-2:] + columns[1:-2]]
    report = reindex(report)
    # BUG FIX: the template previously used placeholders {1:g} and {2}
    # with only two positional arguments (indices 0 and 1), which raised
    # IndexError every time this mode ran.  Renumbered to {0:g}/{1}
    # (compare the 3-argument template in `diarization` it was copied from).
    summary = 'Identification (collar = {0:g} ms{1})'.format(
        1000 * collar,
        ', no overlap' if skip_overlap else '')
    headers = [summary] + \
              [report.columns[i][0] for i in range(3)] + \
              ['%' if c[1] == '%' else c[0] for c in report.columns[3:]]
    print(tabulate(report, headers=headers, tablefmt="simple",
                   floatfmt=".2f", numalign="decimal", stralign="left",
                   missingval="", showindex="default", disable_numparse=False))
def spotting(protocol, subset, latencies, hypotheses, output_prefix,
             filter_func=None):
    """Evaluate low-latency speaker spotting and dump DET/latency curves.

    Parameters
    ----------
    protocol : pyannote.database speaker spotting protocol
    subset : str
        One of 'train', 'development', 'test'.
    latencies : list of float
        Fixed latencies to evaluate at; when empty, operating thresholds
        are estimated from the pooled score distribution instead.
    hypotheses : list of dict
        One {'uri', 'model_id', 'scores'} dict per trial, in the same
        order as the protocol's trials.
    output_prefix : str
        Prefix of the .det*/.lcy* files written to disk.
    filter_func : callable, optional
        Called with the target speech duration of each *target* trial;
        trials for which it returns True are skipped.
    """
    # per-trial score tuples are gathered to estimate thresholds later on
    if not latencies:
        Scores = []
    protocol.diarization = False
    trials = getattr(protocol, '{subset}_trial'.format(subset=subset))()
    # first pass: sanity-check hypotheses against trials (and gather scores)
    for i, (current_trial, hypothesis) in enumerate(zip(trials, hypotheses)):
        # check trial/hypothesis target consistency
        try:
            assert current_trial['model_id'] == hypothesis['model_id']
        except AssertionError as e:
            msg = ('target mismatch in trial #{i} '
                   '(found: {found}, should be: {should_be})')
            raise ValueError(
                msg.format(i=i, found=hypothesis['model_id'],
                           should_be=current_trial['model_id']))
        # check trial/hypothesis file consistency
        try:
            assert current_trial['uri'] == hypothesis['uri']
        except AssertionError as e:
            msg = ('file mismatch in trial #{i} '
                   '(found: {found}, should be: {should_be})')
            raise ValueError(
                msg.format(i=i, found=hypothesis['uri'],
                           should_be=current_trial['uri']))
        # check at least one score is provided
        try:
            assert len(hypothesis['scores']) > 0
        except AssertionError as e:
            msg = ('empty list of scores in trial #{i}.')
            raise ValueError(msg.format(i=i))
        timestamps, scores = zip(*hypothesis['scores'])
        if not latencies:
            Scores.append(scores)
        # check trial/hypothesis timerange consistency
        try_with = current_trial['try_with']
        try:
            assert min(timestamps) >= try_with.start
        except AssertionError as e:
            msg = ('incorrect timestamp in trial #{i} '
                   '(found: {found:g}, should be: >= {should_be:g})')
            raise ValueError(
                msg.format(i=i,
                           found=min(timestamps),
                           should_be=try_with.start))
    if not latencies:
        # estimate best set of thresholds: dense percentiles of the pooled
        # score distribution, with extra resolution near 0% and 100%
        scores = np.concatenate(Scores)
        epsilons = np.array(
            [n * 10**(-e) for e in range(4, 1, -1) for n in range(1, 10)])
        percentile = np.concatenate([epsilons, np.arange(0.1, 100., 0.1), 100 - epsilons[::-1]])
        thresholds = np.percentile(scores, percentile)
    if not latencies:
        metric = LowLatencySpeakerSpotting(thresholds=thresholds)
    else:
        metric = LowLatencySpeakerSpotting(latencies=latencies)
    # second pass: actually accumulate the metric, trial by trial
    trials = getattr(protocol, '{subset}_trial'.format(subset=subset))()
    for i, (current_trial, hypothesis) in enumerate(zip(trials, hypotheses)):
        if filter_func is not None:
            speech = current_trial['reference'].duration()
            # a trial is a "target trial" when the target actually speaks
            target_trial = speech > 0
            if target_trial and filter_func(speech):
                continue
        reference = current_trial['reference']
        metric(reference, hypothesis['scores'])
    if not latencies:
        thresholds, fpr, fnr, eer, _ = metric.det_curve(return_latency=False)
        # save DET curve to <output_prefix>.det.txt
        det_path = '{output_prefix}.det.txt'.format(output_prefix=output_prefix)
        det_tmpl = '{t:.9f} {p:.9f} {n:.9f}\n'
        with open(det_path, mode='w') as fp:
            fp.write('# threshold false_positive_rate false_negative_rate\n')
            for t, p, n in zip(thresholds, fpr, fnr):
                line = det_tmpl.format(t=t, p=p, n=n)
                fp.write(line)
        print('> {det_path}'.format(det_path=det_path))
        thresholds, fpr, fnr, _, _, speaker_lcy, absolute_lcy = \
            metric.det_curve(return_latency=True)
        # save latency curve to <output_prefix>.lcy.txt
        lcy_path = '{output_prefix}.lcy.txt'.format(output_prefix=output_prefix)
        lcy_tmpl = '{t:.9f} {p:.9f} {n:.9f} {s:.6f} {a:.6f}\n'
        with open(lcy_path, mode='w') as fp:
            fp.write('# threshold false_positive_rate false_negative_rate speaker_latency absolute_latency\n')
            for t, p, n, s, a in zip(thresholds, fpr, fnr, speaker_lcy, absolute_lcy):
                # skip degenerate operating points (fpr == 1 or undefined
                # speaker latency)
                if p == 1:
                    continue
                if np.isnan(s):
                    continue
                line = lcy_tmpl.format(t=t, p=p, n=n, s=s, a=a)
                fp.write(line)
        print('> {lcy_path}'.format(lcy_path=lcy_path))
        print()
        print('EER% = {eer:.2f}'.format(eer=100 * eer))
    else:
        # fixed-latency evaluation: one DET curve per (key, latency) pair
        results = metric.det_curve()
        logs = []
        for key in sorted(results):
            result = results[key]
            log = {'latency': key}
            for latency in latencies:
                thresholds, fpr, fnr, eer, _ = result[latency]
                #print('EER @ {latency}s = {eer:.2f}%'.format(latency=latency,
                #                                             eer=100 * eer))
                log[latency] = eer
                # save DET curve to <output_prefix>.det.<key>.<latency>s.txt
                det_path = '{output_prefix}.det.{key}.{latency:g}s.txt'.format(
                    output_prefix=output_prefix, key=key, latency=latency)
                det_tmpl = '{t:.9f} {p:.9f} {n:.9f}\n'
                with open(det_path, mode='w') as fp:
                    fp.write('# threshold false_positive_rate false_negative_rate\n')
                    for t, p, n in zip(thresholds, fpr, fnr):
                        line = det_tmpl.format(t=t, p=p, n=n)
                        fp.write(line)
            logs.append(log)
            det_path = '{output_prefix}.det.{key}.XXs.txt'.format(
                output_prefix=output_prefix, key=key)
            print('> {det_path}'.format(det_path=det_path))
        print()
        # summary table: one row per key, one EER column per latency
        df = 100 * pd.DataFrame.from_dict(logs).set_index('latency')[latencies]
        print(tabulate(df, tablefmt="simple",
                       headers=['latency'] + ['EER% @ {l:g}s'.format(l=l) for l in latencies],
                       floatfmt=".2f", numalign="decimal", stralign="left",
                       missingval="", showindex="default", disable_numparse=False))
if __name__ == '__main__':
    arguments = docopt(__doc__, version='Evaluation')
    # protocol (e.g. "Etape.SpeakerDiarization.TV")
    protocol_name = arguments['<database.task.protocol>']
    protocol = get_protocol(protocol_name, progress=True)
    # subset (train, development, or test)
    subset = arguments['--subset']
    collar = float(arguments['--collar'])
    skip_overlap = arguments['--skip-overlap']
    tolerance = float(arguments['--tolerance'])
    if arguments['spotting']:
        # spotting mode reads a JSON hypothesis file (not RTTM)
        hypothesis_json = arguments['<hypothesis.json>']
        with open(hypothesis_json, mode='r') as fp:
            hypotheses = json.load(fp)
        # strip the trailing '.json' to build output file prefixes
        output_prefix = hypothesis_json[:-5]
        latencies = [float(l) for l in arguments['--latency']]
        filters = arguments['--filter']
        if filters:
            # each --filter expression is compiled into a callable of the
            # target speech duration (variable name: 'speech')
            from sympy import sympify, lambdify, symbols
            speech = symbols('speech')
            filter_funcs = []
            # NOTE(review): the empty list above is immediately overwritten.
            filter_funcs = [
                lambdify([speech], sympify(expression))
                for expression in filters]
            # NOTE(review): `~` is *bitwise* NOT — on a plain Python bool it
            # yields -2/-1 (both truthy).  Presumably the lambdified
            # functions return numpy booleans, where `~` acts as logical
            # negation — confirm.
            filter_func = lambda speech: \
                any(~func(speech) for func in filter_funcs)
        else:
            filter_func = None
        spotting(protocol, subset, latencies, hypotheses, output_prefix,
                 filter_func=filter_func)
        sys.exit(0)
    # all other modes read an RTTM hypothesis file
    hypothesis_rttm = arguments['<hypothesis.rttm>']
    try:
        hypotheses = load_rttm(hypothesis_rttm)
    except FileNotFoundError:
        msg = f'Could not find file {hypothesis_rttm}.'
        sys.exit(msg)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        msg = (
            f'Failed to load {hypothesis_rttm}, please check its format '
            f'(only RTTM files are supported).'
        )
        sys.exit(msg)
    if arguments['detection']:
        detection(protocol, subset, hypotheses,
                  collar=collar, skip_overlap=skip_overlap)
    if arguments['segmentation']:
        segmentation(protocol, subset, hypotheses, tolerance=tolerance)
    if arguments['diarization']:
        greedy = arguments['--greedy']
        diarization(protocol, subset, hypotheses, greedy=greedy,
                    collar=collar, skip_overlap=skip_overlap)
    if arguments['identification']:
        identification(protocol, subset, hypotheses,
                       collar=collar, skip_overlap=skip_overlap)
|
pyannote/pyannote-metrics | scripts/pyannote-metrics.py | reindex | python | def reindex(report):
index = list(report.index)
i = index.index('TOTAL')
return report.reindex(index[:i] + index[i+1:] + ['TOTAL']) | Reindex report so that 'TOTAL' is the last row | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/scripts/pyannote-metrics.py#L219-L223 | null | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017-2019 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Herve BREDIN - http://herve.niderb.fr
"""
Evaluation
Usage:
pyannote-metrics.py detection [--subset=<subset> --collar=<seconds> --skip-overlap] <database.task.protocol> <hypothesis.rttm>
pyannote-metrics.py segmentation [--subset=<subset> --tolerance=<seconds>] <database.task.protocol> <hypothesis.rttm>
pyannote-metrics.py diarization [--subset=<subset> --greedy --collar=<seconds> --skip-overlap] <database.task.protocol> <hypothesis.rttm>
pyannote-metrics.py identification [--subset=<subset> --collar=<seconds> --skip-overlap] <database.task.protocol> <hypothesis.rttm>
pyannote-metrics.py spotting [--subset=<subset> --latency=<seconds>... --filter=<expression>...] <database.task.protocol> <hypothesis.json>
pyannote-metrics.py -h | --help
pyannote-metrics.py --version
Options:
<database.task.protocol> Set evaluation protocol (e.g. "Etape.SpeakerDiarization.TV")
  --subset=<subset>          Evaluated subset (train|development|test) [default: test]
--collar=<seconds> Collar, in seconds [default: 0.0].
--skip-overlap Do not evaluate overlap regions.
--tolerance=<seconds> Tolerance, in seconds [default: 0.5].
--greedy Use greedy diarization error rate.
--latency=<seconds> Evaluate with fixed latency.
--filter=<expression> Filter out target trials that do not match the
expression; e.g. use --filter="speech>10" to skip
target trials with less than 10s of speech from
the target.
-h --help Show this screen.
--version Show version.
All modes but "spotting" expect hypothesis using the RTTM file format.
RTTM files contain one line per speech turn, using the following convention:
SPEAKER {uri} 1 {start_time} {duration} <NA> <NA> {speaker_id} <NA> <NA>
* uri: file identifier (as given by pyannote.database protocols)
* start_time: speech turn start time in seconds
* duration: speech turn duration in seconds
* speaker_id: speaker identifier
"spotting" mode expects hypothesis using the following JSON file format.
It should contain a list of trial hypothesis, using the same trial order as
pyannote.database speaker spotting protocols (e.g. protocol.test_trial())
[
{'uri': '<uri>', 'model_id': '<model_id>', 'scores': [[<t1>, <v1>], [<t2>, <v2>], ... [<tn>, <vn>]]},
{'uri': '<uri>', 'model_id': '<model_id>', 'scores': [[<t1>, <v1>], [<t2>, <v2>], ... [<tn>, <vn>]]},
{'uri': '<uri>', 'model_id': '<model_id>', 'scores': [[<t1>, <v1>], [<t2>, <v2>], ... [<tn>, <vn>]]},
...
{'uri': '<uri>', 'model_id': '<model_id>', 'scores': [[<t1>, <v1>], [<t2>, <v2>], ... [<tn>, <vn>]]},
]
* uri: file identifier (as given by pyannote.database protocols)
* model_id: target identifier (as given by pyannote.database protocols)
* [ti, vi]: [time, value] pair indicating that the system has output the
score vi at time ti (e.g. [10.2, 0.2] means that the system
gave a score of 0.2 at time 10.2s).
Calling "spotting" mode will create a bunch of files.
* <hypothesis.det.txt> contains DET curve using the following raw file format:
<threshold> <fpr> <fnr>
* <hypothesis.lcy.txt> contains latency curves using this format:
<threshold> <fpr> <fnr> <speaker_latency> <absolute_latency>
"""
# command line parsing
from docopt import docopt
import sys
import json
import warnings
import functools
import numpy as np
import pandas as pd
from tabulate import tabulate
# import multiprocessing as mp
from pyannote.core import Annotation
from pyannote.database.util import load_rttm
# evaluation protocols
from pyannote.database import get_protocol
from pyannote.database.util import get_annotated
from pyannote.metrics.detection import DetectionErrorRate
from pyannote.metrics.detection import DetectionAccuracy
from pyannote.metrics.detection import DetectionRecall
from pyannote.metrics.detection import DetectionPrecision
from pyannote.metrics.segmentation import SegmentationPurity
from pyannote.metrics.segmentation import SegmentationCoverage
from pyannote.metrics.segmentation import SegmentationPrecision
from pyannote.metrics.segmentation import SegmentationRecall
from pyannote.metrics.diarization import GreedyDiarizationErrorRate
from pyannote.metrics.diarization import DiarizationErrorRate
from pyannote.metrics.diarization import DiarizationPurity
from pyannote.metrics.diarization import DiarizationCoverage
from pyannote.metrics.identification import IdentificationErrorRate
from pyannote.metrics.identification import IdentificationPrecision
from pyannote.metrics.identification import IdentificationRecall
from pyannote.metrics.spotting import LowLatencySpeakerSpotting
# Keep the original handler around, then install a terse replacement that
# prints warnings as '<CategoryName>: <message>'.
showwarning_orig = warnings.showwarning
def showwarning(message, category, *args, **kwargs):
    # NOTE(review): `import sys` is unused here — presumably leftover.
    import sys
    print(category.__name__ + ':', str(message))
warnings.showwarning = showwarning
def get_hypothesis(hypotheses, current_file):
    """Get hypothesis for given file.

    Parameters
    ----------
    hypotheses : `dict`
        Speaker diarization hypothesis provided by `load_rttm`,
        indexed by file URI.
    current_file : `dict`
        File description as given by pyannote.database protocols.

    Returns
    -------
    hypothesis : `pyannote.core.Annotation`
        Hypothesis corresponding to `current_file`.

    Raises
    ------
    ValueError
        When more than one hypothesis URI matches `current_file`.
    """
    uri = current_file['uri']
    if uri in hypotheses:
        return hypotheses[uri]
    # if the exact 'uri' is not available in hypothesis,
    # look for matching substring
    tmp_uri = [u for u in hypotheses if u in uri]
    # no matching speech turns. return empty annotation
    if len(tmp_uri) == 0:
        msg = f'Could not find hypothesis for file "{uri}"; assuming empty file.'
        warnings.warn(msg)
        return Annotation(uri=uri, modality='speaker')
    # exactly one matching file. return it (with its uri fixed up)
    if len(tmp_uri) == 1:
        hypothesis = hypotheses[tmp_uri[0]]
        hypothesis.uri = uri
        return hypothesis
    # more than one matching file: ambiguous, refuse to guess.
    # BUG FIX: the original f-string referenced the undefined name `uris`
    # (raising NameError instead of the intended ValueError) and then
    # needlessly called .format() on the already-interpolated string.
    msg = f'Found too many hypotheses matching file "{uri}" ({tmp_uri}).'
    raise ValueError(msg)
def process_one(item, hypotheses=None, metrics=None):
    """Evaluate all metrics on a single protocol file.

    `item` is a file description dict (pyannote.database style).  Returns
    a dict mapping each metric name to its value on this file; calling a
    metric also accumulates it for the global report.
    """
    reference = item['annotation']
    hypothesis = get_hypothesis(hypotheses, item)
    uem = get_annotated(item)
    # evaluation is restricted to the annotated regions (uem)
    return {key: metric(reference, hypothesis, uem=uem)
            for key, metric in metrics.items()}
def get_reports(protocol, subset, hypotheses, metrics):
    """Run every metric over all files of `subset` and return their reports.

    Metric values are accumulated in place as a side effect of processing
    each file; the return value maps metric name to its pandas report.
    """
    evaluate = functools.partial(
        process_one, hypotheses=hypotheses, metrics=metrics)
    # materialize the file list with the protocol progress bar disabled
    saved_progress = protocol.progress
    protocol.progress = False
    files = list(getattr(protocol, subset)())
    protocol.progress = saved_progress
    n_items = len(files)
    for current_file in files:
        evaluate(current_file)
    # HB. 2018-02-05: parallel processing was removed because it is not
    # clear how to handle the case where the same 'uri' is processed
    # several times in a possibly different order for each sub-metric.
    return {name: metric.report(display=False)
            for name, metric in metrics.items()}
def detection(protocol, subset, hypotheses, collar=0.0, skip_overlap=False):
    """Evaluate speech activity detection and print a per-file report.

    Computes detection error rate, accuracy, precision and recall over
    every file of `subset`, then prints one table with one row per file
    plus a final 'TOTAL' row.
    """
    options = {'collar': collar,
               'skip_overlap': skip_overlap,
               'parallel': True}
    metrics = {
        'error': DetectionErrorRate(**options),
        'accuracy': DetectionAccuracy(**options),
        'precision': DetectionPrecision(**options),
        'recall': DetectionRecall(**options)}
    # accumulates every metric over the whole subset (side effect);
    # the returned per-metric reports are recomputed below instead
    reports = get_reports(protocol, subset, hypotheses, metrics)
    report = metrics['error'].report(display=False)
    accuracy = metrics['accuracy'].report(display=False)
    precision = metrics['precision'].report(display=False)
    recall = metrics['recall'].report(display=False)
    # graft the '%' column of each secondary metric onto the error report
    report['accuracy', '%'] = accuracy[metrics['accuracy'].name, '%']
    report['precision', '%'] = precision[metrics['precision'].name, '%']
    report['recall', '%'] = recall[metrics['recall'].name, '%']
    report = reindex(report)
    # move the three freshly appended columns right after the first one
    columns = list(report.columns)
    report = report[[columns[0]] + columns[-3:] + columns[1:-3]]
    summary = 'Detection (collar = {0:g} ms{1})'.format(
        1000*collar, ', no overlap' if skip_overlap else '')
    headers = [summary] + \
              [report.columns[i][0] for i in range(4)] + \
              ['%' if c[1] == '%' else c[0] for c in report.columns[4:]]
    print(tabulate(report, headers=headers, tablefmt="simple",
                   floatfmt=".2f", numalign="decimal", stralign="left",
                   missingval="", showindex="default", disable_numparse=False))
def segmentation(protocol, subset, hypotheses, tolerance=0.5):
    """Evaluate segmentation (coverage, purity, precision, recall).

    Accumulates each metric over every file of `subset`, then prints one
    table with one row per file plus a final 'TOTAL' row.
    """
    options = {'tolerance': tolerance, 'parallel': True}
    names = ('coverage', 'purity', 'precision', 'recall')
    classes = (SegmentationCoverage, SegmentationPurity,
               SegmentationPrecision, SegmentationRecall)
    metrics = {name: cls(**options) for name, cls in zip(names, classes)}
    # running the metrics over the whole subset happens here, as a side
    # effect of building the (otherwise unused) per-metric reports
    reports = get_reports(protocol, subset, hypotheses, metrics)
    # keep only the value column of each per-metric report
    series = [metrics[name].report(display=False)[metrics[name].name]
              for name in names]
    report = reindex(pd.concat(series, axis=1))
    headers = ['Segmentation (tolerance = {0:g} ms)'.format(1000*tolerance),
               'coverage', 'purity', 'precision', 'recall']
    print(tabulate(report, headers=headers, tablefmt="simple",
                   floatfmt=".2f", numalign="decimal", stralign="left",
                   missingval="", showindex="default", disable_numparse=False))
def diarization(protocol, subset, hypotheses, greedy=False,
                collar=0.0, skip_overlap=False):
    """Evaluate diarization (error rate, purity, coverage) and print a table.

    When `greedy` is True, uses the greedy (rather than optimal) mapping
    between reference and hypothesis labels.
    """
    options = {'collar': collar,
               'skip_overlap': skip_overlap,
               'parallel': True}
    metrics = {
        'purity': DiarizationPurity(**options),
        'coverage': DiarizationCoverage(**options)}
    if greedy:
        metrics['error'] = GreedyDiarizationErrorRate(**options)
    else:
        metrics['error'] = DiarizationErrorRate(**options)
    # accumulates every metric over the whole subset (side effect);
    # the returned per-metric reports are recomputed below instead
    reports = get_reports(protocol, subset, hypotheses, metrics)
    report = metrics['error'].report(display=False)
    purity = metrics['purity'].report(display=False)
    coverage = metrics['coverage'].report(display=False)
    # graft the '%' column of purity/coverage onto the error report
    report['purity', '%'] = purity[metrics['purity'].name, '%']
    report['coverage', '%'] = coverage[metrics['coverage'].name, '%']
    # move the two freshly appended columns right after the first one
    columns = list(report.columns)
    report = report[[columns[0]] + columns[-2:] + columns[1:-2]]
    report = reindex(report)
    summary = 'Diarization ({0:s}collar = {1:g} ms{2})'.format(
        'greedy, ' if greedy else '',
        1000 * collar,
        ', no overlap' if skip_overlap else '')
    headers = [summary] + \
              [report.columns[i][0] for i in range(3)] + \
              ['%' if c[1] == '%' else c[0] for c in report.columns[3:]]
    print(tabulate(report, headers=headers, tablefmt="simple",
                   floatfmt=".2f", numalign="decimal", stralign="left",
                   missingval="", showindex="default", disable_numparse=False))
def identification(protocol, subset, hypotheses,
                   collar=0.0, skip_overlap=False):
    """Evaluate identification (error rate, precision, recall) and print a table.

    Prints one table with one row per file plus a final 'TOTAL' row.
    """
    options = {'collar': collar,
               'skip_overlap': skip_overlap,
               'parallel': True}
    metrics = {
        'error': IdentificationErrorRate(**options),
        'precision': IdentificationPrecision(**options),
        'recall': IdentificationRecall(**options)}
    # accumulates every metric over the whole subset (side effect);
    # the returned per-metric reports are recomputed below instead
    reports = get_reports(protocol, subset, hypotheses, metrics)
    report = metrics['error'].report(display=False)
    precision = metrics['precision'].report(display=False)
    recall = metrics['recall'].report(display=False)
    # graft the '%' column of precision/recall onto the error report
    report['precision', '%'] = precision[metrics['precision'].name, '%']
    report['recall', '%'] = recall[metrics['recall'].name, '%']
    # move the two freshly appended columns right after the first one
    columns = list(report.columns)
    report = report[[columns[0]] + columns[-2:] + columns[1:-2]]
    report = reindex(report)
    # BUG FIX: the template previously used placeholders {1:g} and {2}
    # with only two positional arguments (indices 0 and 1), which raised
    # IndexError every time this mode ran.  Renumbered to {0:g}/{1}
    # (compare the 3-argument template in `diarization` it was copied from).
    summary = 'Identification (collar = {0:g} ms{1})'.format(
        1000 * collar,
        ', no overlap' if skip_overlap else '')
    headers = [summary] + \
              [report.columns[i][0] for i in range(3)] + \
              ['%' if c[1] == '%' else c[0] for c in report.columns[3:]]
    print(tabulate(report, headers=headers, tablefmt="simple",
                   floatfmt=".2f", numalign="decimal", stralign="left",
                   missingval="", showindex="default", disable_numparse=False))
def spotting(protocol, subset, latencies, hypotheses, output_prefix,
             filter_func=None):
    """Evaluate low-latency speaker spotting and dump DET/latency curves.

    Parameters
    ----------
    protocol : pyannote.database speaker spotting protocol
    subset : str
        One of 'train', 'development', 'test'.
    latencies : list of float
        Fixed latencies to evaluate at; when empty, operating thresholds
        are estimated from the pooled score distribution instead.
    hypotheses : list of dict
        One {'uri', 'model_id', 'scores'} dict per trial, in the same
        order as the protocol's trials.
    output_prefix : str
        Prefix of the .det*/.lcy* files written to disk.
    filter_func : callable, optional
        Called with the target speech duration of each *target* trial;
        trials for which it returns True are skipped.
    """
    # per-trial score tuples are gathered to estimate thresholds later on
    if not latencies:
        Scores = []
    protocol.diarization = False
    trials = getattr(protocol, '{subset}_trial'.format(subset=subset))()
    # first pass: sanity-check hypotheses against trials (and gather scores)
    for i, (current_trial, hypothesis) in enumerate(zip(trials, hypotheses)):
        # check trial/hypothesis target consistency
        try:
            assert current_trial['model_id'] == hypothesis['model_id']
        except AssertionError as e:
            msg = ('target mismatch in trial #{i} '
                   '(found: {found}, should be: {should_be})')
            raise ValueError(
                msg.format(i=i, found=hypothesis['model_id'],
                           should_be=current_trial['model_id']))
        # check trial/hypothesis file consistency
        try:
            assert current_trial['uri'] == hypothesis['uri']
        except AssertionError as e:
            msg = ('file mismatch in trial #{i} '
                   '(found: {found}, should be: {should_be})')
            raise ValueError(
                msg.format(i=i, found=hypothesis['uri'],
                           should_be=current_trial['uri']))
        # check at least one score is provided
        try:
            assert len(hypothesis['scores']) > 0
        except AssertionError as e:
            msg = ('empty list of scores in trial #{i}.')
            raise ValueError(msg.format(i=i))
        timestamps, scores = zip(*hypothesis['scores'])
        if not latencies:
            Scores.append(scores)
        # check trial/hypothesis timerange consistency
        try_with = current_trial['try_with']
        try:
            assert min(timestamps) >= try_with.start
        except AssertionError as e:
            msg = ('incorrect timestamp in trial #{i} '
                   '(found: {found:g}, should be: >= {should_be:g})')
            raise ValueError(
                msg.format(i=i,
                           found=min(timestamps),
                           should_be=try_with.start))
    if not latencies:
        # estimate best set of thresholds: dense percentiles of the pooled
        # score distribution, with extra resolution near 0% and 100%
        scores = np.concatenate(Scores)
        epsilons = np.array(
            [n * 10**(-e) for e in range(4, 1, -1) for n in range(1, 10)])
        percentile = np.concatenate([epsilons, np.arange(0.1, 100., 0.1), 100 - epsilons[::-1]])
        thresholds = np.percentile(scores, percentile)
    if not latencies:
        metric = LowLatencySpeakerSpotting(thresholds=thresholds)
    else:
        metric = LowLatencySpeakerSpotting(latencies=latencies)
    # second pass: actually accumulate the metric, trial by trial
    trials = getattr(protocol, '{subset}_trial'.format(subset=subset))()
    for i, (current_trial, hypothesis) in enumerate(zip(trials, hypotheses)):
        if filter_func is not None:
            speech = current_trial['reference'].duration()
            # a trial is a "target trial" when the target actually speaks
            target_trial = speech > 0
            if target_trial and filter_func(speech):
                continue
        reference = current_trial['reference']
        metric(reference, hypothesis['scores'])
    if not latencies:
        thresholds, fpr, fnr, eer, _ = metric.det_curve(return_latency=False)
        # save DET curve to <output_prefix>.det.txt
        det_path = '{output_prefix}.det.txt'.format(output_prefix=output_prefix)
        det_tmpl = '{t:.9f} {p:.9f} {n:.9f}\n'
        with open(det_path, mode='w') as fp:
            fp.write('# threshold false_positive_rate false_negative_rate\n')
            for t, p, n in zip(thresholds, fpr, fnr):
                line = det_tmpl.format(t=t, p=p, n=n)
                fp.write(line)
        print('> {det_path}'.format(det_path=det_path))
        thresholds, fpr, fnr, _, _, speaker_lcy, absolute_lcy = \
            metric.det_curve(return_latency=True)
        # save latency curve to <output_prefix>.lcy.txt
        lcy_path = '{output_prefix}.lcy.txt'.format(output_prefix=output_prefix)
        lcy_tmpl = '{t:.9f} {p:.9f} {n:.9f} {s:.6f} {a:.6f}\n'
        with open(lcy_path, mode='w') as fp:
            fp.write('# threshold false_positive_rate false_negative_rate speaker_latency absolute_latency\n')
            for t, p, n, s, a in zip(thresholds, fpr, fnr, speaker_lcy, absolute_lcy):
                # skip degenerate operating points (fpr == 1 or undefined
                # speaker latency)
                if p == 1:
                    continue
                if np.isnan(s):
                    continue
                line = lcy_tmpl.format(t=t, p=p, n=n, s=s, a=a)
                fp.write(line)
        print('> {lcy_path}'.format(lcy_path=lcy_path))
        print()
        print('EER% = {eer:.2f}'.format(eer=100 * eer))
    else:
        # fixed-latency evaluation: one DET curve per (key, latency) pair
        results = metric.det_curve()
        logs = []
        for key in sorted(results):
            result = results[key]
            log = {'latency': key}
            for latency in latencies:
                thresholds, fpr, fnr, eer, _ = result[latency]
                #print('EER @ {latency}s = {eer:.2f}%'.format(latency=latency,
                #                                             eer=100 * eer))
                log[latency] = eer
                # save DET curve to <output_prefix>.det.<key>.<latency>s.txt
                det_path = '{output_prefix}.det.{key}.{latency:g}s.txt'.format(
                    output_prefix=output_prefix, key=key, latency=latency)
                det_tmpl = '{t:.9f} {p:.9f} {n:.9f}\n'
                with open(det_path, mode='w') as fp:
                    fp.write('# threshold false_positive_rate false_negative_rate\n')
                    for t, p, n in zip(thresholds, fpr, fnr):
                        line = det_tmpl.format(t=t, p=p, n=n)
                        fp.write(line)
            logs.append(log)
            det_path = '{output_prefix}.det.{key}.XXs.txt'.format(
                output_prefix=output_prefix, key=key)
            print('> {det_path}'.format(det_path=det_path))
        print()
        # summary table: one row per key, one EER column per latency
        df = 100 * pd.DataFrame.from_dict(logs).set_index('latency')[latencies]
        print(tabulate(df, tablefmt="simple",
                       headers=['latency'] + ['EER% @ {l:g}s'.format(l=l) for l in latencies],
                       floatfmt=".2f", numalign="decimal", stralign="left",
                       missingval="", showindex="default", disable_numparse=False))
if __name__ == '__main__':
    arguments = docopt(__doc__, version='Evaluation')
    # protocol (e.g. "Etape.SpeakerDiarization.TV")
    protocol_name = arguments['<database.task.protocol>']
    protocol = get_protocol(protocol_name, progress=True)
    # subset (train, development, or test)
    subset = arguments['--subset']
    collar = float(arguments['--collar'])
    skip_overlap = arguments['--skip-overlap']
    tolerance = float(arguments['--tolerance'])
    if arguments['spotting']:
        # spotting mode reads a JSON hypothesis file (not RTTM)
        hypothesis_json = arguments['<hypothesis.json>']
        with open(hypothesis_json, mode='r') as fp:
            hypotheses = json.load(fp)
        # strip the trailing '.json' to build output file prefixes
        output_prefix = hypothesis_json[:-5]
        latencies = [float(l) for l in arguments['--latency']]
        filters = arguments['--filter']
        if filters:
            # each --filter expression is compiled into a callable of the
            # target speech duration (variable name: 'speech')
            from sympy import sympify, lambdify, symbols
            speech = symbols('speech')
            filter_funcs = []
            # NOTE(review): the empty list above is immediately overwritten.
            filter_funcs = [
                lambdify([speech], sympify(expression))
                for expression in filters]
            # NOTE(review): `~` is *bitwise* NOT — on a plain Python bool it
            # yields -2/-1 (both truthy).  Presumably the lambdified
            # functions return numpy booleans, where `~` acts as logical
            # negation — confirm.
            filter_func = lambda speech: \
                any(~func(speech) for func in filter_funcs)
        else:
            filter_func = None
        spotting(protocol, subset, latencies, hypotheses, output_prefix,
                 filter_func=filter_func)
        sys.exit(0)
    # all other modes read an RTTM hypothesis file
    hypothesis_rttm = arguments['<hypothesis.rttm>']
    try:
        hypotheses = load_rttm(hypothesis_rttm)
    except FileNotFoundError:
        msg = f'Could not find file {hypothesis_rttm}.'
        sys.exit(msg)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        msg = (
            f'Failed to load {hypothesis_rttm}, please check its format '
            f'(only RTTM files are supported).'
        )
        sys.exit(msg)
    if arguments['detection']:
        detection(protocol, subset, hypotheses,
                  collar=collar, skip_overlap=skip_overlap)
    if arguments['segmentation']:
        segmentation(protocol, subset, hypotheses, tolerance=tolerance)
    if arguments['diarization']:
        greedy = arguments['--greedy']
        diarization(protocol, subset, hypotheses, greedy=greedy,
                    collar=collar, skip_overlap=skip_overlap)
    if arguments['identification']:
        identification(protocol, subset, hypotheses,
                       collar=collar, skip_overlap=skip_overlap)
|
pyannote/pyannote-metrics | pyannote/metrics/plot/binary_classification.py | plot_distributions | python | def plot_distributions(y_true, scores, save_to, xlim=None, nbins=100, ymax=3., dpi=150):
plt.figure(figsize=(12, 12))
if xlim is None:
xlim = (np.min(scores), np.max(scores))
bins = np.linspace(xlim[0], xlim[1], nbins)
plt.hist(scores[y_true], bins=bins, color='g', alpha=0.5, normed=True)
plt.hist(scores[~y_true], bins=bins, color='r', alpha=0.5, normed=True)
# TODO heuristic to estimate ymax from nbins and xlim
plt.ylim(0, ymax)
plt.tight_layout()
plt.savefig(save_to + '.scores.png', dpi=dpi)
plt.savefig(save_to + '.scores.eps')
plt.close()
return True | Scores distributions
This function will create (and overwrite) the following files:
- {save_to}.scores.png
- {save_to}.scores.eps
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
save_to : str
Files path prefix | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/plot/binary_classification.py#L42-L75 | null | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
import matplotlib
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from pyannote.metrics.binary_classification import det_curve
from pyannote.metrics.binary_classification import precision_recall_curve
def plot_det_curve(y_true, scores, save_to,
distances=False, dpi=150):
"""DET curve
This function will create (and overwrite) the following files:
- {save_to}.det.png
- {save_to}.det.eps
- {save_to}.det.txt
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
save_to : str
Files path prefix.
distances : boolean, optional
When True, indicate that `scores` are actually `distances`
dpi : int, optional
Resolution of .png file. Defaults to 150.
Returns
-------
eer : float
Equal error rate
"""
fpr, fnr, thresholds, eer = det_curve(y_true, scores, distances=distances)
# plot DET curve
plt.figure(figsize=(12, 12))
plt.loglog(fpr, fnr, 'b')
plt.loglog([eer], [eer], 'bo')
plt.xlabel('False Positive Rate')
plt.ylabel('False Negative Rate')
plt.xlim(1e-2, 1.)
plt.ylim(1e-2, 1.)
plt.grid(True)
plt.tight_layout()
plt.savefig(save_to + '.det.png', dpi=dpi)
plt.savefig(save_to + '.det.eps')
plt.close()
# save DET curve in text file
txt = save_to + '.det.txt'
line = '{t:.6f} {fp:.6f} {fn:.6f}\n'
with open(txt, 'w') as f:
for i, (t, fp, fn) in enumerate(zip(thresholds, fpr, fnr)):
f.write(line.format(t=t, fp=fp, fn=fn))
return eer
def plot_precision_recall_curve(y_true, scores, save_to,
distances=False, dpi=150):
"""Precision/recall curve
This function will create (and overwrite) the following files:
- {save_to}.precision_recall.png
- {save_to}.precision_recall.eps
- {save_to}.precision_recall.txt
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
save_to : str
Files path prefix.
distances : boolean, optional
When True, indicate that `scores` are actually `distances`
dpi : int, optional
Resolution of .png file. Defaults to 150.
Returns
-------
auc : float
Area under precision/recall curve
"""
precision, recall, thresholds, auc = precision_recall_curve(
y_true, scores, distances=distances)
# plot P/R curve
plt.figure(figsize=(12, 12))
plt.plot(recall, precision, 'b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.tight_layout()
plt.savefig(save_to + '.precision_recall.png', dpi=dpi)
plt.savefig(save_to + '.precision_recall.eps')
plt.close()
# save P/R curve in text file
txt = save_to + '.precision_recall.txt'
line = '{t:.6f} {p:.6f} {r:.6f}\n'
with open(txt, 'w') as f:
for i, (t, p, r) in enumerate(zip(thresholds, precision, recall)):
f.write(line.format(t=t, p=p, r=r))
return auc
|
pyannote/pyannote-metrics | pyannote/metrics/plot/binary_classification.py | plot_det_curve | python | def plot_det_curve(y_true, scores, save_to,
distances=False, dpi=150):
fpr, fnr, thresholds, eer = det_curve(y_true, scores, distances=distances)
# plot DET curve
plt.figure(figsize=(12, 12))
plt.loglog(fpr, fnr, 'b')
plt.loglog([eer], [eer], 'bo')
plt.xlabel('False Positive Rate')
plt.ylabel('False Negative Rate')
plt.xlim(1e-2, 1.)
plt.ylim(1e-2, 1.)
plt.grid(True)
plt.tight_layout()
plt.savefig(save_to + '.det.png', dpi=dpi)
plt.savefig(save_to + '.det.eps')
plt.close()
# save DET curve in text file
txt = save_to + '.det.txt'
line = '{t:.6f} {fp:.6f} {fn:.6f}\n'
with open(txt, 'w') as f:
for i, (t, fp, fn) in enumerate(zip(thresholds, fpr, fnr)):
f.write(line.format(t=t, fp=fp, fn=fn))
return eer | DET curve
This function will create (and overwrite) the following files:
- {save_to}.det.png
- {save_to}.det.eps
- {save_to}.det.txt
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
save_to : str
Files path prefix.
distances : boolean, optional
When True, indicate that `scores` are actually `distances`
dpi : int, optional
Resolution of .png file. Defaults to 150.
Returns
-------
eer : float
Equal error rate | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/plot/binary_classification.py#L78-L129 | [
"def det_curve(y_true, scores, distances=False):\n \"\"\"DET curve\n\n Parameters\n ----------\n y_true : (n_samples, ) array-like\n Boolean reference.\n scores : (n_samples, ) array-like\n Predicted score.\n distances : boolean, optional\n When True, indicate that `scores` are actually `distances`\n\n Returns\n -------\n fpr : numpy array\n False alarm rate\n fnr : numpy array\n False rejection rate\n thresholds : numpy array\n Corresponding thresholds\n eer : float\n Equal error rate\n \"\"\"\n\n if distances:\n scores = -scores\n\n # compute false positive and false negative rates\n # (a.k.a. false alarm and false rejection rates)\n fpr, tpr, thresholds = sklearn.metrics.roc_curve(\n y_true, scores, pos_label=True)\n fnr = 1 - tpr\n if distances:\n thresholds = -thresholds\n\n # estimate equal error rate\n eer_index = np.where(fpr > fnr)[0][0]\n eer = .25 * (fpr[eer_index-1] + fpr[eer_index] +\n fnr[eer_index-1] + fnr[eer_index])\n\n return fpr, fnr, thresholds, eer\n"
] | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
import matplotlib
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from pyannote.metrics.binary_classification import det_curve
from pyannote.metrics.binary_classification import precision_recall_curve
def plot_distributions(y_true, scores, save_to, xlim=None, nbins=100, ymax=3., dpi=150):
"""Scores distributions
This function will create (and overwrite) the following files:
- {save_to}.scores.png
- {save_to}.scores.eps
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
save_to : str
Files path prefix
"""
plt.figure(figsize=(12, 12))
if xlim is None:
xlim = (np.min(scores), np.max(scores))
bins = np.linspace(xlim[0], xlim[1], nbins)
plt.hist(scores[y_true], bins=bins, color='g', alpha=0.5, normed=True)
plt.hist(scores[~y_true], bins=bins, color='r', alpha=0.5, normed=True)
# TODO heuristic to estimate ymax from nbins and xlim
plt.ylim(0, ymax)
plt.tight_layout()
plt.savefig(save_to + '.scores.png', dpi=dpi)
plt.savefig(save_to + '.scores.eps')
plt.close()
return True
def plot_precision_recall_curve(y_true, scores, save_to,
distances=False, dpi=150):
"""Precision/recall curve
This function will create (and overwrite) the following files:
- {save_to}.precision_recall.png
- {save_to}.precision_recall.eps
- {save_to}.precision_recall.txt
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
save_to : str
Files path prefix.
distances : boolean, optional
When True, indicate that `scores` are actually `distances`
dpi : int, optional
Resolution of .png file. Defaults to 150.
Returns
-------
auc : float
Area under precision/recall curve
"""
precision, recall, thresholds, auc = precision_recall_curve(
y_true, scores, distances=distances)
# plot P/R curve
plt.figure(figsize=(12, 12))
plt.plot(recall, precision, 'b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.tight_layout()
plt.savefig(save_to + '.precision_recall.png', dpi=dpi)
plt.savefig(save_to + '.precision_recall.eps')
plt.close()
# save P/R curve in text file
txt = save_to + '.precision_recall.txt'
line = '{t:.6f} {p:.6f} {r:.6f}\n'
with open(txt, 'w') as f:
for i, (t, p, r) in enumerate(zip(thresholds, precision, recall)):
f.write(line.format(t=t, p=p, r=r))
return auc
|
pyannote/pyannote-metrics | pyannote/metrics/plot/binary_classification.py | plot_precision_recall_curve | python | def plot_precision_recall_curve(y_true, scores, save_to,
distances=False, dpi=150):
precision, recall, thresholds, auc = precision_recall_curve(
y_true, scores, distances=distances)
# plot P/R curve
plt.figure(figsize=(12, 12))
plt.plot(recall, precision, 'b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.tight_layout()
plt.savefig(save_to + '.precision_recall.png', dpi=dpi)
plt.savefig(save_to + '.precision_recall.eps')
plt.close()
# save P/R curve in text file
txt = save_to + '.precision_recall.txt'
line = '{t:.6f} {p:.6f} {r:.6f}\n'
with open(txt, 'w') as f:
for i, (t, p, r) in enumerate(zip(thresholds, precision, recall)):
f.write(line.format(t=t, p=p, r=r))
return auc | Precision/recall curve
This function will create (and overwrite) the following files:
- {save_to}.precision_recall.png
- {save_to}.precision_recall.eps
- {save_to}.precision_recall.txt
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
save_to : str
Files path prefix.
distances : boolean, optional
When True, indicate that `scores` are actually `distances`
dpi : int, optional
Resolution of .png file. Defaults to 150.
Returns
-------
auc : float
Area under precision/recall curve | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/plot/binary_classification.py#L132-L183 | [
"def precision_recall_curve(y_true, scores, distances=False):\n \"\"\"Precision-recall curve\n\n Parameters\n ----------\n y_true : (n_samples, ) array-like\n Boolean reference.\n scores : (n_samples, ) array-like\n Predicted score.\n distances : boolean, optional\n When True, indicate that `scores` are actually `distances`\n\n Returns\n -------\n precision : numpy array\n Precision\n recall : numpy array\n Recall\n thresholds : numpy array\n Corresponding thresholds\n auc : float\n Area under curve\n\n \"\"\"\n\n if distances:\n scores = -scores\n\n precision, recall, thresholds = sklearn.metrics.precision_recall_curve(\n y_true, scores, pos_label=True)\n\n if distances:\n thresholds = -thresholds\n\n auc = sklearn.metrics.auc(precision, recall, reorder=True)\n\n return precision, recall, thresholds, auc\n"
] | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
import matplotlib
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from pyannote.metrics.binary_classification import det_curve
from pyannote.metrics.binary_classification import precision_recall_curve
def plot_distributions(y_true, scores, save_to, xlim=None, nbins=100, ymax=3., dpi=150):
"""Scores distributions
This function will create (and overwrite) the following files:
- {save_to}.scores.png
- {save_to}.scores.eps
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
save_to : str
Files path prefix
"""
plt.figure(figsize=(12, 12))
if xlim is None:
xlim = (np.min(scores), np.max(scores))
bins = np.linspace(xlim[0], xlim[1], nbins)
plt.hist(scores[y_true], bins=bins, color='g', alpha=0.5, normed=True)
plt.hist(scores[~y_true], bins=bins, color='r', alpha=0.5, normed=True)
# TODO heuristic to estimate ymax from nbins and xlim
plt.ylim(0, ymax)
plt.tight_layout()
plt.savefig(save_to + '.scores.png', dpi=dpi)
plt.savefig(save_to + '.scores.eps')
plt.close()
return True
def plot_det_curve(y_true, scores, save_to,
distances=False, dpi=150):
"""DET curve
This function will create (and overwrite) the following files:
- {save_to}.det.png
- {save_to}.det.eps
- {save_to}.det.txt
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
save_to : str
Files path prefix.
distances : boolean, optional
When True, indicate that `scores` are actually `distances`
dpi : int, optional
Resolution of .png file. Defaults to 150.
Returns
-------
eer : float
Equal error rate
"""
fpr, fnr, thresholds, eer = det_curve(y_true, scores, distances=distances)
# plot DET curve
plt.figure(figsize=(12, 12))
plt.loglog(fpr, fnr, 'b')
plt.loglog([eer], [eer], 'bo')
plt.xlabel('False Positive Rate')
plt.ylabel('False Negative Rate')
plt.xlim(1e-2, 1.)
plt.ylim(1e-2, 1.)
plt.grid(True)
plt.tight_layout()
plt.savefig(save_to + '.det.png', dpi=dpi)
plt.savefig(save_to + '.det.eps')
plt.close()
# save DET curve in text file
txt = save_to + '.det.txt'
line = '{t:.6f} {fp:.6f} {fn:.6f}\n'
with open(txt, 'w') as f:
for i, (t, fp, fn) in enumerate(zip(thresholds, fpr, fnr)):
f.write(line.format(t=t, fp=fp, fn=fn))
return eer
|
pyannote/pyannote-metrics | pyannote/metrics/binary_classification.py | det_curve | python | def det_curve(y_true, scores, distances=False):
if distances:
scores = -scores
# compute false positive and false negative rates
# (a.k.a. false alarm and false rejection rates)
fpr, tpr, thresholds = sklearn.metrics.roc_curve(
y_true, scores, pos_label=True)
fnr = 1 - tpr
if distances:
thresholds = -thresholds
# estimate equal error rate
eer_index = np.where(fpr > fnr)[0][0]
eer = .25 * (fpr[eer_index-1] + fpr[eer_index] +
fnr[eer_index-1] + fnr[eer_index])
return fpr, fnr, thresholds, eer | DET curve
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
distances : boolean, optional
When True, indicate that `scores` are actually `distances`
Returns
-------
fpr : numpy array
False alarm rate
fnr : numpy array
False rejection rate
thresholds : numpy array
Corresponding thresholds
eer : float
Equal error rate | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/binary_classification.py#L38-L78 | null | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016-2017 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
import sklearn.metrics
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection._split import _CVIterableWrapper
from collections import Counter
def precision_recall_curve(y_true, scores, distances=False):
"""Precision-recall curve
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
distances : boolean, optional
When True, indicate that `scores` are actually `distances`
Returns
-------
precision : numpy array
Precision
recall : numpy array
Recall
thresholds : numpy array
Corresponding thresholds
auc : float
Area under curve
"""
if distances:
scores = -scores
precision, recall, thresholds = sklearn.metrics.precision_recall_curve(
y_true, scores, pos_label=True)
if distances:
thresholds = -thresholds
auc = sklearn.metrics.auc(precision, recall, reorder=True)
return precision, recall, thresholds, auc
class _Passthrough(BaseEstimator):
"""Dummy binary classifier used by score Calibration class"""
def __init__(self):
super(_Passthrough, self).__init__()
self.classes_ = np.array([False, True], dtype=np.bool)
def fit(self, scores, y_true):
return self
def decision_function(self, scores):
"""Returns the input scores unchanged"""
return scores
class Calibration(object):
"""Probability calibration for binary classification tasks
Parameters
----------
method : {'isotonic', 'sigmoid'}, optional
See `CalibratedClassifierCV`. Defaults to 'isotonic'.
equal_priors : bool, optional
Set to True to force equal priors. Default behavior is to estimate
priors from the data itself.
Usage
-----
>>> calibration = Calibration()
>>> calibration.fit(train_score, train_y)
>>> test_probability = calibration.transform(test_score)
See also
--------
CalibratedClassifierCV
"""
def __init__(self, equal_priors=False, method='isotonic'):
super(Calibration, self).__init__()
self.method = method
self.equal_priors = equal_priors
def fit(self, scores, y_true):
"""Train calibration
Parameters
----------
scores : (n_samples, ) array-like
Uncalibrated scores.
y_true : (n_samples, ) array-like
True labels (dtype=bool).
"""
# to force equal priors, randomly select (and average over)
# up to fifty balanced (i.e. #true == #false) calibration sets.
if self.equal_priors:
counter = Counter(y_true)
positive, negative = counter[True], counter[False]
if positive > negative:
majority, minority = True, False
n_majority, n_minority = positive, negative
else:
majority, minority = False, True
n_majority, n_minority = negative, positive
n_splits = min(50, n_majority // n_minority + 1)
minority_index = np.where(y_true == minority)[0]
majority_index = np.where(y_true == majority)[0]
cv = []
for _ in range(n_splits):
test_index = np.hstack([
np.random.choice(majority_index,
size=n_minority,
replace=False),
minority_index])
cv.append(([], test_index))
cv = _CVIterableWrapper(cv)
# to estimate priors from the data itself, use the whole set
else:
cv = 'prefit'
self.calibration_ = CalibratedClassifierCV(
base_estimator=_Passthrough(), method=self.method, cv=cv)
self.calibration_.fit(scores.reshape(-1, 1), y_true)
return self
def transform(self, scores):
"""Calibrate scores into probabilities
Parameters
----------
scores : (n_samples, ) array-like
Uncalibrated scores.
Returns
-------
probabilities : (n_samples, ) array-like
Calibrated scores (i.e. probabilities)
"""
return self.calibration_.predict_proba(scores.reshape(-1, 1))[:, 1]
|
pyannote/pyannote-metrics | pyannote/metrics/binary_classification.py | precision_recall_curve | python | def precision_recall_curve(y_true, scores, distances=False):
if distances:
scores = -scores
precision, recall, thresholds = sklearn.metrics.precision_recall_curve(
y_true, scores, pos_label=True)
if distances:
thresholds = -thresholds
auc = sklearn.metrics.auc(precision, recall, reorder=True)
return precision, recall, thresholds, auc | Precision-recall curve
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
distances : boolean, optional
When True, indicate that `scores` are actually `distances`
Returns
-------
precision : numpy array
Precision
recall : numpy array
Recall
thresholds : numpy array
Corresponding thresholds
auc : float
Area under curve | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/binary_classification.py#L81-L117 | null | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016-2017 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
import sklearn.metrics
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection._split import _CVIterableWrapper
from collections import Counter
def det_curve(y_true, scores, distances=False):
"""DET curve
Parameters
----------
y_true : (n_samples, ) array-like
Boolean reference.
scores : (n_samples, ) array-like
Predicted score.
distances : boolean, optional
When True, indicate that `scores` are actually `distances`
Returns
-------
fpr : numpy array
False alarm rate
fnr : numpy array
False rejection rate
thresholds : numpy array
Corresponding thresholds
eer : float
Equal error rate
"""
if distances:
scores = -scores
# compute false positive and false negative rates
# (a.k.a. false alarm and false rejection rates)
fpr, tpr, thresholds = sklearn.metrics.roc_curve(
y_true, scores, pos_label=True)
fnr = 1 - tpr
if distances:
thresholds = -thresholds
# estimate equal error rate
eer_index = np.where(fpr > fnr)[0][0]
eer = .25 * (fpr[eer_index-1] + fpr[eer_index] +
fnr[eer_index-1] + fnr[eer_index])
return fpr, fnr, thresholds, eer
class _Passthrough(BaseEstimator):
"""Dummy binary classifier used by score Calibration class"""
def __init__(self):
super(_Passthrough, self).__init__()
self.classes_ = np.array([False, True], dtype=np.bool)
def fit(self, scores, y_true):
return self
def decision_function(self, scores):
"""Returns the input scores unchanged"""
return scores
class Calibration(object):
"""Probability calibration for binary classification tasks
Parameters
----------
method : {'isotonic', 'sigmoid'}, optional
See `CalibratedClassifierCV`. Defaults to 'isotonic'.
equal_priors : bool, optional
Set to True to force equal priors. Default behavior is to estimate
priors from the data itself.
Usage
-----
>>> calibration = Calibration()
>>> calibration.fit(train_score, train_y)
>>> test_probability = calibration.transform(test_score)
See also
--------
CalibratedClassifierCV
"""
def __init__(self, equal_priors=False, method='isotonic'):
super(Calibration, self).__init__()
self.method = method
self.equal_priors = equal_priors
def fit(self, scores, y_true):
"""Train calibration
Parameters
----------
scores : (n_samples, ) array-like
Uncalibrated scores.
y_true : (n_samples, ) array-like
True labels (dtype=bool).
"""
# to force equal priors, randomly select (and average over)
# up to fifty balanced (i.e. #true == #false) calibration sets.
if self.equal_priors:
counter = Counter(y_true)
positive, negative = counter[True], counter[False]
if positive > negative:
majority, minority = True, False
n_majority, n_minority = positive, negative
else:
majority, minority = False, True
n_majority, n_minority = negative, positive
n_splits = min(50, n_majority // n_minority + 1)
minority_index = np.where(y_true == minority)[0]
majority_index = np.where(y_true == majority)[0]
cv = []
for _ in range(n_splits):
test_index = np.hstack([
np.random.choice(majority_index,
size=n_minority,
replace=False),
minority_index])
cv.append(([], test_index))
cv = _CVIterableWrapper(cv)
# to estimate priors from the data itself, use the whole set
else:
cv = 'prefit'
self.calibration_ = CalibratedClassifierCV(
base_estimator=_Passthrough(), method=self.method, cv=cv)
self.calibration_.fit(scores.reshape(-1, 1), y_true)
return self
def transform(self, scores):
"""Calibrate scores into probabilities
Parameters
----------
scores : (n_samples, ) array-like
Uncalibrated scores.
Returns
-------
probabilities : (n_samples, ) array-like
Calibrated scores (i.e. probabilities)
"""
return self.calibration_.predict_proba(scores.reshape(-1, 1))[:, 1]
|
pyannote/pyannote-metrics | pyannote/metrics/binary_classification.py | Calibration.fit | python | def fit(self, scores, y_true):
# to force equal priors, randomly select (and average over)
# up to fifty balanced (i.e. #true == #false) calibration sets.
if self.equal_priors:
counter = Counter(y_true)
positive, negative = counter[True], counter[False]
if positive > negative:
majority, minority = True, False
n_majority, n_minority = positive, negative
else:
majority, minority = False, True
n_majority, n_minority = negative, positive
n_splits = min(50, n_majority // n_minority + 1)
minority_index = np.where(y_true == minority)[0]
majority_index = np.where(y_true == majority)[0]
cv = []
for _ in range(n_splits):
test_index = np.hstack([
np.random.choice(majority_index,
size=n_minority,
replace=False),
minority_index])
cv.append(([], test_index))
cv = _CVIterableWrapper(cv)
# to estimate priors from the data itself, use the whole set
else:
cv = 'prefit'
self.calibration_ = CalibratedClassifierCV(
base_estimator=_Passthrough(), method=self.method, cv=cv)
self.calibration_.fit(scores.reshape(-1, 1), y_true)
return self | Train calibration
Parameters
----------
scores : (n_samples, ) array-like
Uncalibrated scores.
y_true : (n_samples, ) array-like
True labels (dtype=bool). | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/binary_classification.py#L163-L211 | null | class Calibration(object):
"""Probability calibration for binary classification tasks
Parameters
----------
method : {'isotonic', 'sigmoid'}, optional
See `CalibratedClassifierCV`. Defaults to 'isotonic'.
equal_priors : bool, optional
Set to True to force equal priors. Default behavior is to estimate
priors from the data itself.
Usage
-----
>>> calibration = Calibration()
>>> calibration.fit(train_score, train_y)
>>> test_probability = calibration.transform(test_score)
See also
--------
CalibratedClassifierCV
"""
def __init__(self, equal_priors=False, method='isotonic'):
super(Calibration, self).__init__()
self.method = method
self.equal_priors = equal_priors
def transform(self, scores):
"""Calibrate scores into probabilities
Parameters
----------
scores : (n_samples, ) array-like
Uncalibrated scores.
Returns
-------
probabilities : (n_samples, ) array-like
Calibrated scores (i.e. probabilities)
"""
return self.calibration_.predict_proba(scores.reshape(-1, 1))[:, 1]
|
pyannote/pyannote-metrics | pyannote/metrics/errors/identification.py | IdentificationErrorAnalysis.difference | python | def difference(self, reference, hypothesis, uem=None, uemified=False):
R, H, common_timeline = self.uemify(
reference, hypothesis, uem=uem,
collar=self.collar, skip_overlap=self.skip_overlap,
returns_timeline=True)
errors = Annotation(uri=reference.uri, modality=reference.modality)
# loop on all segments
for segment in common_timeline:
# list of labels in reference segment
rlabels = R.get_labels(segment, unique=False)
# list of labels in hypothesis segment
hlabels = H.get_labels(segment, unique=False)
_, details = self.matcher(rlabels, hlabels)
for r, h in details[MATCH_CORRECT]:
track = errors.new_track(segment, prefix=MATCH_CORRECT)
errors[segment, track] = (MATCH_CORRECT, r, h)
for r, h in details[MATCH_CONFUSION]:
track = errors.new_track(segment, prefix=MATCH_CONFUSION)
errors[segment, track] = (MATCH_CONFUSION, r, h)
for r in details[MATCH_MISSED_DETECTION]:
track = errors.new_track(segment,
prefix=MATCH_MISSED_DETECTION)
errors[segment, track] = (MATCH_MISSED_DETECTION, r, None)
for h in details[MATCH_FALSE_ALARM]:
track = errors.new_track(segment, prefix=MATCH_FALSE_ALARM)
errors[segment, track] = (MATCH_FALSE_ALARM, None, h)
if uemified:
return reference, hypothesis, errors
else:
return errors | Get error analysis as `Annotation`
Labels are (status, reference_label, hypothesis_label) tuples.
`status` is either 'correct', 'confusion', 'missed detection' or
'false alarm'.
`reference_label` is None in case of 'false alarm'.
`hypothesis_label` is None in case of 'missed detection'.
Parameters
----------
uemified : bool, optional
Returns "uemified" version of reference and hypothesis.
Defaults to False.
Returns
-------
errors : `Annotation` | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/errors/identification.py#L75-L134 | [
"def uemify(self, reference, hypothesis, uem=None, collar=0.,\n skip_overlap=False, returns_uem=False, returns_timeline=False):\n \"\"\"Crop 'reference' and 'hypothesis' to 'uem' support\n\n Parameters\n ----------\n reference, hypothesis : Annotation\n Reference and hypothesis annotations.\n uem : Timeline, optional\n Evaluation map.\n collar : float, optional\n When provided, set the duration of collars centered around\n reference segment boundaries that are extruded from both reference\n and hypothesis. Defaults to 0. (i.e. no collar).\n skip_overlap : bool, optional\n Set to True to not evaluate overlap regions.\n Defaults to False (i.e. keep overlap regions).\n returns_uem : bool, optional\n Set to True to return extruded uem as well.\n Defaults to False (i.e. only return reference and hypothesis)\n returns_timeline : bool, optional\n Set to True to oversegment reference and hypothesis so that they\n share the same internal timeline.\n\n Returns\n -------\n reference, hypothesis : Annotation\n Extruded reference and hypothesis annotations\n uem : Timeline\n Extruded uem (returned only when 'returns_uem' is True)\n timeline : Timeline:\n Common timeline (returned only when 'returns_timeline' is True)\n \"\"\"\n\n # when uem is not provided, use the union of reference and hypothesis\n # extents -- and warn the user about that.\n if uem is None:\n r_extent = reference.get_timeline().extent()\n h_extent = hypothesis.get_timeline().extent()\n extent = r_extent | h_extent\n uem = Timeline(segments=[extent] if extent else [],\n uri=reference.uri)\n warnings.warn(\n \"'uem' was approximated by the union of 'reference' \"\n \"and 'hypothesis' extents.\")\n\n # extrude collars (and overlap regions) from uem\n uem = self.extrude(uem, reference, collar=collar,\n skip_overlap=skip_overlap)\n\n # extrude regions outside of uem\n reference = reference.crop(uem, mode='intersection')\n hypothesis = hypothesis.crop(uem, mode='intersection')\n\n # project reference and 
hypothesis on common timeline\n if returns_timeline:\n timeline = self.common_timeline(reference, hypothesis)\n reference = self.project(reference, timeline)\n hypothesis = self.project(hypothesis, timeline)\n\n result = (reference, hypothesis)\n if returns_uem:\n result += (uem, )\n\n if returns_timeline:\n result += (timeline, )\n\n return result\n"
] | class IdentificationErrorAnalysis(UEMSupportMixin, object):
"""
Parameters
----------
collar : float, optional
Duration (in seconds) of collars removed from evaluation around
boundaries of reference segments.
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
"""
def __init__(self, collar=0., skip_overlap=False):
super(IdentificationErrorAnalysis, self).__init__()
self.matcher = LabelMatcher()
self.munkres = Munkres()
self.collar = collar
self.skip_overlap=skip_overlap
def _match_errors(self, before, after):
b_type, b_ref, b_hyp = before
a_type, a_ref, a_hyp = after
return (b_ref == a_ref) * (1 + (b_type == a_type) + (b_hyp == a_hyp))
def regression(self, reference, before, after, uem=None, uemified=False):
_, before, errors_before = self.difference(
reference, before, uem=uem, uemified=True)
reference, after, errors_after = self.difference(
reference, after, uem=uem, uemified=True)
behaviors = Annotation(uri=reference.uri, modality=reference.modality)
# common (up-sampled) timeline
common_timeline = errors_after.get_timeline().union(
errors_before.get_timeline())
common_timeline = common_timeline.segmentation()
# align 'before' errors on common timeline
B = self._tagger(errors_before, common_timeline)
# align 'after' errors on common timeline
A = self._tagger(errors_after, common_timeline)
for segment in common_timeline:
old_errors = B.get_labels(segment, unique=False)
new_errors = A.get_labels(segment, unique=False)
n1 = len(old_errors)
n2 = len(new_errors)
n = max(n1, n2)
match = np.zeros((n, n), dtype=int)
for i1, e1 in enumerate(old_errors):
for i2, e2 in enumerate(new_errors):
match[i1, i2] = self._match_errors(e1, e2)
mapping = self.munkres.compute(2 - match)
for i1, i2 in mapping:
if i1 >= n1:
track = behaviors.new_track(segment,
candidate=REGRESSION,
prefix=REGRESSION)
behaviors[segment, track] = (
REGRESSION, None, new_errors[i2])
elif i2 >= n2:
track = behaviors.new_track(segment,
candidate=IMPROVEMENT,
prefix=IMPROVEMENT)
behaviors[segment, track] = (
IMPROVEMENT, old_errors[i1], None)
elif old_errors[i1][0] == MATCH_CORRECT:
if new_errors[i2][0] == MATCH_CORRECT:
track = behaviors.new_track(segment,
candidate=BOTH_CORRECT,
prefix=BOTH_CORRECT)
behaviors[segment, track] = (
BOTH_CORRECT, old_errors[i1], new_errors[i2])
else:
track = behaviors.new_track(segment,
candidate=REGRESSION,
prefix=REGRESSION)
behaviors[segment, track] = (
REGRESSION, old_errors[i1], new_errors[i2])
else:
if new_errors[i2][0] == MATCH_CORRECT:
track = behaviors.new_track(segment,
candidate=IMPROVEMENT,
prefix=IMPROVEMENT)
behaviors[segment, track] = (
IMPROVEMENT, old_errors[i1], new_errors[i2])
else:
track = behaviors.new_track(segment,
candidate=BOTH_INCORRECT,
prefix=BOTH_INCORRECT)
behaviors[segment, track] = (
BOTH_INCORRECT, old_errors[i1], new_errors[i2])
behaviors = behaviors.support()
if uemified:
return reference, before, after, behaviors
else:
return behaviors
def matrix(self, reference, hypothesis, uem=None):
reference, hypothesis, errors = self.difference(
reference, hypothesis, uem=uem, uemified=True)
chart = errors.chart()
# rLabels contains reference labels
# hLabels contains hypothesis labels confused with a reference label
# falseAlarmLabels contains false alarm hypothesis labels that do not
# exist in reference labels // corner case //
falseAlarmLabels = set(hypothesis.labels()) - set(reference.labels())
hLabels = set(reference.labels()) | set(hypothesis.labels())
rLabels = set(reference.labels())
# sort these sets of labels
cmp_func = reference._cmp_labels
falseAlarmLabels = sorted(falseAlarmLabels, cmp=cmp_func)
rLabels = sorted(rLabels, cmp=cmp_func)
hLabels = sorted(hLabels, cmp=cmp_func)
# append false alarm labels as last 'reference' labels
# (make sure to mark them as such)
rLabels = rLabels + [(MATCH_FALSE_ALARM, hLabel)
for hLabel in falseAlarmLabels]
# prepend duration columns before the detailed confusion matrix
hLabels = [
REFERENCE_TOTAL, HYPOTHESIS_TOTAL,
MATCH_CORRECT, MATCH_CONFUSION,
MATCH_FALSE_ALARM, MATCH_MISSED_DETECTION
] + hLabels
# initialize empty matrix
matrix = DataArray(
np.zeros((len(rLabels), len(hLabels))),
coords=[('reference', rLabels), ('hypothesis', hLabels)])
# loop on chart
for (status, rLabel, hLabel), duration in chart:
# increment correct
if status == MATCH_CORRECT:
matrix.loc[rLabel, hLabel] += duration
matrix.loc[rLabel, MATCH_CORRECT] += duration
# increment confusion matrix
if status == MATCH_CONFUSION:
matrix.loc[rLabel, hLabel] += duration
matrix.loc[rLabel, MATCH_CONFUSION] += duration
if hLabel in falseAlarmLabels:
matrix.loc[(MATCH_FALSE_ALARM, hLabel), rLabel] += duration
matrix.loc[(MATCH_FALSE_ALARM, hLabel), MATCH_CONFUSION] += duration
else:
matrix.loc[hLabel, rLabel] += duration
matrix.loc[hLabel, MATCH_CONFUSION] += duration
if status == MATCH_FALSE_ALARM:
# hLabel is also a reference label
if hLabel in falseAlarmLabels:
matrix.loc[(MATCH_FALSE_ALARM, hLabel), MATCH_FALSE_ALARM] += duration
else:
matrix.loc[hLabel, MATCH_FALSE_ALARM] += duration
if status == MATCH_MISSED_DETECTION:
matrix.loc[rLabel, MATCH_MISSED_DETECTION] += duration
# total reference and hypothesis duration
for rLabel in rLabels:
if isinstance(rLabel, tuple) and rLabel[0] == MATCH_FALSE_ALARM:
r = 0.
h = hypothesis.label_duration(rLabel[1])
else:
r = reference.label_duration(rLabel)
h = hypothesis.label_duration(rLabel)
matrix.loc[rLabel, REFERENCE_TOTAL] = r
matrix.loc[rLabel, HYPOTHESIS_TOTAL] = h
return matrix
|
pyannote/pyannote-metrics | pyannote/metrics/base.py | BaseMetric.reset | python | def reset(self):
if self.parallel:
from pyannote.metrics import manager_
self.accumulated_ = manager_.dict()
self.results_ = manager_.list()
self.uris_ = manager_.dict()
else:
self.accumulated_ = dict()
self.results_ = list()
self.uris_ = dict()
for value in self.components_:
self.accumulated_[value] = 0. | Reset accumulated components and metric values | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/base.py#L76-L88 | null | class BaseMetric(object):
"""
:class:`BaseMetric` is the base class for most pyannote evaluation metrics.
Parameters
----------
parallel : bool, optional
Defaults to True
Attributes
----------
name : str
Human-readable name of the metric (eg. 'diarization error rate')
"""
@classmethod
def metric_name(cls):
raise NotImplementedError(
cls.__name__ + " is missing a 'metric_name' class method. "
"It should return the name of the metric as string.")
@classmethod
def metric_components(cls):
raise NotImplementedError(
cls.__name__ + " is missing a 'metric_components' class method. "
"It should return the list of names of metric components.")
def __init__(self, parallel=False, **kwargs):
super(BaseMetric, self).__init__()
self.parallel = parallel
self.metric_name_ = self.__class__.metric_name()
self.components_ = set(self.__class__.metric_components())
self.reset()
def init_components(self):
return {value: 0. for value in self.components_}
def __get_name(self):
return self.__class__.metric_name()
name = property(fget=__get_name, doc="Metric name.")
def __call__(self, reference, hypothesis, detailed=False, **kwargs):
"""Compute metric value and accumulate components
Parameters
----------
reference : type depends on the metric
Manual `reference`
hypothesis : same as `reference`
Evaluated `hypothesis`
detailed : bool, optional
By default (False), return metric value only.
Set `detailed` to True to return dictionary where keys are
components names and values are component values
Returns
-------
value : float (if `detailed` is False)
Metric value
components : dict (if `detailed` is True)
`components` updated with metric value
"""
# compute metric components
components = self.compute_components(reference, hypothesis, **kwargs)
# compute rate based on components
components[self.metric_name_] = self.compute_metric(components)
# keep track of this computation
uri = reference.uri
if uri not in self.uris_:
self.uris_[uri] = 1
else:
self.uris_[uri] += 1
uri = uri + ' #{0:d}'.format(self.uris_[uri])
self.results_.append((uri, components))
# accumulate components
for name in self.components_:
self.accumulated_[name] += components[name]
if detailed:
return components
return components[self.metric_name_]
def report(self, display=False):
"""Evaluation report
Parameters
----------
display : bool, optional
Set to True to print the report to stdout.
Returns
-------
report : pandas.DataFrame
Dataframe with one column per metric component, one row per
evaluated item, and one final row for accumulated results.
"""
report = []
uris = []
percent = 'total' in self.metric_components()
for uri, components in self.results_:
row = {}
if percent:
total = components['total']
for key, value in components.items():
if key == self.name:
row[key, '%'] = 100 * value
elif key == 'total':
row[key, ''] = value
else:
row[key, ''] = value
if percent:
if total > 0:
row[key, '%'] = 100 * value / total
else:
row[key, '%'] = np.NaN
report.append(row)
uris.append(uri)
row = {}
components = self.accumulated_
if percent:
total = components['total']
for key, value in components.items():
if key == self.name:
row[key, '%'] = 100 * value
elif key == 'total':
row[key, ''] = value
else:
row[key, ''] = value
if percent:
if total > 0:
row[key, '%'] = 100 * value / total
else:
row[key, '%'] = np.NaN
row[self.name, '%'] = 100 * abs(self)
report.append(row)
uris.append('TOTAL')
df = pd.DataFrame(report)
df['item'] = uris
df = df.set_index('item')
df.columns = pd.MultiIndex.from_tuples(df.columns)
df = df[[self.name] + self.metric_components()]
if display:
print(df.to_string(index=True, sparsify=False, justify='right', float_format=lambda f: '{0:.2f}'.format(f)))
return df
def __str__(self):
report = self.report(display=False)
return report.to_string(
sparsify=False,
float_format=lambda f: '{0:.2f}'.format(f))
def __abs__(self):
"""Compute metric value from accumulated components"""
return self.compute_metric(self.accumulated_)
def __getitem__(self, component):
"""Get value of accumulated `component`.
Parameters
----------
component : str
Name of `component`
Returns
-------
value : type depends on the metric
Value of accumulated `component`
"""
if component == slice(None, None, None):
return dict(self.accumulated_)
else:
return self.accumulated_[component]
def __iter__(self):
"""Iterator over the accumulated (uri, value)"""
for uri, component in self.results_:
yield uri, component
def compute_components(self, reference, hypothesis, **kwargs):
"""Compute metric components
Parameters
----------
reference : type depends on the metric
Manual `reference`
hypothesis : same as `reference`
Evaluated `hypothesis`
Returns
-------
components : dict
Dictionary where keys are component names and values are component
values
"""
raise NotImplementedError(
cls.__name__ + " is missing a 'compute_components' method."
"It should return a dictionary where keys are component names "
"and values are component values.")
def compute_metric(self, components):
"""Compute metric value from computed `components`
Parameters
----------
components : dict
Dictionary where keys are components names and values are component
values
Returns
-------
value : type depends on the metric
Metric value
"""
raise NotImplementedError(
cls.__name__ + " is missing a 'compute_metric' method. "
"It should return the actual value of the metric based "
"on the precomputed component dictionary given as input.")
def confidence_interval(self, alpha=0.9):
"""Compute confidence interval on accumulated metric values
Parameters
----------
alpha : float, optional
Probability that the returned confidence interval contains
the true metric value.
Returns
-------
(center, (lower, upper))
with center the mean of the conditional pdf of the metric value
and (lower, upper) is a confidence interval centered on the median,
containing the estimate to a probability alpha.
See Also:
---------
scipy.stats.bayes_mvs
"""
m, _, _ = scipy.stats.bayes_mvs(
[r[self.metric_name_] for _, r in self.results_], alpha=alpha)
return m
|
pyannote/pyannote-metrics | pyannote/metrics/base.py | BaseMetric.report | python | def report(self, display=False):
report = []
uris = []
percent = 'total' in self.metric_components()
for uri, components in self.results_:
row = {}
if percent:
total = components['total']
for key, value in components.items():
if key == self.name:
row[key, '%'] = 100 * value
elif key == 'total':
row[key, ''] = value
else:
row[key, ''] = value
if percent:
if total > 0:
row[key, '%'] = 100 * value / total
else:
row[key, '%'] = np.NaN
report.append(row)
uris.append(uri)
row = {}
components = self.accumulated_
if percent:
total = components['total']
for key, value in components.items():
if key == self.name:
row[key, '%'] = 100 * value
elif key == 'total':
row[key, ''] = value
else:
row[key, ''] = value
if percent:
if total > 0:
row[key, '%'] = 100 * value / total
else:
row[key, '%'] = np.NaN
row[self.name, '%'] = 100 * abs(self)
report.append(row)
uris.append('TOTAL')
df = pd.DataFrame(report)
df['item'] = uris
df = df.set_index('item')
df.columns = pd.MultiIndex.from_tuples(df.columns)
df = df[[self.name] + self.metric_components()]
if display:
print(df.to_string(index=True, sparsify=False, justify='right', float_format=lambda f: '{0:.2f}'.format(f)))
return df | Evaluation report
Parameters
----------
display : bool, optional
Set to True to print the report to stdout.
Returns
-------
report : pandas.DataFrame
Dataframe with one column per metric component, one row per
evaluated item, and one final row for accumulated results. | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/base.py#L144-L219 | [
"def metric_components(cls):\n raise NotImplementedError(\n cls.__name__ + \" is missing a 'metric_components' class method. \"\n \"It should return the list of names of metric components.\")\n"
] | class BaseMetric(object):
"""
:class:`BaseMetric` is the base class for most pyannote evaluation metrics.
Parameters
----------
parallel : bool, optional
Defaults to True
Attributes
----------
name : str
Human-readable name of the metric (eg. 'diarization error rate')
"""
@classmethod
def metric_name(cls):
raise NotImplementedError(
cls.__name__ + " is missing a 'metric_name' class method. "
"It should return the name of the metric as string.")
@classmethod
def metric_components(cls):
raise NotImplementedError(
cls.__name__ + " is missing a 'metric_components' class method. "
"It should return the list of names of metric components.")
def __init__(self, parallel=False, **kwargs):
super(BaseMetric, self).__init__()
self.parallel = parallel
self.metric_name_ = self.__class__.metric_name()
self.components_ = set(self.__class__.metric_components())
self.reset()
def init_components(self):
return {value: 0. for value in self.components_}
def reset(self):
"""Reset accumulated components and metric values"""
if self.parallel:
from pyannote.metrics import manager_
self.accumulated_ = manager_.dict()
self.results_ = manager_.list()
self.uris_ = manager_.dict()
else:
self.accumulated_ = dict()
self.results_ = list()
self.uris_ = dict()
for value in self.components_:
self.accumulated_[value] = 0.
def __get_name(self):
return self.__class__.metric_name()
name = property(fget=__get_name, doc="Metric name.")
def __call__(self, reference, hypothesis, detailed=False, **kwargs):
"""Compute metric value and accumulate components
Parameters
----------
reference : type depends on the metric
Manual `reference`
hypothesis : same as `reference`
Evaluated `hypothesis`
detailed : bool, optional
By default (False), return metric value only.
Set `detailed` to True to return dictionary where keys are
components names and values are component values
Returns
-------
value : float (if `detailed` is False)
Metric value
components : dict (if `detailed` is True)
`components` updated with metric value
"""
# compute metric components
components = self.compute_components(reference, hypothesis, **kwargs)
# compute rate based on components
components[self.metric_name_] = self.compute_metric(components)
# keep track of this computation
uri = reference.uri
if uri not in self.uris_:
self.uris_[uri] = 1
else:
self.uris_[uri] += 1
uri = uri + ' #{0:d}'.format(self.uris_[uri])
self.results_.append((uri, components))
# accumulate components
for name in self.components_:
self.accumulated_[name] += components[name]
if detailed:
return components
return components[self.metric_name_]
def __str__(self):
report = self.report(display=False)
return report.to_string(
sparsify=False,
float_format=lambda f: '{0:.2f}'.format(f))
def __abs__(self):
"""Compute metric value from accumulated components"""
return self.compute_metric(self.accumulated_)
def __getitem__(self, component):
"""Get value of accumulated `component`.
Parameters
----------
component : str
Name of `component`
Returns
-------
value : type depends on the metric
Value of accumulated `component`
"""
if component == slice(None, None, None):
return dict(self.accumulated_)
else:
return self.accumulated_[component]
def __iter__(self):
"""Iterator over the accumulated (uri, value)"""
for uri, component in self.results_:
yield uri, component
def compute_components(self, reference, hypothesis, **kwargs):
"""Compute metric components
Parameters
----------
reference : type depends on the metric
Manual `reference`
hypothesis : same as `reference`
Evaluated `hypothesis`
Returns
-------
components : dict
Dictionary where keys are component names and values are component
values
"""
raise NotImplementedError(
cls.__name__ + " is missing a 'compute_components' method."
"It should return a dictionary where keys are component names "
"and values are component values.")
def compute_metric(self, components):
"""Compute metric value from computed `components`
Parameters
----------
components : dict
Dictionary where keys are components names and values are component
values
Returns
-------
value : type depends on the metric
Metric value
"""
raise NotImplementedError(
cls.__name__ + " is missing a 'compute_metric' method. "
"It should return the actual value of the metric based "
"on the precomputed component dictionary given as input.")
def confidence_interval(self, alpha=0.9):
"""Compute confidence interval on accumulated metric values
Parameters
----------
alpha : float, optional
Probability that the returned confidence interval contains
the true metric value.
Returns
-------
(center, (lower, upper))
with center the mean of the conditional pdf of the metric value
and (lower, upper) is a confidence interval centered on the median,
containing the estimate to a probability alpha.
See Also:
---------
scipy.stats.bayes_mvs
"""
m, _, _ = scipy.stats.bayes_mvs(
[r[self.metric_name_] for _, r in self.results_], alpha=alpha)
return m
|
pyannote/pyannote-metrics | pyannote/metrics/base.py | BaseMetric.confidence_interval | python | def confidence_interval(self, alpha=0.9):
m, _, _ = scipy.stats.bayes_mvs(
[r[self.metric_name_] for _, r in self.results_], alpha=alpha)
return m | Compute confidence interval on accumulated metric values
Parameters
----------
alpha : float, optional
Probability that the returned confidence interval contains
the true metric value.
Returns
-------
(center, (lower, upper))
with center the mean of the conditional pdf of the metric value
and (lower, upper) is a confidence interval centered on the median,
containing the estimate to a probability alpha.
See Also:
---------
scipy.stats.bayes_mvs | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/base.py#L296-L319 | null | class BaseMetric(object):
"""
:class:`BaseMetric` is the base class for most pyannote evaluation metrics.
Parameters
----------
parallel : bool, optional
Defaults to True
Attributes
----------
name : str
Human-readable name of the metric (eg. 'diarization error rate')
"""
@classmethod
def metric_name(cls):
raise NotImplementedError(
cls.__name__ + " is missing a 'metric_name' class method. "
"It should return the name of the metric as string.")
@classmethod
def metric_components(cls):
raise NotImplementedError(
cls.__name__ + " is missing a 'metric_components' class method. "
"It should return the list of names of metric components.")
def __init__(self, parallel=False, **kwargs):
super(BaseMetric, self).__init__()
self.parallel = parallel
self.metric_name_ = self.__class__.metric_name()
self.components_ = set(self.__class__.metric_components())
self.reset()
def init_components(self):
return {value: 0. for value in self.components_}
def reset(self):
"""Reset accumulated components and metric values"""
if self.parallel:
from pyannote.metrics import manager_
self.accumulated_ = manager_.dict()
self.results_ = manager_.list()
self.uris_ = manager_.dict()
else:
self.accumulated_ = dict()
self.results_ = list()
self.uris_ = dict()
for value in self.components_:
self.accumulated_[value] = 0.
def __get_name(self):
return self.__class__.metric_name()
name = property(fget=__get_name, doc="Metric name.")
def __call__(self, reference, hypothesis, detailed=False, **kwargs):
"""Compute metric value and accumulate components
Parameters
----------
reference : type depends on the metric
Manual `reference`
hypothesis : same as `reference`
Evaluated `hypothesis`
detailed : bool, optional
By default (False), return metric value only.
Set `detailed` to True to return dictionary where keys are
components names and values are component values
Returns
-------
value : float (if `detailed` is False)
Metric value
components : dict (if `detailed` is True)
`components` updated with metric value
"""
# compute metric components
components = self.compute_components(reference, hypothesis, **kwargs)
# compute rate based on components
components[self.metric_name_] = self.compute_metric(components)
# keep track of this computation
uri = reference.uri
if uri not in self.uris_:
self.uris_[uri] = 1
else:
self.uris_[uri] += 1
uri = uri + ' #{0:d}'.format(self.uris_[uri])
self.results_.append((uri, components))
# accumulate components
for name in self.components_:
self.accumulated_[name] += components[name]
if detailed:
return components
return components[self.metric_name_]
def report(self, display=False):
"""Evaluation report
Parameters
----------
display : bool, optional
Set to True to print the report to stdout.
Returns
-------
report : pandas.DataFrame
Dataframe with one column per metric component, one row per
evaluated item, and one final row for accumulated results.
"""
report = []
uris = []
percent = 'total' in self.metric_components()
for uri, components in self.results_:
row = {}
if percent:
total = components['total']
for key, value in components.items():
if key == self.name:
row[key, '%'] = 100 * value
elif key == 'total':
row[key, ''] = value
else:
row[key, ''] = value
if percent:
if total > 0:
row[key, '%'] = 100 * value / total
else:
row[key, '%'] = np.NaN
report.append(row)
uris.append(uri)
row = {}
components = self.accumulated_
if percent:
total = components['total']
for key, value in components.items():
if key == self.name:
row[key, '%'] = 100 * value
elif key == 'total':
row[key, ''] = value
else:
row[key, ''] = value
if percent:
if total > 0:
row[key, '%'] = 100 * value / total
else:
row[key, '%'] = np.NaN
row[self.name, '%'] = 100 * abs(self)
report.append(row)
uris.append('TOTAL')
df = pd.DataFrame(report)
df['item'] = uris
df = df.set_index('item')
df.columns = pd.MultiIndex.from_tuples(df.columns)
df = df[[self.name] + self.metric_components()]
if display:
print(df.to_string(index=True, sparsify=False, justify='right', float_format=lambda f: '{0:.2f}'.format(f)))
return df
def __str__(self):
report = self.report(display=False)
return report.to_string(
sparsify=False,
float_format=lambda f: '{0:.2f}'.format(f))
def __abs__(self):
"""Compute metric value from accumulated components"""
return self.compute_metric(self.accumulated_)
def __getitem__(self, component):
"""Get value of accumulated `component`.
Parameters
----------
component : str
Name of `component`
Returns
-------
value : type depends on the metric
Value of accumulated `component`
"""
if component == slice(None, None, None):
return dict(self.accumulated_)
else:
return self.accumulated_[component]
def __iter__(self):
"""Iterator over the accumulated (uri, value)"""
for uri, component in self.results_:
yield uri, component
def compute_components(self, reference, hypothesis, **kwargs):
"""Compute metric components
Parameters
----------
reference : type depends on the metric
Manual `reference`
hypothesis : same as `reference`
Evaluated `hypothesis`
Returns
-------
components : dict
Dictionary where keys are component names and values are component
values
"""
raise NotImplementedError(
cls.__name__ + " is missing a 'compute_components' method."
"It should return a dictionary where keys are component names "
"and values are component values.")
def compute_metric(self, components):
"""Compute metric value from computed `components`
Parameters
----------
components : dict
Dictionary where keys are components names and values are component
values
Returns
-------
value : type depends on the metric
Metric value
"""
raise NotImplementedError(
cls.__name__ + " is missing a 'compute_metric' method. "
"It should return the actual value of the metric based "
"on the precomputed component dictionary given as input.")
|
pyannote/pyannote-metrics | pyannote/metrics/base.py | Precision.compute_metric | python | def compute_metric(self, components):
numerator = components[PRECISION_RELEVANT_RETRIEVED]
denominator = components[PRECISION_RETRIEVED]
if denominator == 0.:
if numerator == 0:
return 1.
else:
raise ValueError('')
else:
return numerator/denominator | Compute precision from `components` | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/base.py#L347-L357 | null | class Precision(BaseMetric):
"""
:class:`Precision` is a base class for precision-like evaluation metrics.
It defines two components '# retrieved' and '# relevant retrieved' and the
compute_metric() method to compute the actual precision:
Precision = # retrieved / # relevant retrieved
Inheriting classes must implement compute_components().
"""
@classmethod
def metric_name(cls):
return PRECISION_NAME
@classmethod
def metric_components(cls):
return [PRECISION_RETRIEVED, PRECISION_RELEVANT_RETRIEVED]
|
pyannote/pyannote-metrics | pyannote/metrics/base.py | Recall.compute_metric | python | def compute_metric(self, components):
numerator = components[RECALL_RELEVANT_RETRIEVED]
denominator = components[RECALL_RELEVANT]
if denominator == 0.:
if numerator == 0:
return 1.
else:
raise ValueError('')
else:
return numerator/denominator | Compute recall from `components` | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/base.py#L384-L394 | null | class Recall(BaseMetric):
"""
:class:`Recall` is a base class for recall-like evaluation metrics.
It defines two components '# relevant' and '# relevant retrieved' and the
compute_metric() method to compute the actual recall:
Recall = # relevant retrieved / # relevant
Inheriting classes must implement compute_components().
"""
@classmethod
def metric_name(cls):
return RECALL_NAME
@classmethod
def metric_components(cls):
return [RECALL_RELEVANT, RECALL_RELEVANT_RETRIEVED]
|
pyannote/pyannote-metrics | pyannote/metrics/diarization.py | DiarizationErrorRate.optimal_mapping | python | def optimal_mapping(self, reference, hypothesis, uem=None):
# NOTE that this 'uemification' will not be called when
# 'optimal_mapping' is called from 'compute_components' as it
# has already been done in 'compute_components'
if uem:
reference, hypothesis = self.uemify(reference, hypothesis, uem=uem)
# call hungarian mapper
mapping = self.mapper_(hypothesis, reference)
return mapping | Optimal label mapping
Parameters
----------
reference : Annotation
hypothesis : Annotation
Reference and hypothesis diarization
uem : Timeline
Evaluation map
Returns
-------
mapping : dict
Mapping between hypothesis (key) and reference (value) labels | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/diarization.py#L106-L131 | [
"def uemify(self, reference, hypothesis, uem=None, collar=0.,\n skip_overlap=False, returns_uem=False, returns_timeline=False):\n \"\"\"Crop 'reference' and 'hypothesis' to 'uem' support\n\n Parameters\n ----------\n reference, hypothesis : Annotation\n Reference and hypothesis annotations.\n uem : Timeline, optional\n Evaluation map.\n collar : float, optional\n When provided, set the duration of collars centered around\n reference segment boundaries that are extruded from both reference\n and hypothesis. Defaults to 0. (i.e. no collar).\n skip_overlap : bool, optional\n Set to True to not evaluate overlap regions.\n Defaults to False (i.e. keep overlap regions).\n returns_uem : bool, optional\n Set to True to return extruded uem as well.\n Defaults to False (i.e. only return reference and hypothesis)\n returns_timeline : bool, optional\n Set to True to oversegment reference and hypothesis so that they\n share the same internal timeline.\n\n Returns\n -------\n reference, hypothesis : Annotation\n Extruded reference and hypothesis annotations\n uem : Timeline\n Extruded uem (returned only when 'returns_uem' is True)\n timeline : Timeline:\n Common timeline (returned only when 'returns_timeline' is True)\n \"\"\"\n\n # when uem is not provided, use the union of reference and hypothesis\n # extents -- and warn the user about that.\n if uem is None:\n r_extent = reference.get_timeline().extent()\n h_extent = hypothesis.get_timeline().extent()\n extent = r_extent | h_extent\n uem = Timeline(segments=[extent] if extent else [],\n uri=reference.uri)\n warnings.warn(\n \"'uem' was approximated by the union of 'reference' \"\n \"and 'hypothesis' extents.\")\n\n # extrude collars (and overlap regions) from uem\n uem = self.extrude(uem, reference, collar=collar,\n skip_overlap=skip_overlap)\n\n # extrude regions outside of uem\n reference = reference.crop(uem, mode='intersection')\n hypothesis = hypothesis.crop(uem, mode='intersection')\n\n # project reference and 
hypothesis on common timeline\n if returns_timeline:\n timeline = self.common_timeline(reference, hypothesis)\n reference = self.project(reference, timeline)\n hypothesis = self.project(hypothesis, timeline)\n\n result = (reference, hypothesis)\n if returns_uem:\n result += (uem, )\n\n if returns_timeline:\n result += (timeline, )\n\n return result\n"
] | class DiarizationErrorRate(IdentificationErrorRate):
"""Diarization error rate
First, the optimal mapping between reference and hypothesis labels
is obtained using the Hungarian algorithm. Then, the actual diarization
error rate is computed as the identification error rate with each hypothesis
label translated into the corresponding reference label.
Parameters
----------
collar : float, optional
Duration (in seconds) of collars removed from evaluation around
boundaries of reference segments.
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
Usage
-----
* Diarization error rate between `reference` and `hypothesis` annotations
>>> metric = DiarizationErrorRate()
>>> reference = Annotation(...) # doctest: +SKIP
>>> hypothesis = Annotation(...) # doctest: +SKIP
>>> value = metric(reference, hypothesis) # doctest: +SKIP
* Compute global diarization error rate and confidence interval
over multiple documents
>>> for reference, hypothesis in ... # doctest: +SKIP
... metric(reference, hypothesis) # doctest: +SKIP
>>> global_value = abs(metric) # doctest: +SKIP
>>> mean, (lower, upper) = metric.confidence_interval() # doctest: +SKIP
* Get diarization error rate detailed components
>>> components = metric(reference, hypothesis, detailed=True) #doctest +SKIP
* Get accumulated components
>>> components = metric[:] # doctest: +SKIP
>>> metric['confusion'] # doctest: +SKIP
See Also
--------
:class:`pyannote.metric.base.BaseMetric`: details on accumulation
:class:`pyannote.metric.identification.IdentificationErrorRate`: identification error rate
"""
@classmethod
def metric_name(cls):
return DER_NAME
def __init__(self, collar=0.0, skip_overlap=False, **kwargs):
super(DiarizationErrorRate, self).__init__(
collar=collar, skip_overlap=skip_overlap, **kwargs)
self.mapper_ = HungarianMapper()
def compute_components(self, reference, hypothesis, uem=None, **kwargs):
# crop reference and hypothesis to evaluated regions (uem)
# remove collars around reference segment boundaries
# remove overlap regions (if requested)
reference, hypothesis, uem = self.uemify(
reference, hypothesis, uem=uem,
collar=self.collar, skip_overlap=self.skip_overlap,
returns_uem=True)
# NOTE that this 'uemification' must be done here because it
# might have an impact on the search for the optimal mapping.
# make sure reference only contains string labels ('A', 'B', ...)
reference = reference.anonymize_labels(generator='string')
# make sure hypothesis only contains integer labels (1, 2, ...)
hypothesis = hypothesis.anonymize_labels(generator='int')
# optimal (int --> str) mapping
mapping = self.optimal_mapping(reference, hypothesis)
# compute identification error rate based on mapped hypothesis
# NOTE that collar is set to 0.0 because 'uemify' has already
# been applied (same reason for setting skip_overlap to False)
mapped = hypothesis.translate(mapping)
return super(DiarizationErrorRate, self)\
.compute_components(reference, mapped, uem=uem,
collar=0.0, skip_overlap=False,
**kwargs)
|
pyannote/pyannote-metrics | pyannote/metrics/diarization.py | GreedyDiarizationErrorRate.greedy_mapping | python | def greedy_mapping(self, reference, hypothesis, uem=None):
if uem:
reference, hypothesis = self.uemify(reference, hypothesis, uem=uem)
return self.mapper_(hypothesis, reference) | Greedy label mapping
Parameters
----------
reference : Annotation
hypothesis : Annotation
Reference and hypothesis diarization
uem : Timeline
Evaluation map
Returns
-------
mapping : dict
Mapping between hypothesis (key) and reference (value) labels | train | https://github.com/pyannote/pyannote-metrics/blob/b433fec3bd37ca36fe026a428cd72483d646871a/pyannote/metrics/diarization.py#L223-L241 | [
"def uemify(self, reference, hypothesis, uem=None, collar=0.,\n skip_overlap=False, returns_uem=False, returns_timeline=False):\n \"\"\"Crop 'reference' and 'hypothesis' to 'uem' support\n\n Parameters\n ----------\n reference, hypothesis : Annotation\n Reference and hypothesis annotations.\n uem : Timeline, optional\n Evaluation map.\n collar : float, optional\n When provided, set the duration of collars centered around\n reference segment boundaries that are extruded from both reference\n and hypothesis. Defaults to 0. (i.e. no collar).\n skip_overlap : bool, optional\n Set to True to not evaluate overlap regions.\n Defaults to False (i.e. keep overlap regions).\n returns_uem : bool, optional\n Set to True to return extruded uem as well.\n Defaults to False (i.e. only return reference and hypothesis)\n returns_timeline : bool, optional\n Set to True to oversegment reference and hypothesis so that they\n share the same internal timeline.\n\n Returns\n -------\n reference, hypothesis : Annotation\n Extruded reference and hypothesis annotations\n uem : Timeline\n Extruded uem (returned only when 'returns_uem' is True)\n timeline : Timeline:\n Common timeline (returned only when 'returns_timeline' is True)\n \"\"\"\n\n # when uem is not provided, use the union of reference and hypothesis\n # extents -- and warn the user about that.\n if uem is None:\n r_extent = reference.get_timeline().extent()\n h_extent = hypothesis.get_timeline().extent()\n extent = r_extent | h_extent\n uem = Timeline(segments=[extent] if extent else [],\n uri=reference.uri)\n warnings.warn(\n \"'uem' was approximated by the union of 'reference' \"\n \"and 'hypothesis' extents.\")\n\n # extrude collars (and overlap regions) from uem\n uem = self.extrude(uem, reference, collar=collar,\n skip_overlap=skip_overlap)\n\n # extrude regions outside of uem\n reference = reference.crop(uem, mode='intersection')\n hypothesis = hypothesis.crop(uem, mode='intersection')\n\n # project reference and 
hypothesis on common timeline\n if returns_timeline:\n timeline = self.common_timeline(reference, hypothesis)\n reference = self.project(reference, timeline)\n hypothesis = self.project(hypothesis, timeline)\n\n result = (reference, hypothesis)\n if returns_uem:\n result += (uem, )\n\n if returns_timeline:\n result += (timeline, )\n\n return result\n"
] | class GreedyDiarizationErrorRate(IdentificationErrorRate):
"""Greedy diarization error rate
First, the greedy mapping between reference and hypothesis labels is
obtained. Then, the actual diarization error rate is computed as the
identification error rate with each hypothesis label translated into the
corresponding reference label.
Parameters
----------
collar : float, optional
Duration (in seconds) of collars removed from evaluation around
boundaries of reference segments.
skip_overlap : bool, optional
Set to True to not evaluate overlap regions.
Defaults to False (i.e. keep overlap regions).
Usage
-----
* Greedy diarization error rate between `reference` and `hypothesis` annotations
>>> metric = GreedyDiarizationErrorRate()
>>> reference = Annotation(...) # doctest: +SKIP
>>> hypothesis = Annotation(...) # doctest: +SKIP
>>> value = metric(reference, hypothesis) # doctest: +SKIP
* Compute global greedy diarization error rate and confidence interval
over multiple documents
>>> for reference, hypothesis in ... # doctest: +SKIP
... metric(reference, hypothesis) # doctest: +SKIP
>>> global_value = abs(metric) # doctest: +SKIP
>>> mean, (lower, upper) = metric.confidence_interval() # doctest: +SKIP
* Get greedy diarization error rate detailed components
>>> components = metric(reference, hypothesis, detailed=True) #doctest +SKIP
* Get accumulated components
>>> components = metric[:] # doctest: +SKIP
>>> metric['confusion'] # doctest: +SKIP
See Also
--------
:class:`pyannote.metric.base.BaseMetric`: details on accumulation
"""
@classmethod
def metric_name(cls):
return DER_NAME
def __init__(self, collar=0.0, skip_overlap=False, **kwargs):
super(GreedyDiarizationErrorRate, self).__init__(
collar=collar, skip_overlap=skip_overlap, **kwargs)
self.mapper_ = GreedyMapper()
def compute_components(self, reference, hypothesis, uem=None, **kwargs):
# crop reference and hypothesis to evaluated regions (uem)
# remove collars around reference segment boundaries
# remove overlap regions (if requested)
reference, hypothesis, uem = self.uemify(
reference, hypothesis, uem=uem,
collar=self.collar, skip_overlap=self.skip_overlap,
returns_uem=True)
# NOTE that this 'uemification' must be done here because it
# might have an impact on the search for the greedy mapping.
# make sure reference only contains string labels ('A', 'B', ...)
reference = reference.anonymize_labels(generator='string')
# make sure hypothesis only contains integer labels (1, 2, ...)
hypothesis = hypothesis.anonymize_labels(generator='int')
# greedy (int --> str) mapping
mapping = self.greedy_mapping(reference, hypothesis)
# compute identification error rate based on mapped hypothesis
# NOTE that collar is set to 0.0 because 'uemify' has already
# been applied (same reason for setting skip_overlap to False)
mapped = hypothesis.translate(mapping)
return super(GreedyDiarizationErrorRate, self)\
.compute_components(reference, mapped, uem=uem,
collar=0.0, skip_overlap=False,
**kwargs)
|
berdario/pew | pew/pew.py | shell_config_cmd | python | def shell_config_cmd(argv):
"Prints the path for the current $SHELL helper file"
shell = supported_shell()
if shell:
print(pew_site / 'shell_config' / ('init.' + shell))
else:
err('Completions and prompts are unavailable for %s' %
repr(os.environ.get('SHELL', ''))) | Prints the path for the current $SHELL helper file | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L85-L92 | [
"def supported_shell():\n shell = Path(os.environ.get('SHELL', '')).stem\n if shell in ('bash', 'zsh', 'fish'):\n return shell\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
if not workon_home.exists() and own(workon_home):
workon_home.mkdir(parents=True)
link = expandpath('~/.virtualenvs')
if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
'XDG_DATA_HOME' not in os.environ and not link.exists():
link.symlink_to(str(workon_home))
return True
else:
return False
pew_site = Path(__file__).parent
def supported_shell():
shell = Path(os.environ.get('SHELL', '')).stem
if shell in ('bash', 'zsh', 'fish'):
return shell
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
project_file = workon_home / env / '.project'
if project_file.exists():
with project_file.open() as f:
project_dir = f.readline().strip()
if os.path.exists(project_dir):
return project_dir
else:
err('Corrupted or outdated:', project_file, '\nDirectory',
project_dir, "doesn't exist.")
def unsetenv(key):
if key in os.environ:
del os.environ[key]
def compute_path(env):
envdir = workon_home / env
return os.pathsep.join([
str(envdir / env_bin_dir),
os.environ['PATH'],
])
def inve(env, command, *args, **kwargs):
"""Run a command in the given virtual environment.
Pass additional keyword arguments to ``subprocess.check_call()``."""
# we don't strictly need to restore the environment, since pew runs in
# its own process, but it feels like the right thing to do
with temp_environ():
os.environ['VIRTUAL_ENV'] = str(workon_home / env)
os.environ['PATH'] = compute_path(env)
unsetenv('PYTHONHOME')
unsetenv('__PYVENV_LAUNCHER__')
try:
return check_call([command] + list(args), shell=windows, **kwargs)
# need to have shell=True on windows, otherwise the PYTHONPATH
# won't inherit the PATH
except OSError as e:
if e.errno == 2:
err('Unable to find', command)
return 2
else:
raise
def fork_shell(env, shellcmd, cwd):
or_ctrld = '' if windows else "or 'Ctrl+D' "
err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
"to return.", sep='')
if 'VIRTUAL_ENV' in os.environ:
err("Be aware that this environment will be nested on top "
"of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
# bash is a special little snowflake, and prevent_path_errors cannot work there
# https://github.com/berdario/pew/issues/58#issuecomment-102182346
bashrcpath = expandpath('~/.bashrc')
if bashrcpath.exists():
with NamedTemporaryFile('w+') as rcfile:
with bashrcpath.open() as bashrc:
rcfile.write(bashrc.read())
rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
rcfile.flush()
return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
else:
return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
shell_cmd = ['cmd']
cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
if expandpath(cmderrc_path).exists():
shell_cmd += ['/k', cmderrc_path]
if cwd:
os.environ['CMDER_START'] = cwd
return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
env = str(env)
shell = _detect_shell()
shell_name = Path(shell).stem
if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
# On Windows the PATH is usually set with System Utility
# so we won't worry about trying to check mistakes there
shell_check = (sys.executable + ' -c "from pew.pew import '
'prevent_path_errors; prevent_path_errors()"')
try:
inve(env, shell, '-c', shell_check)
except CalledProcessError:
return
if shell_name == 'bash':
return fork_bash(env, cwd)
elif shell_name == 'Cmder':
return fork_cmder(env, cwd)
else:
return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
requirements=None, rest=[]):
if python:
rest = ["--python=%s" % python] + rest
path = (workon_home / envname).absolute()
try:
check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
except (CalledProcessError, KeyboardInterrupt):
rmvirtualenvs([envname])
raise
else:
if project:
setvirtualenvproject(envname, project.absolute())
if requirements:
inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
if packages:
inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python')
parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
return parser
def new_cmd(argv):
"""Create a new environment, in $WORKON_HOME."""
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname)
def rmvirtualenvs(envs):
error_happened = False
for env in envs:
env = workon_home / env
if os.environ.get('VIRTUAL_ENV') == str(env):
err("ERROR: You cannot remove the active environment (%s)." % env)
error_happened = True
break
try:
shutil.rmtree(str(env))
except OSError as e:
err("Error while trying to remove the {0} env: \n{1}".format
(env, e.strerror))
error_happened = True
return error_happened
def rm_cmd(argv):
"""Remove one or more environment, from $WORKON_HOME."""
if len(argv) < 1:
sys.exit("Please specify an environment")
return rmvirtualenvs(argv)
def packages(site_packages):
nodes = site_packages.iterdir()
return set([x.stem.split('-')[0] for x in nodes]) - set(['__pycache__'])
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
return sorted(set(env.parts[-3] for env in
workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def ls_cmd(argv):
"""List available environments."""
parser = argparse.ArgumentParser()
p_group = parser.add_mutually_exclusive_group()
p_group.add_argument('-b', '--brief', action='store_false')
p_group.add_argument('-l', '--long', action='store_true')
args = parser.parse_args(argv)
lsvirtualenv(args.long)
def parse_envname(argv, no_arg_callback):
if len(argv) < 1 or argv[0] is None:
no_arg_callback()
env = argv[0]
if env.startswith('/'):
sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
if not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
else:
return env
def workon_cmd(argv):
"""List or change working virtual environments."""
parser = argparse.ArgumentParser(prog='pew workon')
parser.add_argument('envname', nargs='?')
parser.add_argument(
'-n', '--no-cd', action='store_true',
help=('Do not change working directory to project directory after '
'activating virtualenv.')
)
args = parser.parse_args(argv)
def list_and_exit():
lsvirtualenv(False)
sys.exit(0)
env = parse_envname([args.envname], list_and_exit)
# Check if the virtualenv has an associated project directory and in
# this case, use it as the current working directory.
project_dir = get_project_dir(env)
if project_dir is None or args.no_cd:
project_dir = os.getcwd()
return shell(env, cwd=project_dir)
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
if not env:
sys.exit('ERROR: no virtualenv active')
else:
env_python = workon_home / env / env_bin_dir / 'python'
return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
"""Add the specified directories to the Python path for the currently active virtualenv.
This will be done by placing the directory names in a path file named
"virtualenv_path_extensions.pth" inside the virtualenv's site-packages
directory; if this file does not exists, it will be created first.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='remove', action='store_true')
parser.add_argument('dirs', nargs='+')
args = parser.parse_args(argv)
extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
if not extra_paths.exists():
with extra_paths.open('w') as extra:
extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
def rewrite(f):
with extra_paths.open('r+') as extra:
to_write = f(extra.readlines())
extra.seek(0)
extra.truncate()
extra.writelines(to_write)
if args.remove:
rewrite(lambda ls: [line for line in ls if line not in new_paths])
else:
rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
print(sitepackages_dir())
def lssitepackages_cmd(argv):
"""Show the content of the site-packages directory of the current virtualenv."""
site = sitepackages_dir()
print(*sorted(site.iterdir()), sep=os.linesep)
extra_paths = site / '_virtualenv_path_extensions.pth'
if extra_paths.exists():
print('from _virtualenv_path_extensions.pth:')
with extra_paths.open() as extra:
print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
"""Toggle the current virtualenv between having and not having access to the global site-packages."""
quiet = argv == ['-q']
site = sitepackages_dir()
ngsp_file = site.parent / 'no-global-site-packages.txt'
if ngsp_file.exists():
ngsp_file.unlink()
if not quiet:
print('Enabled global site-packages')
else:
with ngsp_file.open('w'):
if not quiet:
print('Disabled global site-packages')
def cp_cmd(argv):
"""Duplicate the named virtualenv to make a new one."""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target', nargs='?')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
args = parser.parse_args(argv)
target_name = copy_virtualenv_project(args.source, args.target)
if args.activate:
shell(target_name)
def copy_virtualenv_project(source, target):
source = expandpath(source)
if not source.exists():
source = workon_home / source
if not source.exists():
sys.exit('Please provide a valid virtualenv to copy')
target_name = target or source.name
target = workon_home / target_name
if target.exists():
sys.exit('%s virtualenv already exists in %s.' % (
target_name, workon_home
))
print('Copying {0} in {1}'.format(source, target_name))
clone_virtualenv(str(source), str(target))
return target_name
def rename_cmd(argv):
"""Rename a virtualenv"""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target')
pargs = parser.parse_args(argv)
copy_virtualenv_project(pargs.source, pargs.target)
return rmvirtualenvs([pargs.source])
def setvirtualenvproject(env, project):
print('Setting project for {0} to {1}'.format(env, project))
with (workon_home / env / '.project').open('wb') as prj:
prj.write(str(project).encode())
def setproject_cmd(argv):
"""Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
args = dict(enumerate(argv))
project = os.path.abspath(args.get(1, '.'))
env = args.get(0, os.environ.get('VIRTUAL_ENV'))
if not env:
sys.exit('pew setproject [virtualenv] [project_path]')
if not (workon_home / env).exists():
sys.exit("Environment '%s' doesn't exist." % env)
if not os.path.isdir(project):
sys.exit('pew setproject: %s does not exist' % project)
setvirtualenvproject(env, project)
def getproject_cmd(argv):
"""Print a virtualenv's project directory, if set.
If called without providing a virtualenv name as argument, print the
current virtualenv's project directory.
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Print an environment's project directory.",
)
parser.add_argument(
'envname',
nargs='?',
default=os.environ.get('VIRTUAL_ENV'),
help=(
'The name of the environment to return the project directory '
'for. If omitted, will use the currently active environment.'
),
)
args = parser.parse_args(argv)
# Now, do the actual work
if not args.envname:
sys.exit('ERROR: no virtualenv active')
if not (workon_home / args.envname).exists():
sys.exit("ERROR: Environment '{0}' does not exist."
.format(args.envname))
project_dir = get_project_dir(args.envname)
if project_dir is None:
sys.exit("ERROR: no project directory set for Environment '{0}'"
.format(args.envname))
print(project_dir)
def mkproject_cmd(argv):
"""Create a new project directory and its associated virtualenv."""
if '-l' in argv or '--list' in argv:
templates = [t.name[9:] for t in workon_home.glob("template_*")]
print("Available project templates:", *templates, sep='\n')
return
parser = mkvirtualenv_argparser()
parser.add_argument('envname')
parser.add_argument(
'-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
parser.add_argument(
'-l', '--list', action='store_true', help='List available templates.')
args, rest = parser.parse_known_args(argv)
projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
if not projects_home.exists():
sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
project = (projects_home / args.envname).absolute()
if project.exists():
sys.exit('Project %s already exists.' % args.envname)
mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
args.requirements, rest)
project.mkdir()
for template_name in args.templates:
template = workon_home / ("template_" + template_name)
inve(args.envname, str(template), args.envname, str(project))
if args.activate:
shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
"""Create a temporary virtualenv."""
parser = mkvirtualenv_argparser()
env = '.'
while (workon_home / env).exists():
env = hex(random.getrandbits(64))[2:-1]
args, rest = parser.parse_known_args(argv)
mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
rest=rest)
print('This is a temporary environment. It will be deleted when you exit')
try:
if args.activate:
# only used for testing on windows
shell(env)
finally:
return rmvirtualenvs([env])
def wipeenv_cmd(argv):
"""Remove all installed packages from the current (or supplied) env."""
env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
if not env:
sys.exit('ERROR: no virtualenv active')
elif not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
else:
env_pip = str(workon_home / env / env_bin_dir / 'pip')
all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
ignored = sorted(all_pkgs - pkgs)
pkgs = set(p.split("==")[0] for p in pkgs)
to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
if to_remove:
print("Ignoring:\n %s" % "\n ".join(ignored))
print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
return inve(env, 'pip', 'uninstall', '-y', *to_remove)
else:
print("Nothing to remove")
def inall_cmd(argv):
"""Run a command in each virtualenv."""
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors)
def in_cmd(argv):
"""Run a command in the given virtualenv."""
if len(argv) == 1:
return workon_cmd(argv)
parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
return inve(*argv)
def restore_cmd(argv):
    """Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
    if len(argv) < 1:
        sys.exit('You must provide a valid virtualenv to target')
    env = argv[0]
    path = workon_home / env
    # Find the interpreter inside the env; resolving the symlink yields
    # the filename of the real python binary the env was created from.
    py = path / env_bin_dir / ('python.exe' if windows else 'python')
    exact_py = py.resolve().name
    # Re-run virtualenv in place with that same interpreter version.
    return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
    """Print the path for the virtualenv directory"""
    # parse_envname exits with an error when the name is missing/unknown.
    env = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
    print(workon_home / env)
def install_cmd(argv):
    '''Use Pythonz to download and build the specified Python version'''
    installer = InstallCommand()
    options, versions = installer.parser.parse_args(argv)
    if len(versions) != 1:
        # Exactly one version must be requested per invocation.
        installer.parser.print_help()
        sys.exit(1)
    else:
        try:
            actual_installer = PythonInstaller.get_installer(versions[0], options)
            return actual_installer.install()
        except AlreadyInstalledError as e:
            # Not a failure: just report that the version is already there.
            print(e)
def uninstall_cmd(argv):
    '''Use Pythonz to uninstall the specified Python version'''
    # Thin wrapper: argument parsing is delegated to Pythonz itself.
    return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
    '''List the pythons installed by Pythonz (or all the installable ones)'''
    # Thin wrapper around the Pythonz list command (mocked on Windows).
    return ListPythons().run(argv)
def locate_python_cmd(argv):
    '''Locate the path for the python version installed by Pythonz'''
    # Thin wrapper around the Pythonz locate command (mocked on Windows).
    return LocatePython().run(argv)
def version_cmd(argv):
    """Prints current pew version"""
    # Imported lazily: only this subcommand needs setuptools' metadata API.
    import pkg_resources
    try:
        version = pkg_resources.get_distribution('pew').version
    except pkg_resources.DistributionNotFound:
        version = 'unknown'
        print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
    print(version)
def prevent_path_errors():
    """Exit with a diagnostic when an active virtualenv is not on $PATH correctly."""
    # check_path() verifies the active env's bin dir is actually first on
    # $PATH; when it is not, the env is broken or being shadowed.
    if 'VIRTUAL_ENV' in os.environ and not check_path():
        sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
    """One-time setup: offer to hook pew's shell config into the user's rc file."""
    shell = supported_shell()
    if shell:
        # fish uses its own command-substitution syntax.
        if shell == 'fish':
            source_cmd = 'source (pew shell_config)'
        else:
            source_cmd = 'source "$(pew shell_config)"'
        rcpath = expandpath({'bash': '~/.bashrc'
                            , 'zsh': '~/.zshrc'
                            , 'fish': '~/.config/fish/config.fish'}[shell])
        if rcpath.exists():
            update_config_file(rcpath, source_cmd)
        else:
            # No rc file to patch: just tell the user what to add by hand.
            print("It seems that you're running pew for the first time\n"
                  "If you want source shell competions and update your prompt, "
                  "Add the following line to your shell config file:\n %s" % source_cmd)
    print('\nWill now continue with the command:', *sys.argv[1:])
    input('[enter]')
def update_config_file(rcpath, source_cmd):
    """Append *source_cmd* to the shell rc file at *rcpath*, if the user agrees.

    Does nothing when an identical line is already present.
    """
    with rcpath.open('r+') as rcfile:
        if source_cmd not in (line.strip() for line in rcfile.readlines()):
            # Empty input defaults to 'n' (the [y/N] prompt convention).
            choice = 'X'
            while choice not in ('y', '', 'n'):
                choice = input("It seems that you're running pew for the first time\n"
                               "do you want to modify %s to source completions and"
                               " update your prompt? [y/N]\n> " % rcpath).lower()
            if choice == 'y':
                # readlines() left the file position at EOF, so this appends.
                rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
                print('Done')
            else:
                print('\nOk, if you want to do it manually, just add\n %s\nat'
                      ' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
    """Print an aligned, wrapped summary line for every available subcommand."""
    # Column where summaries start: widest command name plus ': ' padding.
    pad = max(map(len, cmds)) + 3
    width, _ = get_terminal_size()
    print('Available commands:\n')
    for name, handler in sorted(cmds.items()):
        doc = handler.__doc__
        if doc:
            summary = doc.splitlines()[0]
            print(textwrap.fill(
                summary,
                width or 1000,
                initial_indent=(' {0}: '.format(name)).ljust(pad),
                subsequent_indent=pad * ' '))
        else:
            print(' ' + name)
def pew():
    """CLI entry point: dispatch sys.argv to the matching ``*_cmd`` function."""
    first_run = makedirs_and_symlink_if_needed(workon_home)
    # Only offer interactive first-run setup when attached to a terminal.
    if first_run and sys.stdin.isatty():
        first_run_setup()
    # Every module-level function named <name>_cmd becomes subcommand <name>.
    cmds = dict((cmd[:-4], fun)
                for cmd, fun in globals().items() if cmd.endswith('_cmd'))
    if sys.argv[1:]:
        if sys.argv[1] in cmds:
            command = cmds[sys.argv[1]]
            try:
                return command(sys.argv[2:])
            except CalledProcessError as e:
                # Propagate the child process' exit status as our own.
                return e.returncode
            except KeyboardInterrupt:
                pass
        else:
            err("ERROR: command", sys.argv[1], "does not exist.")
            print_commands(cmds)
            sys.exit(1)
    else:
        print_commands(cmds)
|
berdario/pew | pew/pew.py | inve | python | def inve(env, command, *args, **kwargs):
# we don't strictly need to restore the environment, since pew runs in
# its own process, but it feels like the right thing to do
with temp_environ():
os.environ['VIRTUAL_ENV'] = str(workon_home / env)
os.environ['PATH'] = compute_path(env)
unsetenv('PYTHONHOME')
unsetenv('__PYVENV_LAUNCHER__')
try:
return check_call([command] + list(args), shell=windows, **kwargs)
# need to have shell=True on windows, otherwise the PYTHONPATH
# won't inherit the PATH
except OSError as e:
if e.errno == 2:
err('Unable to find', command)
return 2
else:
raise | Run a command in the given virtual environment.
Pass additional keyword arguments to ``subprocess.check_call()``. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L130-L152 | [
"def unsetenv(key):\n if key in os.environ:\n del os.environ[key]\n",
"def compute_path(env):\n envdir = workon_home / env\n return os.pathsep.join([\n str(envdir / env_bin_dir),\n os.environ['PATH'],\n ])\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
if not workon_home.exists() and own(workon_home):
workon_home.mkdir(parents=True)
link = expandpath('~/.virtualenvs')
if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
'XDG_DATA_HOME' not in os.environ and not link.exists():
link.symlink_to(str(workon_home))
return True
else:
return False
pew_site = Path(__file__).parent
def supported_shell():
shell = Path(os.environ.get('SHELL', '')).stem
if shell in ('bash', 'zsh', 'fish'):
return shell
def shell_config_cmd(argv):
"Prints the path for the current $SHELL helper file"
shell = supported_shell()
if shell:
print(pew_site / 'shell_config' / ('init.' + shell))
else:
err('Completions and prompts are unavailable for %s' %
repr(os.environ.get('SHELL', '')))
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
    """Return the project directory recorded for *env*, or None.

    Reads the first line of the env's ``.project`` file; falls through to
    None when the file is missing or points at a directory that no longer
    exists (in which case a warning is printed to stderr).
    """
    project_file = workon_home / env / '.project'
    if project_file.exists():
        with project_file.open() as f:
            project_dir = f.readline().strip()
            if os.path.exists(project_dir):
                return project_dir
            else:
                err('Corrupted or outdated:', project_file, '\nDirectory',
                    project_dir, "doesn't exist.")
def unsetenv(key):
    """Remove *key* from the process environment; no-op when it is absent."""
    os.environ.pop(key, None)
def compute_path(env):
    """Return $PATH with the env's bin/Scripts directory prepended."""
    bin_dir = workon_home / env / env_bin_dir
    return os.pathsep.join([str(bin_dir), os.environ['PATH']])
def fork_shell(env, shellcmd, cwd):
or_ctrld = '' if windows else "or 'Ctrl+D' "
err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
"to return.", sep='')
if 'VIRTUAL_ENV' in os.environ:
err("Be aware that this environment will be nested on top "
"of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
# bash is a special little snowflake, and prevent_path_errors cannot work there
# https://github.com/berdario/pew/issues/58#issuecomment-102182346
bashrcpath = expandpath('~/.bashrc')
if bashrcpath.exists():
with NamedTemporaryFile('w+') as rcfile:
with bashrcpath.open() as bashrc:
rcfile.write(bashrc.read())
rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
rcfile.flush()
return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
else:
return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
shell_cmd = ['cmd']
cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
if expandpath(cmderrc_path).exists():
shell_cmd += ['/k', cmderrc_path]
if cwd:
os.environ['CMDER_START'] = cwd
return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
env = str(env)
shell = _detect_shell()
shell_name = Path(shell).stem
if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
# On Windows the PATH is usually set with System Utility
# so we won't worry about trying to check mistakes there
shell_check = (sys.executable + ' -c "from pew.pew import '
'prevent_path_errors; prevent_path_errors()"')
try:
inve(env, shell, '-c', shell_check)
except CalledProcessError:
return
if shell_name == 'bash':
return fork_bash(env, cwd)
elif shell_name == 'Cmder':
return fork_cmder(env, cwd)
else:
return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
requirements=None, rest=[]):
if python:
rest = ["--python=%s" % python] + rest
path = (workon_home / envname).absolute()
try:
check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
except (CalledProcessError, KeyboardInterrupt):
rmvirtualenvs([envname])
raise
else:
if project:
setvirtualenvproject(envname, project.absolute())
if requirements:
inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
if packages:
inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python')
parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
return parser
def new_cmd(argv):
"""Create a new environment, in $WORKON_HOME."""
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname)
def rmvirtualenvs(envs):
error_happened = False
for env in envs:
env = workon_home / env
if os.environ.get('VIRTUAL_ENV') == str(env):
err("ERROR: You cannot remove the active environment (%s)." % env)
error_happened = True
break
try:
shutil.rmtree(str(env))
except OSError as e:
err("Error while trying to remove the {0} env: \n{1}".format
(env, e.strerror))
error_happened = True
return error_happened
def rm_cmd(argv):
"""Remove one or more environment, from $WORKON_HOME."""
if len(argv) < 1:
sys.exit("Please specify an environment")
return rmvirtualenvs(argv)
def packages(site_packages):
    """Return the package/distribution names found in *site_packages*.

    Each entry's stem is truncated at the first '-' (so
    'requests-2.31.0.dist-info' counts as 'requests'); the interpreter's
    '__pycache__' directory is excluded.
    """
    names = set()
    for node in site_packages.iterdir():
        names.add(node.stem.split('-')[0])
    return names - set(['__pycache__'])
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
return sorted(set(env.parts[-3] for env in
workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def ls_cmd(argv):
"""List available environments."""
parser = argparse.ArgumentParser()
p_group = parser.add_mutually_exclusive_group()
p_group.add_argument('-b', '--brief', action='store_false')
p_group.add_argument('-l', '--long', action='store_true')
args = parser.parse_args(argv)
lsvirtualenv(args.long)
def parse_envname(argv, no_arg_callback):
if len(argv) < 1 or argv[0] is None:
no_arg_callback()
env = argv[0]
if env.startswith('/'):
sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
if not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
else:
return env
def workon_cmd(argv):
"""List or change working virtual environments."""
parser = argparse.ArgumentParser(prog='pew workon')
parser.add_argument('envname', nargs='?')
parser.add_argument(
'-n', '--no-cd', action='store_true',
help=('Do not change working directory to project directory after '
'activating virtualenv.')
)
args = parser.parse_args(argv)
def list_and_exit():
lsvirtualenv(False)
sys.exit(0)
env = parse_envname([args.envname], list_and_exit)
# Check if the virtualenv has an associated project directory and in
# this case, use it as the current working directory.
project_dir = get_project_dir(env)
if project_dir is None or args.no_cd:
project_dir = os.getcwd()
return shell(env, cwd=project_dir)
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
if not env:
sys.exit('ERROR: no virtualenv active')
else:
env_python = workon_home / env / env_bin_dir / 'python'
return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
"""Add the specified directories to the Python path for the currently active virtualenv.
This will be done by placing the directory names in a path file named
"virtualenv_path_extensions.pth" inside the virtualenv's site-packages
directory; if this file does not exists, it will be created first.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='remove', action='store_true')
parser.add_argument('dirs', nargs='+')
args = parser.parse_args(argv)
extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
if not extra_paths.exists():
with extra_paths.open('w') as extra:
extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
def rewrite(f):
with extra_paths.open('r+') as extra:
to_write = f(extra.readlines())
extra.seek(0)
extra.truncate()
extra.writelines(to_write)
if args.remove:
rewrite(lambda ls: [line for line in ls if line not in new_paths])
else:
rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
print(sitepackages_dir())
def lssitepackages_cmd(argv):
"""Show the content of the site-packages directory of the current virtualenv."""
site = sitepackages_dir()
print(*sorted(site.iterdir()), sep=os.linesep)
extra_paths = site / '_virtualenv_path_extensions.pth'
if extra_paths.exists():
print('from _virtualenv_path_extensions.pth:')
with extra_paths.open() as extra:
print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
"""Toggle the current virtualenv between having and not having access to the global site-packages."""
quiet = argv == ['-q']
site = sitepackages_dir()
ngsp_file = site.parent / 'no-global-site-packages.txt'
if ngsp_file.exists():
ngsp_file.unlink()
if not quiet:
print('Enabled global site-packages')
else:
with ngsp_file.open('w'):
if not quiet:
print('Disabled global site-packages')
def cp_cmd(argv):
"""Duplicate the named virtualenv to make a new one."""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target', nargs='?')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
args = parser.parse_args(argv)
target_name = copy_virtualenv_project(args.source, args.target)
if args.activate:
shell(target_name)
def copy_virtualenv_project(source, target):
source = expandpath(source)
if not source.exists():
source = workon_home / source
if not source.exists():
sys.exit('Please provide a valid virtualenv to copy')
target_name = target or source.name
target = workon_home / target_name
if target.exists():
sys.exit('%s virtualenv already exists in %s.' % (
target_name, workon_home
))
print('Copying {0} in {1}'.format(source, target_name))
clone_virtualenv(str(source), str(target))
return target_name
def rename_cmd(argv):
"""Rename a virtualenv"""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target')
pargs = parser.parse_args(argv)
copy_virtualenv_project(pargs.source, pargs.target)
return rmvirtualenvs([pargs.source])
def setvirtualenvproject(env, project):
print('Setting project for {0} to {1}'.format(env, project))
with (workon_home / env / '.project').open('wb') as prj:
prj.write(str(project).encode())
def setproject_cmd(argv):
"""Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
args = dict(enumerate(argv))
project = os.path.abspath(args.get(1, '.'))
env = args.get(0, os.environ.get('VIRTUAL_ENV'))
if not env:
sys.exit('pew setproject [virtualenv] [project_path]')
if not (workon_home / env).exists():
sys.exit("Environment '%s' doesn't exist." % env)
if not os.path.isdir(project):
sys.exit('pew setproject: %s does not exist' % project)
setvirtualenvproject(env, project)
def getproject_cmd(argv):
"""Print a virtualenv's project directory, if set.
If called without providing a virtualenv name as argument, print the
current virtualenv's project directory.
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Print an environment's project directory.",
)
parser.add_argument(
'envname',
nargs='?',
default=os.environ.get('VIRTUAL_ENV'),
help=(
'The name of the environment to return the project directory '
'for. If omitted, will use the currently active environment.'
),
)
args = parser.parse_args(argv)
# Now, do the actual work
if not args.envname:
sys.exit('ERROR: no virtualenv active')
if not (workon_home / args.envname).exists():
sys.exit("ERROR: Environment '{0}' does not exist."
.format(args.envname))
project_dir = get_project_dir(args.envname)
if project_dir is None:
sys.exit("ERROR: no project directory set for Environment '{0}'"
.format(args.envname))
print(project_dir)
def mkproject_cmd(argv):
"""Create a new project directory and its associated virtualenv."""
if '-l' in argv or '--list' in argv:
templates = [t.name[9:] for t in workon_home.glob("template_*")]
print("Available project templates:", *templates, sep='\n')
return
parser = mkvirtualenv_argparser()
parser.add_argument('envname')
parser.add_argument(
'-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
parser.add_argument(
'-l', '--list', action='store_true', help='List available templates.')
args, rest = parser.parse_known_args(argv)
projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
if not projects_home.exists():
sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
project = (projects_home / args.envname).absolute()
if project.exists():
sys.exit('Project %s already exists.' % args.envname)
mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
args.requirements, rest)
project.mkdir()
for template_name in args.templates:
template = workon_home / ("template_" + template_name)
inve(args.envname, str(template), args.envname, str(project))
if args.activate:
shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
"""Create a temporary virtualenv."""
parser = mkvirtualenv_argparser()
env = '.'
while (workon_home / env).exists():
env = hex(random.getrandbits(64))[2:-1]
args, rest = parser.parse_known_args(argv)
mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
rest=rest)
print('This is a temporary environment. It will be deleted when you exit')
try:
if args.activate:
# only used for testing on windows
shell(env)
finally:
return rmvirtualenvs([env])
def wipeenv_cmd(argv):
"""Remove all installed packages from the current (or supplied) env."""
env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
if not env:
sys.exit('ERROR: no virtualenv active')
elif not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
else:
env_pip = str(workon_home / env / env_bin_dir / 'pip')
all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
ignored = sorted(all_pkgs - pkgs)
pkgs = set(p.split("==")[0] for p in pkgs)
to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
if to_remove:
print("Ignoring:\n %s" % "\n ".join(ignored))
print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
return inve(env, 'pip', 'uninstall', '-y', *to_remove)
else:
print("Nothing to remove")
def inall_cmd(argv):
"""Run a command in each virtualenv."""
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors)
def in_cmd(argv):
"""Run a command in the given virtualenv."""
if len(argv) == 1:
return workon_cmd(argv)
parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
return inve(*argv)
def restore_cmd(argv):
"""Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
if len(argv) < 1:
sys.exit('You must provide a valid virtualenv to target')
env = argv[0]
path = workon_home / env
py = path / env_bin_dir / ('python.exe' if windows else 'python')
exact_py = py.resolve().name
return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
"""Print the path for the virtualenv directory"""
env = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
print(workon_home / env)
def install_cmd(argv):
'''Use Pythonz to download and build the specified Python version'''
installer = InstallCommand()
options, versions = installer.parser.parse_args(argv)
if len(versions) != 1:
installer.parser.print_help()
sys.exit(1)
else:
try:
actual_installer = PythonInstaller.get_installer(versions[0], options)
return actual_installer.install()
except AlreadyInstalledError as e:
print(e)
def uninstall_cmd(argv):
'''Use Pythonz to uninstall the specified Python version'''
return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
'''List the pythons installed by Pythonz (or all the installable ones)'''
return ListPythons().run(argv)
def locate_python_cmd(argv):
'''Locate the path for the python version installed by Pythonz'''
return LocatePython().run(argv)
def version_cmd(argv):
"""Prints current pew version"""
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('pew').version
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
print(__version__)
def prevent_path_errors():
if 'VIRTUAL_ENV' in os.environ and not check_path():
sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
shell = supported_shell()
if shell:
if shell == 'fish':
source_cmd = 'source (pew shell_config)'
else:
source_cmd = 'source "$(pew shell_config)"'
rcpath = expandpath({'bash': '~/.bashrc'
, 'zsh': '~/.zshrc'
, 'fish': '~/.config/fish/config.fish'}[shell])
if rcpath.exists():
update_config_file(rcpath, source_cmd)
else:
print("It seems that you're running pew for the first time\n"
"If you want source shell competions and update your prompt, "
"Add the following line to your shell config file:\n %s" % source_cmd)
print('\nWill now continue with the command:', *sys.argv[1:])
input('[enter]')
def update_config_file(rcpath, source_cmd):
with rcpath.open('r+') as rcfile:
if source_cmd not in (line.strip() for line in rcfile.readlines()):
choice = 'X'
while choice not in ('y', '', 'n'):
choice = input("It seems that you're running pew for the first time\n"
"do you want to modify %s to source completions and"
" update your prompt? [y/N]\n> " % rcpath).lower()
if choice == 'y':
rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
print('Done')
else:
print('\nOk, if you want to do it manually, just add\n %s\nat'
' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
longest = max(map(len, cmds)) + 3
columns, _ = get_terminal_size()
print('Available commands:\n')
for cmd, fun in sorted(cmds.items()):
if fun.__doc__:
print(textwrap.fill(
fun.__doc__.splitlines()[0],
columns or 1000,
initial_indent=(' {0}: '.format(cmd)).ljust(longest),
subsequent_indent=longest * ' '))
else:
print(' ' + cmd)
def pew():
first_run = makedirs_and_symlink_if_needed(workon_home)
if first_run and sys.stdin.isatty():
first_run_setup()
cmds = dict((cmd[:-4], fun)
for cmd, fun in globals().items() if cmd.endswith('_cmd'))
if sys.argv[1:]:
if sys.argv[1] in cmds:
command = cmds[sys.argv[1]]
try:
return command(sys.argv[2:])
except CalledProcessError as e:
return e.returncode
except KeyboardInterrupt:
pass
else:
err("ERROR: command", sys.argv[1], "does not exist.")
print_commands(cmds)
sys.exit(1)
else:
print_commands(cmds)
|
berdario/pew | pew/pew.py | new_cmd | python | def new_cmd(argv):
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname) | Create a new environment, in $WORKON_HOME. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L260-L273 | [
"def expandpath(path):\n return Path(os.path.expanduser(os.path.expandvars(path)))\n",
"def shell(env, cwd=None):\n env = str(env)\n shell = _detect_shell()\n shell_name = Path(shell).stem\n if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):\n # On Windows the PATH is usually set with System Utility\n # so we won't worry about trying to check mistakes there\n shell_check = (sys.executable + ' -c \"from pew.pew import '\n 'prevent_path_errors; prevent_path_errors()\"')\n try:\n inve(env, shell, '-c', shell_check)\n except CalledProcessError:\n return\n if shell_name == 'bash':\n return fork_bash(env, cwd)\n elif shell_name == 'Cmder':\n return fork_cmder(env, cwd)\n else:\n return fork_shell(env, [shell], cwd)\n",
"def mkvirtualenv(envname, python=None, packages=[], project=None,\n requirements=None, rest=[]):\n\n if python:\n rest = [\"--python=%s\" % python] + rest\n\n path = (workon_home / envname).absolute()\n\n try:\n check_call([sys.executable, \"-m\", \"virtualenv\", str(path)] + rest)\n except (CalledProcessError, KeyboardInterrupt):\n rmvirtualenvs([envname])\n raise\n else:\n if project:\n setvirtualenvproject(envname, project.absolute())\n if requirements:\n inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))\n if packages:\n inve(envname, 'pip', 'install', *packages)\n",
"def mkvirtualenv_argparser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--python')\n parser.add_argument('-i', action='append', dest='packages', help='Install \\\na package after the environment is created. This option may be repeated.')\n parser.add_argument('-r', dest='requirements', help='Provide a pip \\\nrequirements file to install a base set of packages into the new environment.')\n parser.add_argument('-d', '--dont-activate', action='store_false',\n default=True, dest='activate', help=\"After \\\n creation, continue with the existing shell (don't \\\n activate the new environment).\")\n return parser\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
    """Create the workon home directory on first run.

    On POSIX, when neither WORKON_HOME nor XDG_DATA_HOME is set, also
    symlink ~/.virtualenvs to it for backward compatibility with tools
    that expect the classic virtualenvwrapper location.

    Returns True if the directory was created (i.e. this is a first run),
    False otherwise.
    """
    # own() guards against trying to create a directory we have no rights to.
    if not workon_home.exists() and own(workon_home):
        workon_home.mkdir(parents=True)
        link = expandpath('~/.virtualenvs')
        if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
           'XDG_DATA_HOME' not in os.environ and not link.exists():
            link.symlink_to(str(workon_home))
        return True
    else:
        return False
# Directory containing pew's own package data (the shell_config scripts).
pew_site = Path(__file__).parent
def supported_shell():
    """Return the $SHELL's base name when pew ships config for it.

    Only 'bash', 'zsh' and 'fish' are supported; any other (or unset)
    shell yields None.
    """
    name = Path(os.environ.get('SHELL', '')).stem
    return name if name in ('bash', 'zsh', 'fish') else None
def shell_config_cmd(argv):
    "Prints the path for the current $SHELL helper file"
    # Only bash/zsh/fish have an init script shipped with pew.
    shell = supported_shell()
    if shell:
        print(pew_site / 'shell_config' / ('init.' + shell))
    else:
        err('Completions and prompts are unavailable for %s' %
            repr(os.environ.get('SHELL', '')))
def deploy_completions():
    """Copy pew's shell completion files into the system-wide locations.

    Requires write access to /etc and /usr/local (typically root).
    """
    completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
                   'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
                   'complete.fish': Path('/etc/fish/completions/pew.fish')}
    for comp, dest in completions.items():
        if not dest.parent.exists():
            dest.parent.mkdir(parents=True)
        shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
    """Return the project directory associated with *env*, or None.

    The association is stored as the first line of the env's `.project`
    file. Returns None (after warning) when the file is missing or the
    recorded directory no longer exists.
    """
    project_file = workon_home / env / '.project'
    if project_file.exists():
        with project_file.open() as f:
            project_dir = f.readline().strip()
            if os.path.exists(project_dir):
                return project_dir
            else:
                err('Corrupted or outdated:', project_file, '\nDirectory',
                    project_dir, "doesn't exist.")
def unsetenv(key):
    """Remove *key* from the process environment; no-op when absent."""
    os.environ.pop(key, None)
def compute_path(env):
    """Return a PATH string with *env*'s bin/Scripts dir prepended.

    Prepending makes the env's interpreter and scripts shadow the
    system-wide ones.
    """
    envdir = workon_home / env
    return os.pathsep.join([
        str(envdir / env_bin_dir),
        os.environ['PATH'],
    ])
def inve(env, command, *args, **kwargs):
    """Run a command in the given virtual environment.

    Pass additional keyword arguments to ``subprocess.check_call()``.

    Returns the command's exit status; returns 2 (without raising)
    when the command executable cannot be found.
    """
    # we don't strictly need to restore the environment, since pew runs in
    # its own process, but it feels like the right thing to do
    with temp_environ():
        os.environ['VIRTUAL_ENV'] = str(workon_home / env)
        os.environ['PATH'] = compute_path(env)
        # These would override the env's interpreter selection if left set.
        unsetenv('PYTHONHOME')
        unsetenv('__PYVENV_LAUNCHER__')
        try:
            return check_call([command] + list(args), shell=windows, **kwargs)
            # need to have shell=True on windows, otherwise the PYTHONPATH
            # won't inherit the PATH
        except OSError as e:
            # errno 2 == ENOENT: the command itself was not found.
            if e.errno == 2:
                err('Unable to find', command)
                return 2
            else:
                raise
def fork_shell(env, shellcmd, cwd):
    """Launch *shellcmd* as an interactive subshell inside *env*.

    Warns when nesting on top of an already-activated virtualenv.
    Returns the subshell's exit status (via inve).
    """
    or_ctrld = '' if windows else "or 'Ctrl+D' "
    err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
        "to return.", sep='')
    if 'VIRTUAL_ENV' in os.environ:
        err("Be aware that this environment will be nested on top "
            "of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
    return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
    """Launch a bash subshell in *env*, forcing the env's PATH.

    Bash re-reads ~/.bashrc which may clobber PATH, so a temporary
    rcfile is built from the user's bashrc plus an export that
    re-prepends the env's bin dir.
    """
    # bash is a special little snowflake, and prevent_path_errors cannot work there
    # https://github.com/berdario/pew/issues/58#issuecomment-102182346
    bashrcpath = expandpath('~/.bashrc')
    if bashrcpath.exists():
        with NamedTemporaryFile('w+') as rcfile:
            with bashrcpath.open() as bashrc:
                rcfile.write(bashrc.read())
            rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
            rcfile.flush()
            return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
    else:
        return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
    """Launch a Cmder (Windows cmd + init.bat) subshell in *env*."""
    shell_cmd = ['cmd']
    # Run Cmder's init script when present (%CMDER_ROOT% is expanded by cmd).
    cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
    if expandpath(cmderrc_path).exists():
        shell_cmd += ['/k', cmderrc_path]
    if cwd:
        os.environ['CMDER_START'] = cwd
    return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
    """Best-effort detection of the user's shell.

    Prefers $SHELL; on Windows falls back to Cmder, then shellingham
    process inspection, then %COMSPEC%; otherwise plain 'sh'.
    """
    shell = os.environ.get('SHELL', None)
    if not shell:
        if 'CMDER_ROOT' in os.environ:
            shell = 'Cmder'
        elif windows:
            try:
                _, shell = shellingham.detect_shell()
            except shellingham.ShellDetectionFailure:
                shell = os.environ.get('COMSPEC', 'cmd.exe')
        else:
            shell = 'sh'
    return shell
def shell(env, cwd=None):
    """Start an interactive subshell inside virtualenv *env*.

    Sanity-checks PATH handling first (except for shells known to need
    special treatment), then dispatches to the shell-specific fork.
    """
    env = str(env)
    shell = _detect_shell()
    shell_name = Path(shell).stem
    if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
        # On Windows the PATH is usually set with System Utility
        # so we won't worry about trying to check mistakes there
        shell_check = (sys.executable + ' -c "from pew.pew import '
                       'prevent_path_errors; prevent_path_errors()"')
        try:
            inve(env, shell, '-c', shell_check)
        except CalledProcessError:
            # The check itself printed the diagnostic; bail out quietly.
            return
    if shell_name == 'bash':
        return fork_bash(env, cwd)
    elif shell_name == 'Cmder':
        return fork_cmder(env, cwd)
    else:
        return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=None, project=None,
                 requirements=None, rest=None):
    """Create a new virtualenv under $WORKON_HOME.

    Args:
        envname: name of the environment to create.
        python: optional interpreter to build the env with
            (forwarded as virtualenv's --python option).
        packages: optional list of packages to pip-install afterwards.
        project: optional Path of a project dir to associate with the env.
        requirements: optional pip requirements file to install from.
        rest: extra arguments forwarded verbatim to virtualenv.

    The half-created env is removed and the exception re-raised when
    virtualenv fails or the user interrupts.
    """
    # Use None sentinels instead of mutable [] defaults (shared across calls).
    packages = packages if packages is not None else []
    rest = rest if rest is not None else []
    if python:
        rest = ["--python=%s" % python] + rest
    path = (workon_home / envname).absolute()
    try:
        check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
    except (CalledProcessError, KeyboardInterrupt):
        # Don't leave a broken env behind.
        rmvirtualenvs([envname])
        raise
    else:
        if project:
            setvirtualenvproject(envname, project.absolute())
        if requirements:
            inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
        if packages:
            inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
    """Build the argument parser shared by the env-creation commands.

    Defines -p/--python, repeatable -i (packages), -r (requirements
    file) and -d/--dont-activate (stores False into 'activate').
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-p', '--python')
    arg_parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
    arg_parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
    arg_parser.add_argument('-d', '--dont-activate', action='store_false',
                            default=True, dest='activate', help="After \
                        creation, continue with the existing shell (don't \
                        activate the new environment).")
    return arg_parser
def rmvirtualenvs(envs):
    """Delete the given environments from $WORKON_HOME.

    Refuses to delete the currently-activated env. Returns True when
    any deletion failed or was refused, False when all succeeded.
    """
    error_happened = False
    for env in envs:
        env = workon_home / env
        if os.environ.get('VIRTUAL_ENV') == str(env):
            err("ERROR: You cannot remove the active environment (%s)." % env)
            error_happened = True
            break
        try:
            shutil.rmtree(str(env))
        except OSError as e:
            err("Error while trying to remove the {0} env: \n{1}".format
                (env, e.strerror))
            error_happened = True
    return error_happened
def rm_cmd(argv):
    """Remove one or more environment, from $WORKON_HOME."""
    if not argv:
        sys.exit("Please specify an environment")
    return rmvirtualenvs(argv)
def packages(site_packages):
    """Return the distribution names present in a site-packages dir.

    Each entry's stem is truncated at the first '-' (dropping version
    and metadata suffixes); the '__pycache__' entry is excluded.
    """
    names = set()
    for entry in site_packages.iterdir():
        names.add(entry.stem.split('-')[0])
    return names - set(['__pycache__'])
def showvirtualenv(env):
    """Print one env's Python version and its installed packages.

    The package list is word-wrapped to the terminal width, indented
    to line up under the env name.
    """
    columns, _ = get_terminal_size()
    pkgs = sorted(packages(sitepackages_dir(env)))
    env_python = workon_home / env / env_bin_dir / 'python'
    l = len(env) + 2
    # Python 2 prints its version to stderr, Python 3 to stdout; join both.
    version = invoke(str(env_python), '-V')
    version = ' - '.join((version.out + version.err).splitlines())
    print(env, ': ', version, sep='')
    print(textwrap.fill(' '.join(pkgs),
                        width=columns-l,
                        initial_indent=(l * ' '),
                        subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
    """Show the named env, or the active one when no name is given."""
    try:
        showvirtualenv(argv[0])
    except IndexError:
        # No env argument: fall back to the currently activated env.
        if 'VIRTUAL_ENV' in os.environ:
            showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
        else:
            sys.exit('pew show [env]')
def lsenvs():
    """Return the sorted names of all envs under $WORKON_HOME.

    An env counts as such when it contains a python executable in its
    bin/Scripts directory.
    """
    return sorted(set(env.parts[-3] for env in
                      workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
    """List every environment: one detailed entry each when *verbose*,
    otherwise a compact multi-column listing."""
    envs = lsenvs()
    if verbose:
        for env in envs:
            showvirtualenv(env)
    else:
        print_virtualenvs(*envs)
def ls_cmd(argv):
    """List available environments."""
    parser = argparse.ArgumentParser()
    exclusive = parser.add_mutually_exclusive_group()
    exclusive.add_argument('-b', '--brief', action='store_false')
    exclusive.add_argument('-l', '--long', action='store_true')
    options = parser.parse_args(argv)
    lsvirtualenv(options.long)
def parse_envname(argv, no_arg_callback):
    """Validate argv[0] as an existing env name and return it.

    Calls *no_arg_callback* when no name was supplied; exits with an
    error for absolute paths or names of envs that don't exist.
    """
    if len(argv) < 1 or argv[0] is None:
        no_arg_callback()

    env = argv[0]
    # Reject absolute paths: env names are always relative to WORKON_HOME.
    if env.startswith('/'):
        sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
    if not (workon_home / env).exists():
        sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
    else:
        return env
def workon_cmd(argv):
    """List or change working virtual environments."""
    parser = argparse.ArgumentParser(prog='pew workon')
    parser.add_argument('envname', nargs='?')
    parser.add_argument(
        '-n', '--no-cd', action='store_true',
        help=('Do not change working directory to project directory after '
              'activating virtualenv.')
    )
    args = parser.parse_args(argv)

    def list_and_exit():
        # With no env name, just show what's available and stop.
        lsvirtualenv(False)
        sys.exit(0)

    env = parse_envname([args.envname], list_and_exit)

    # Check if the virtualenv has an associated project directory and in
    # this case, use it as the current working directory.
    project_dir = get_project_dir(env)
    if project_dir is None or args.no_cd:
        project_dir = os.getcwd()
    return shell(env, cwd=project_dir)
def sitepackages_dir(env=None):
    """Return the site-packages Path of *env*.

    Args:
        env: env name; defaults to the currently active virtualenv
            ($VIRTUAL_ENV), resolved at call time.

    Exits with an error when no env is given and none is active.
    """
    # The previous signature bound os.environ.get('VIRTUAL_ENV') as the
    # default, freezing the value at import time; resolve it per call so
    # environment changes made after import are honored.
    if env is None:
        env = os.environ.get('VIRTUAL_ENV')
    if not env:
        sys.exit('ERROR: no virtualenv active')
    else:
        # Ask the env's own interpreter, so the answer matches its version.
        env_python = workon_home / env / env_bin_dir / 'python'
        return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
    """Add the specified directories to the Python path for the currently active virtualenv.

    This will be done by placing the directory names in a path file named
    "_virtualenv_path_extensions.pth" inside the virtualenv's site-packages
    directory; if this file does not exists, it will be created first.

    With -d, the given directories are removed from that file instead.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', dest='remove', action='store_true')
    parser.add_argument('dirs', nargs='+')
    args = parser.parse_args(argv)

    extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
    new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
    if not extra_paths.exists():
        with extra_paths.open('w') as extra:
            # First/last-line bookkeeping: records sys.path length so the
            # paths listed in this file end up near the front of sys.path.
            extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')

    def rewrite(f):
        # Apply *f* to the file's lines and write the result back in place.
        with extra_paths.open('r+') as extra:
            to_write = f(extra.readlines())
            extra.seek(0)
            extra.truncate()
            extra.writelines(to_write)

    if args.remove:
        rewrite(lambda ls: [line for line in ls if line not in new_paths])
    else:
        # Insert after the first bookkeeping line, before everything else.
        rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
    """Print the site-packages directory of the active virtualenv."""
    print(sitepackages_dir())
def lssitepackages_cmd(argv):
    """Show the content of the site-packages directory of the current virtualenv."""
    site = sitepackages_dir()
    print(*sorted(site.iterdir()), sep=os.linesep)
    # Also show any extra paths registered via `pew add`.
    extra_paths = site / '_virtualenv_path_extensions.pth'
    if extra_paths.exists():
        print('from _virtualenv_path_extensions.pth:')
        with extra_paths.open() as extra:
            print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
    """Toggle the current virtualenv between having and not having access to the global site-packages."""
    quiet = argv == ['-q']
    site = sitepackages_dir()
    # virtualenv's marker file: its presence blocks global site-packages.
    ngsp_file = site.parent / 'no-global-site-packages.txt'
    if ngsp_file.exists():
        ngsp_file.unlink()
        if not quiet:
            print('Enabled global site-packages')
    else:
        with ngsp_file.open('w'):
            if not quiet:
                print('Disabled global site-packages')
def cp_cmd(argv):
    """Duplicate the named virtualenv to make a new one."""
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('target', nargs='?')
    parser.add_argument('-d', '--dont-activate', action='store_false',
                        default=True, dest='activate', help="After \
                        creation, continue with the existing shell (don't \
                        activate the new environment).")
    args = parser.parse_args(argv)
    target_name = copy_virtualenv_project(args.source, args.target)
    if args.activate:
        # Drop the user into a subshell for the freshly-copied env.
        shell(target_name)
def copy_virtualenv_project(source, target):
    """Clone env *source* into $WORKON_HOME as *target*; return the new name.

    *source* may be a filesystem path or an env name under $WORKON_HOME.
    *target* defaults to the source's basename. Exits when the source
    doesn't exist or the target name is already taken.
    """
    source = expandpath(source)
    if not source.exists():
        # Not a path: try it as an env name under WORKON_HOME.
        source = workon_home / source
        if not source.exists():
            sys.exit('Please provide a valid virtualenv to copy')

    target_name = target or source.name

    target = workon_home / target_name

    if target.exists():
        sys.exit('%s virtualenv already exists in %s.' % (
            target_name, workon_home
        ))

    print('Copying {0} in {1}'.format(source, target_name))
    clone_virtualenv(str(source), str(target))
    return target_name
def rename_cmd(argv):
    """Rename a virtualenv"""
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('target')
    options = parser.parse_args(argv)
    # A rename is a clone followed by deleting the original.
    copy_virtualenv_project(options.source, options.target)
    return rmvirtualenvs([options.source])
def setvirtualenvproject(env, project):
    """Record *project* as the directory associated with env *env*.

    Stored as the contents of the env's `.project` file (read back by
    get_project_dir).
    """
    print('Setting project for {0} to {1}'.format(env, project))
    with (workon_home / env / '.project').open('wb') as prj:
        prj.write(str(project).encode())
def setproject_cmd(argv):
    """Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
    # argv[0] = env name (defaults to active env), argv[1] = project path
    # (defaults to the current directory).
    args = dict(enumerate(argv))
    project = os.path.abspath(args.get(1, '.'))
    env = args.get(0, os.environ.get('VIRTUAL_ENV'))
    if not env:
        sys.exit('pew setproject [virtualenv] [project_path]')
    if not (workon_home / env).exists():
        sys.exit("Environment '%s' doesn't exist." % env)
    if not os.path.isdir(project):
        sys.exit('pew setproject: %s does not exist' % project)
    setvirtualenvproject(env, project)
def getproject_cmd(argv):
    """Print a virtualenv's project directory, if set.

    If called without providing a virtualenv name as argument, print the
    current virtualenv's project directory.

    Exits with an error when no env is active/given, the env doesn't
    exist, or it has no project directory associated.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description="Print an environment's project directory.",
    )
    parser.add_argument(
        'envname',
        nargs='?',
        default=os.environ.get('VIRTUAL_ENV'),
        help=(
            'The name of the environment to return the project directory '
            'for. If omitted, will use the currently active environment.'
        ),
    )
    args = parser.parse_args(argv)

    # Now, do the actual work
    if not args.envname:
        sys.exit('ERROR: no virtualenv active')
    if not (workon_home / args.envname).exists():
        sys.exit("ERROR: Environment '{0}' does not exist."
                 .format(args.envname))
    project_dir = get_project_dir(args.envname)
    if project_dir is None:
        sys.exit("ERROR: no project directory set for Environment '{0}'"
                 .format(args.envname))
    print(project_dir)
def mkproject_cmd(argv):
    """Create a new project directory and its associated virtualenv.

    The project dir is created under $PROJECT_HOME (or the cwd); each
    -t template (a `template_<name>` script in $WORKON_HOME) is run
    inside the new env with the env name and project path as arguments.
    """
    # Handle -l/--list early: argparse would otherwise require envname.
    if '-l' in argv or '--list' in argv:
        templates = [t.name[9:] for t in workon_home.glob("template_*")]
        print("Available project templates:", *templates, sep='\n')
        return

    parser = mkvirtualenv_argparser()
    parser.add_argument('envname')
    parser.add_argument(
        '-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected.  They are applied in the order specified on the \
command line.')
    parser.add_argument(
        '-l', '--list', action='store_true', help='List available templates.')
    args, rest = parser.parse_known_args(argv)

    projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
    if not projects_home.exists():
        sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)

    project = (projects_home / args.envname).absolute()
    if project.exists():
        sys.exit('Project %s already exists.' % args.envname)

    mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
                 args.requirements, rest)

    project.mkdir()

    for template_name in args.templates:
        template = workon_home / ("template_" + template_name)
        inve(args.envname, str(template), args.envname, str(project))
    if args.activate:
        shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
    """Create a temporary virtualenv.

    The env gets a random unused hex name, a subshell is opened in it
    (unless -d), and the env is deleted when the shell exits — also on
    error or Ctrl+C, but without swallowing the exception.
    """
    parser = mkvirtualenv_argparser()
    # '.' is a seed that always "exists" (it is workon_home itself), so the
    # loop always runs at least once and keeps drawing names until unused.
    env = '.'
    while (workon_home / env).exists():
        # '%x' works on both Python 2 and 3; the old hex(...)[2:-1] dropped
        # the final digit on Python 3 (the slice targeted Py2's 'L' suffix).
        env = '%x' % random.getrandbits(64)

    args, rest = parser.parse_known_args(argv)
    mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
                 rest=rest)
    print('This is a temporary environment. It will be deleted when you exit')
    try:
        if args.activate:
            # only used for testing on windows
            shell(env)
    finally:
        # Always clean up, but don't `return` from the finally block: that
        # silently discarded any exception raised by shell()/the subshell.
        error_happened = rmvirtualenvs([env])
    return error_happened
def wipeenv_cmd(argv):
    """Remove all installed packages from the current (or supplied) env."""
    env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')

    if not env:
        sys.exit('ERROR: no virtualenv active')
    elif not (workon_home / env).exists():
        sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
    else:
        env_pip = str(workon_home / env / env_bin_dir / 'pip')
        all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
        # Only plain 'name==version' lines are uninstallable here; the rest
        # (editable installs, VCS urls, ...) are reported as ignored.
        pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
        ignored = sorted(all_pkgs - pkgs)
        pkgs = set(p.split("==")[0] for p in pkgs)
        # Never uninstall the packaging bootstrap packages.
        to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
        if to_remove:
            print("Ignoring:\n %s" % "\n ".join(ignored))
            print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
            return inve(env, 'pip', 'uninstall', '-y', *to_remove)
        else:
            print("Nothing to remove")
def inall_cmd(argv):
    """Run a command in each virtualenv."""
    envs = lsenvs()
    errors = False
    for env in envs:
        print("\n%s:" % env)
        try:
            inve(env, *argv)
        except CalledProcessError as e:
            # Keep going through the remaining envs; report failure at exit.
            errors = True
            err(e)
    sys.exit(errors)
def in_cmd(argv):
    """Run a command in the given virtualenv."""
    # With only an env name and no command, behave like `pew workon`.
    if len(argv) == 1:
        return workon_cmd(argv)

    parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))

    return inve(*argv)
def restore_cmd(argv):
    """Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
    if len(argv) < 1:
        sys.exit('You must provide a valid virtualenv to target')
    env = argv[0]
    path = workon_home / env
    py = path / env_bin_dir / ('python.exe' if windows else 'python')
    # Resolve the symlink to discover which exact interpreter the env used.
    exact_py = py.resolve().name

    return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
    """Print the path for the virtualenv directory"""
    target = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
    print(workon_home / target)
def install_cmd(argv):
    '''Use Pythonz to download and build the specified Python version'''
    installer = InstallCommand()
    options, versions = installer.parser.parse_args(argv)
    # Exactly one version must be requested per invocation.
    if len(versions) != 1:
        installer.parser.print_help()
        sys.exit(1)
    else:
        try:
            actual_installer = PythonInstaller.get_installer(versions[0], options)
            return actual_installer.install()
        except AlreadyInstalledError as e:
            # Not an error from the user's point of view: just report it.
            print(e)
def uninstall_cmd(argv):
    '''Use Pythonz to uninstall the specified Python version'''
    return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
    '''List the pythons installed by Pythonz (or all the installable ones)'''
    return ListPythons().run(argv)
def locate_python_cmd(argv):
    '''Locate the path for the python version installed by Pythonz'''
    return LocatePython().run(argv)
def version_cmd(argv):
    """Prints current pew version"""
    # Imported lazily: only this command needs setuptools' pkg_resources.
    import pkg_resources

    try:
        __version__ = pkg_resources.get_distribution('pew').version
    except pkg_resources.DistributionNotFound:
        __version__ = 'unknown'
        print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)

    print(__version__)
def prevent_path_errors():
    """Abort with a diagnostic if an env claims to be active but isn't on PATH.

    Run inside a candidate subshell by `shell()` to detect corrupted envs
    or shell configs that clobber PATH.
    """
    if 'VIRTUAL_ENV' in os.environ and not check_path():
        sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
    """One-time interactive setup run on pew's first invocation.

    For a supported shell, offer to add the pew shell_config source line
    to its rc file (or print instructions when no rc file exists), then
    pause before continuing with the originally requested command.
    """
    shell = supported_shell()
    if shell:
        # fish uses command substitution syntax different from POSIX shells.
        if shell == 'fish':
            source_cmd = 'source (pew shell_config)'
        else:
            source_cmd = 'source "$(pew shell_config)"'
        rcpath = expandpath({'bash': '~/.bashrc'
                           , 'zsh': '~/.zshrc'
                           , 'fish': '~/.config/fish/config.fish'}[shell])
        if rcpath.exists():
            update_config_file(rcpath, source_cmd)
        else:
            # Fixed message typos ("want source shell competions", "Add").
            print("It seems that you're running pew for the first time\n"
                  "If you want to source shell completions and update your prompt, "
                  "add the following line to your shell config file:\n %s" % source_cmd)
    print('\nWill now continue with the command:', *sys.argv[1:])
    input('[enter]')
def update_config_file(rcpath, source_cmd):
    """Interactively offer to append *source_cmd* to shell rc file *rcpath*.

    Does nothing when the line is already present; otherwise prompts the
    user (default No) and either appends the line or prints manual
    instructions.
    """
    with rcpath.open('r+') as rcfile:
        if source_cmd not in (line.strip() for line in rcfile.readlines()):
            choice = 'X'
            while choice not in ('y', '', 'n'):
                choice = input("It seems that you're running pew for the first time\n"
                               "do you want to modify %s to source completions and"
                               " update your prompt? [y/N]\n> " % rcpath).lower()
            if choice == 'y':
                rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
                print('Done')
            else:
                print('\nOk, if you want to do it manually, just add\n %s\nat'
                      ' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
    """Print the help listing of all pew sub-commands.

    Each command's one-line summary (first docstring line) is wrapped to
    the terminal width and aligned in a column after the command name.
    """
    longest = max(map(len, cmds)) + 3
    columns, _ = get_terminal_size()

    print('Available commands:\n')
    for cmd, fun in sorted(cmds.items()):
        if fun.__doc__:
            print(textwrap.fill(
                fun.__doc__.splitlines()[0],
                columns or 1000,
                initial_indent=(' {0}: '.format(cmd)).ljust(longest),
                subsequent_indent=longest * ' '))
        else:
            print(' ' + cmd)
def pew():
    """Console entry point: dispatch argv[1] to the matching *_cmd function.

    On first run, offers interactive shell setup. Returns the command's
    result (used as the process exit status by the console script).
    """
    first_run = makedirs_and_symlink_if_needed(workon_home)
    if first_run and sys.stdin.isatty():
        first_run_setup()

    # Commands are discovered by naming convention: every module-level
    # function ending in '_cmd' becomes the sub-command of the same name.
    cmds = dict((cmd[:-4], fun)
                for cmd, fun in globals().items() if cmd.endswith('_cmd'))
    if sys.argv[1:]:
        if sys.argv[1] in cmds:
            command = cmds[sys.argv[1]]
            try:
                return command(sys.argv[2:])
            except CalledProcessError as e:
                # Propagate the failing subprocess's status as our own.
                return e.returncode
            except KeyboardInterrupt:
                pass
        else:
            err("ERROR: command", sys.argv[1], "does not exist.")
            print_commands(cmds)
            sys.exit(1)
    else:
        print_commands(cmds)
|
berdario/pew | pew/pew.py | ls_cmd | python | def ls_cmd(argv):
parser = argparse.ArgumentParser()
p_group = parser.add_mutually_exclusive_group()
p_group.add_argument('-b', '--brief', action='store_false')
p_group.add_argument('-l', '--long', action='store_true')
args = parser.parse_args(argv)
lsvirtualenv(args.long) | List available environments. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L345-L352 | [
"def lsvirtualenv(verbose):\n envs = lsenvs()\n\n if not verbose:\n print_virtualenvs(*envs)\n else:\n for env in envs:\n showvirtualenv(env)\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
if not workon_home.exists() and own(workon_home):
workon_home.mkdir(parents=True)
link = expandpath('~/.virtualenvs')
if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
'XDG_DATA_HOME' not in os.environ and not link.exists():
link.symlink_to(str(workon_home))
return True
else:
return False
pew_site = Path(__file__).parent
def supported_shell():
shell = Path(os.environ.get('SHELL', '')).stem
if shell in ('bash', 'zsh', 'fish'):
return shell
def shell_config_cmd(argv):
"Prints the path for the current $SHELL helper file"
shell = supported_shell()
if shell:
print(pew_site / 'shell_config' / ('init.' + shell))
else:
err('Completions and prompts are unavailable for %s' %
repr(os.environ.get('SHELL', '')))
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
project_file = workon_home / env / '.project'
if project_file.exists():
with project_file.open() as f:
project_dir = f.readline().strip()
if os.path.exists(project_dir):
return project_dir
else:
err('Corrupted or outdated:', project_file, '\nDirectory',
project_dir, "doesn't exist.")
def unsetenv(key):
if key in os.environ:
del os.environ[key]
def compute_path(env):
envdir = workon_home / env
return os.pathsep.join([
str(envdir / env_bin_dir),
os.environ['PATH'],
])
def inve(env, command, *args, **kwargs):
"""Run a command in the given virtual environment.
Pass additional keyword arguments to ``subprocess.check_call()``."""
# we don't strictly need to restore the environment, since pew runs in
# its own process, but it feels like the right thing to do
with temp_environ():
os.environ['VIRTUAL_ENV'] = str(workon_home / env)
os.environ['PATH'] = compute_path(env)
unsetenv('PYTHONHOME')
unsetenv('__PYVENV_LAUNCHER__')
try:
return check_call([command] + list(args), shell=windows, **kwargs)
# need to have shell=True on windows, otherwise the PYTHONPATH
# won't inherit the PATH
except OSError as e:
if e.errno == 2:
err('Unable to find', command)
return 2
else:
raise
def fork_shell(env, shellcmd, cwd):
or_ctrld = '' if windows else "or 'Ctrl+D' "
err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
"to return.", sep='')
if 'VIRTUAL_ENV' in os.environ:
err("Be aware that this environment will be nested on top "
"of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
# bash is a special little snowflake, and prevent_path_errors cannot work there
# https://github.com/berdario/pew/issues/58#issuecomment-102182346
bashrcpath = expandpath('~/.bashrc')
if bashrcpath.exists():
with NamedTemporaryFile('w+') as rcfile:
with bashrcpath.open() as bashrc:
rcfile.write(bashrc.read())
rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
rcfile.flush()
return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
else:
return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
shell_cmd = ['cmd']
cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
if expandpath(cmderrc_path).exists():
shell_cmd += ['/k', cmderrc_path]
if cwd:
os.environ['CMDER_START'] = cwd
return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
env = str(env)
shell = _detect_shell()
shell_name = Path(shell).stem
if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
# On Windows the PATH is usually set with System Utility
# so we won't worry about trying to check mistakes there
shell_check = (sys.executable + ' -c "from pew.pew import '
'prevent_path_errors; prevent_path_errors()"')
try:
inve(env, shell, '-c', shell_check)
except CalledProcessError:
return
if shell_name == 'bash':
return fork_bash(env, cwd)
elif shell_name == 'Cmder':
return fork_cmder(env, cwd)
else:
return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
requirements=None, rest=[]):
if python:
rest = ["--python=%s" % python] + rest
path = (workon_home / envname).absolute()
try:
check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
except (CalledProcessError, KeyboardInterrupt):
rmvirtualenvs([envname])
raise
else:
if project:
setvirtualenvproject(envname, project.absolute())
if requirements:
inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
if packages:
inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python')
parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
return parser
def new_cmd(argv):
    """Create a new environment, in $WORKON_HOME."""
    parser = mkvirtualenv_argparser()
    parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')

    parser.add_argument('envname')
    # Unrecognized arguments are forwarded verbatim to virtualenv.
    args, rest = parser.parse_known_args(argv)
    project = expandpath(args.project) if args.project else None

    mkvirtualenv(args.envname, args.python, args.packages, project,
                 args.requirements, rest)
    if args.activate:
        shell(args.envname)
def rmvirtualenvs(envs):
error_happened = False
for env in envs:
env = workon_home / env
if os.environ.get('VIRTUAL_ENV') == str(env):
err("ERROR: You cannot remove the active environment (%s)." % env)
error_happened = True
break
try:
shutil.rmtree(str(env))
except OSError as e:
err("Error while trying to remove the {0} env: \n{1}".format
(env, e.strerror))
error_happened = True
return error_happened
def rm_cmd(argv):
"""Remove one or more environment, from $WORKON_HOME."""
if len(argv) < 1:
sys.exit("Please specify an environment")
return rmvirtualenvs(argv)
def packages(site_packages):
nodes = site_packages.iterdir()
return set([x.stem.split('-')[0] for x in nodes]) - set(['__pycache__'])
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
return sorted(set(env.parts[-3] for env in
workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def parse_envname(argv, no_arg_callback):
if len(argv) < 1 or argv[0] is None:
no_arg_callback()
env = argv[0]
if env.startswith('/'):
sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
if not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
else:
return env
def workon_cmd(argv):
    """List or change working virtual environments."""
    parser = argparse.ArgumentParser(prog='pew workon')
    parser.add_argument('envname', nargs='?')
    parser.add_argument(
        '-n', '--no-cd', action='store_true',
        help=('Do not change working directory to project directory after '
              'activating virtualenv.')
    )
    args = parser.parse_args(argv)
    # With no envname, just print the available environments and stop.
    def list_and_exit():
        lsvirtualenv(False)
        sys.exit(0)
    env = parse_envname([args.envname], list_and_exit)
    # Check if the virtualenv has an associated project directory and in
    # this case, use it as the current working directory.
    project_dir = get_project_dir(env)
    if project_dir is None or args.no_cd:
        project_dir = os.getcwd()
    return shell(env, cwd=project_dir)
def sitepackages_dir(env=None):
    """Return the site-packages Path of *env* (default: the active virtualenv).

    The active environment is looked up at call time. The original
    signature used ``env=os.environ.get('VIRTUAL_ENV')``, which is
    evaluated only once — at import time — so activations/deactivations
    after the module was imported were not seen.
    """
    if env is None:
        env = os.environ.get('VIRTUAL_ENV')
    if not env:
        sys.exit('ERROR: no virtualenv active')
    else:
        env_python = workon_home / env / env_bin_dir / 'python'
        # Ask the env's own interpreter where its site-packages lives.
        return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
    """Add the specified directories to the Python path for the currently active virtualenv.
    This will be done by placing the directory names in a path file named
    "virtualenv_path_extensions.pth" inside the virtualenv's site-packages
    directory; if this file does not exists, it will be created first.
    """
    parser = argparse.ArgumentParser()
    # -d removes the given directories instead of adding them.
    parser.add_argument('-d', dest='remove', action='store_true')
    parser.add_argument('dirs', nargs='+')
    args = parser.parse_args(argv)
    extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
    new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
    if not extra_paths.exists():
        with extra_paths.open('w') as extra:
            # .pth lines beginning with 'import' are executed by site.py;
            # these two reshuffle sys.path so the entries added below end
            # up ahead of the default ones.
            extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
    # Rewrite the .pth in place, transforming its lines with f.
    def rewrite(f):
        with extra_paths.open('r+') as extra:
            to_write = f(extra.readlines())
            extra.seek(0)
            extra.truncate()
            extra.writelines(to_write)
    if args.remove:
        rewrite(lambda ls: [line for line in ls if line not in new_paths])
    else:
        # Keep the first bookkeeping line on top, insert new paths after it.
        rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
    """Print the site-packages directory of the active virtualenv."""
    site = sitepackages_dir()
    print(site)
def lssitepackages_cmd(argv):
    """Show the content of the site-packages directory of the current virtualenv."""
    site = sitepackages_dir()
    print(*sorted(site.iterdir()), sep=os.linesep)
    # Also show any extra paths registered via 'pew add'.
    extra_paths = site / '_virtualenv_path_extensions.pth'
    if extra_paths.exists():
        print('from _virtualenv_path_extensions.pth:')
        with extra_paths.open() as extra:
            print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
    """Toggle the current virtualenv between having and not having access to the global site-packages."""
    quiet = argv == ['-q']
    site = sitepackages_dir()
    # Marker file next to site-packages: present -> isolated env,
    # absent -> global site-packages visible (per the printed messages).
    ngsp_file = site.parent / 'no-global-site-packages.txt'
    if ngsp_file.exists():
        ngsp_file.unlink()
        if not quiet:
            print('Enabled global site-packages')
    else:
        # Just create the (empty) marker file.
        with ngsp_file.open('w'):
            if not quiet:
                print('Disabled global site-packages')
def cp_cmd(argv):
    """Duplicate the named virtualenv to make a new one."""
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    # target defaults to the source's own name (see copy_virtualenv_project).
    parser.add_argument('target', nargs='?')
    parser.add_argument('-d', '--dont-activate', action='store_false',
                        default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
    args = parser.parse_args(argv)
    target_name = copy_virtualenv_project(args.source, args.target)
    if args.activate:
        # Drop the user into the freshly copied environment.
        shell(target_name)
def copy_virtualenv_project(source, target):
    """Clone *source* (a path, or an env name) into $WORKON_HOME/*target*.

    Returns the name of the new environment; exits if the source cannot
    be found or the target already exists.
    """
    source = expandpath(source)
    if not source.exists():
        # Not a filesystem path: fall back to a name inside $WORKON_HOME.
        source = workon_home / source
        if not source.exists():
            sys.exit('Please provide a valid virtualenv to copy')
    target_name = target or source.name
    target = workon_home / target_name
    if target.exists():
        sys.exit('%s virtualenv already exists in %s.' % (
            target_name, workon_home
        ))
    print('Copying {0} in {1}'.format(source, target_name))
    clone_virtualenv(str(source), str(target))
    return target_name
def rename_cmd(argv):
    """Rename a virtualenv"""
    parser = argparse.ArgumentParser()
    for positional in ('source', 'target'):
        parser.add_argument(positional)
    opts = parser.parse_args(argv)
    # A rename is a copy followed by removal of the original.
    copy_virtualenv_project(opts.source, opts.target)
    return rmvirtualenvs([opts.source])
def setvirtualenvproject(env, project):
    """Record *project* as the project directory of *env* (in its .project file)."""
    print('Setting project for {0} to {1}'.format(env, project))
    with (workon_home / env / '.project').open('wb') as prj:
        prj.write(str(project).encode())
def setproject_cmd(argv):
    """Given a virtualenv directory and a project directory, set the
    virtualenv up to be associated with the project.

    Both arguments are optional: the env defaults to the active
    $VIRTUAL_ENV and the project to the current directory.
    """
    env = argv[0] if len(argv) > 0 else os.environ.get('VIRTUAL_ENV')
    project = os.path.abspath(argv[1] if len(argv) > 1 else '.')
    if not env:
        sys.exit('pew setproject [virtualenv] [project_path]')
    if not (workon_home / env).exists():
        sys.exit("Environment '%s' doesn't exist." % env)
    if not os.path.isdir(project):
        sys.exit('pew setproject: %s does not exist' % project)
    setvirtualenvproject(env, project)
def getproject_cmd(argv):
    """Print a virtualenv's project directory, if set.
    If called without providing a virtualenv name as argument, print the
    current virtualenv's project directory.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description="Print an environment's project directory.",
    )
    parser.add_argument(
        'envname',
        nargs='?',
        # Looked up when the parser is built, i.e. on every call.
        default=os.environ.get('VIRTUAL_ENV'),
        help=(
            'The name of the environment to return the project directory '
            'for. If omitted, will use the currently active environment.'
        ),
    )
    args = parser.parse_args(argv)
    # Now, do the actual work
    if not args.envname:
        sys.exit('ERROR: no virtualenv active')
    if not (workon_home / args.envname).exists():
        sys.exit("ERROR: Environment '{0}' does not exist."
                 .format(args.envname))
    project_dir = get_project_dir(args.envname)
    if project_dir is None:
        sys.exit("ERROR: no project directory set for Environment '{0}'"
                 .format(args.envname))
    print(project_dir)
def mkproject_cmd(argv):
    """Create a new project directory and its associated virtualenv."""
    # Handle -l/--list before the real parser runs, because envname is a
    # required positional and would make the parse fail.
    if '-l' in argv or '--list' in argv:
        # t.name[9:] strips the 'template_' prefix.
        templates = [t.name[9:] for t in workon_home.glob("template_*")]
        print("Available project templates:", *templates, sep='\n')
        return
    parser = mkvirtualenv_argparser()
    parser.add_argument('envname')
    parser.add_argument(
        '-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
    parser.add_argument(
        '-l', '--list', action='store_true', help='List available templates.')
    args, rest = parser.parse_known_args(argv)
    projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
    if not projects_home.exists():
        sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
    project = (projects_home / args.envname).absolute()
    if project.exists():
        sys.exit('Project %s already exists.' % args.envname)
    mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
                 args.requirements, rest)
    project.mkdir()
    # Each template is an executable named template_<name> in $WORKON_HOME,
    # run inside the new env with the env name and project path as args.
    for template_name in args.templates:
        template = workon_home / ("template_" + template_name)
        inve(args.envname, str(template), args.envname, str(project))
    if args.activate:
        shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
    """Create a temporary virtualenv."""
    parser = mkvirtualenv_argparser()
    # Pick a random, unused name. '.' always exists inside $WORKON_HOME,
    # so the loop body runs at least once.
    env = '.'
    while (workon_home / env).exists():
        # '%x' formats the 64 random bits portably. The previous
        # hex(...)[2:-1] slicing assumed a trailing 'L' (Python 2 longs)
        # and silently chopped the last hex digit off on Python 3.
        env = '%x' % random.getrandbits(64)
    args, rest = parser.parse_known_args(argv)
    mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
                 rest=rest)
    print('This is a temporary environment. It will be deleted when you exit')
    try:
        if args.activate:
            # only used for testing on windows
            shell(env)
    finally:
        # NOTE(review): returning from finally guarantees cleanup but also
        # swallows any exception raised by shell(); behavior kept as-is.
        return rmvirtualenvs([env])
def wipeenv_cmd(argv):
    """Remove all installed packages from the current (or supplied) env."""
    env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
    if not env:
        sys.exit('ERROR: no virtualenv active')
    elif not (workon_home / env).exists():
        sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
    else:
        env_pip = str(workon_home / env / env_bin_dir / 'pip')
        all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
        # Only plain 'name==version' freeze lines are uninstalled here;
        # anything else (editable/VCS requirements) is reported and kept.
        pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
        ignored = sorted(all_pkgs - pkgs)
        pkgs = set(p.split("==")[0] for p in pkgs)
        # distribute/wsgiref are preserved — presumably because the
        # virtualenv itself needs them (TODO confirm).
        to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
        if to_remove:
            print("Ignoring:\n %s" % "\n ".join(ignored))
            print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
            return inve(env, 'pip', 'uninstall', '-y', *to_remove)
        else:
            print("Nothing to remove")
def inall_cmd(argv):
    """Run a command in each virtualenv."""
    envs = lsenvs()
    errors = False
    for env in envs:
        print("\n%s:" % env)
        try:
            inve(env, *argv)
        except CalledProcessError as e:
            # Keep going through the remaining envs; report failure via
            # the process exit status at the end.
            errors = True
            err(e)
    sys.exit(errors)
def in_cmd(argv):
    """Run a command in the given virtualenv."""
    if len(argv) == 1:
        # Only an env name given: behave like 'pew workon <env>'.
        return workon_cmd(argv)
    parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
    return inve(*argv)
def restore_cmd(argv):
    """Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
    if len(argv) < 1:
        sys.exit('You must provide a valid virtualenv to target')
    env = argv[0]
    path = workon_home / env
    py = path / env_bin_dir / ('python.exe' if windows else 'python')
    # resolve() follows the symlink so the rebuild uses the exact
    # interpreter name (e.g. 'python3.6') the env was created with.
    exact_py = py.resolve().name
    return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
    """Print the path for the virtualenv directory"""
    # parse_envname exits on a missing or invalid name.
    env = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
    print(workon_home / env)
def install_cmd(argv):
    '''Use Pythonz to download and build the specified Python version'''
    installer = InstallCommand()
    # pythonz's parser returns (options, positional_args).
    options, versions = installer.parser.parse_args(argv)
    if len(versions) != 1:
        installer.parser.print_help()
        sys.exit(1)
    else:
        try:
            actual_installer = PythonInstaller.get_installer(versions[0], options)
            return actual_installer.install()
        except AlreadyInstalledError as e:
            # Not a failure for our purposes: report it and return None.
            print(e)
def uninstall_cmd(argv):
    '''Use Pythonz to uninstall the specified Python version'''
    # Delegates straight to pythonz (stubbed out where unsupported).
    return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
    '''List the pythons installed by Pythonz (or all the installable ones)'''
    # ListPythons also ensures the pythonz install dir exists.
    return ListPythons().run(argv)
def locate_python_cmd(argv):
    '''Locate the path for the python version installed by Pythonz'''
    # Delegates straight to pythonz (stubbed out where unsupported).
    return LocatePython().run(argv)
def version_cmd(argv):
    """Prints current pew version"""
    import pkg_resources

    try:
        version = pkg_resources.get_distribution('pew').version
    except pkg_resources.DistributionNotFound:
        # pew itself isn't installed as a distribution; fall back.
        version = 'unknown'
        print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
    print(version)
def prevent_path_errors():
    """Exit with a diagnostic when $PATH does not match the active virtualenv."""
    if 'VIRTUAL_ENV' in os.environ and not check_path():
        sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
    """On first run, offer to hook pew's shell helper into the user's rc file."""
    shell = supported_shell()
    if shell:
        # fish has its own sourcing syntax.
        if shell == 'fish':
            source_cmd = 'source (pew shell_config)'
        else:
            source_cmd = 'source "$(pew shell_config)"'
        rcpath = expandpath({'bash': '~/.bashrc'
                            , 'zsh': '~/.zshrc'
                            , 'fish': '~/.config/fish/config.fish'}[shell])
        if rcpath.exists():
            update_config_file(rcpath, source_cmd)
        else:
            # (sic) typos live in this user-facing string; fixing them
            # would be a behavior change, not a doc change.
            print("It seems that you're running pew for the first time\n"
                  "If you want source shell competions and update your prompt, "
                  "Add the following line to your shell config file:\n %s" % source_cmd)
    # Pause so the user sees the message before the real command runs.
    print('\nWill now continue with the command:', *sys.argv[1:])
    input('[enter]')
def update_config_file(rcpath, source_cmd):
    """Ask for permission, then append *source_cmd* to the rc file at *rcpath*."""
    with rcpath.open('r+') as rcfile:
        # Only prompt when the line is not already present.
        if source_cmd not in (line.strip() for line in rcfile.readlines()):
            choice = 'X'
            # Accept 'y', 'n' or bare enter (treated as No).
            while choice not in ('y', '', 'n'):
                choice = input("It seems that you're running pew for the first time\n"
                               "do you want to modify %s to source completions and"
                               " update your prompt? [y/N]\n> " % rcpath).lower()
            if choice == 'y':
                rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
                print('Done')
            else:
                print('\nOk, if you want to do it manually, just add\n %s\nat'
                      ' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
    """Print a formatted help listing for a {name: function} command map.

    Each command shows the first line of its docstring, wrapped to the
    terminal width; commands without a docstring are listed bare.
    """
    pad = max(map(len, cmds)) + 3
    columns, _ = get_terminal_size()
    print('Available commands:\n')
    for name in sorted(cmds):
        doc = cmds[name].__doc__
        if doc:
            summary = doc.splitlines()[0]
            print(textwrap.fill(summary,
                                columns or 1000,
                                initial_indent=(' {0}: '.format(name)).ljust(pad),
                                subsequent_indent=pad * ' '))
        else:
            print(' ' + name)
def pew():
    """Main entry point: dispatch sys.argv[1] to the matching *_cmd function."""
    first_run = makedirs_and_symlink_if_needed(workon_home)
    if first_run and sys.stdin.isatty():
        first_run_setup()
    # Command table built by reflection: every module-level name that
    # ends in '_cmd' becomes a subcommand (with the suffix stripped).
    cmds = dict((cmd[:-4], fun)
                for cmd, fun in globals().items() if cmd.endswith('_cmd'))
    if sys.argv[1:]:
        if sys.argv[1] in cmds:
            command = cmds[sys.argv[1]]
            try:
                return command(sys.argv[2:])
            except CalledProcessError as e:
                # Propagate the child's exit status.
                return e.returncode
            except KeyboardInterrupt:
                pass
        else:
            err("ERROR: command", sys.argv[1], "does not exist.")
            print_commands(cmds)
            sys.exit(1)
    else:
        print_commands(cmds)
|
berdario/pew | pew/pew.py | workon_cmd | python | def workon_cmd(argv):
parser = argparse.ArgumentParser(prog='pew workon')
parser.add_argument('envname', nargs='?')
parser.add_argument(
'-n', '--no-cd', action='store_true',
help=('Do not change working directory to project directory after '
'activating virtualenv.')
)
args = parser.parse_args(argv)
def list_and_exit():
lsvirtualenv(False)
sys.exit(0)
env = parse_envname([args.envname], list_and_exit)
# Check if the virtualenv has an associated project directory and in
# this case, use it as the current working directory.
project_dir = get_project_dir(env)
if project_dir is None or args.no_cd:
project_dir = os.getcwd()
return shell(env, cwd=project_dir) | List or change working virtual environments. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L367-L390 | [
"def shell(env, cwd=None):\n env = str(env)\n shell = _detect_shell()\n shell_name = Path(shell).stem\n if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):\n # On Windows the PATH is usually set with System Utility\n # so we won't worry about trying to check mistakes there\n shell_check = (sys.executable + ' -c \"from pew.pew import '\n 'prevent_path_errors; prevent_path_errors()\"')\n try:\n inve(env, shell, '-c', shell_check)\n except CalledProcessError:\n return\n if shell_name == 'bash':\n return fork_bash(env, cwd)\n elif shell_name == 'Cmder':\n return fork_cmder(env, cwd)\n else:\n return fork_shell(env, [shell], cwd)\n",
"def get_project_dir(env):\n project_file = workon_home / env / '.project'\n if project_file.exists():\n with project_file.open() as f:\n project_dir = f.readline().strip()\n if os.path.exists(project_dir):\n return project_dir\n else:\n err('Corrupted or outdated:', project_file, '\\nDirectory',\n project_dir, \"doesn't exist.\")\n",
"def parse_envname(argv, no_arg_callback):\n if len(argv) < 1 or argv[0] is None:\n no_arg_callback()\n\n env = argv[0]\n if env.startswith('/'):\n sys.exit(\"ERROR: Invalid environment name '{0}'.\".format(env))\n if not (workon_home / env).exists():\n sys.exit(\"ERROR: Environment '{0}' does not exist. Create it with \\\n'pew new {0}'.\".format(env))\n else:\n return env\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
    """Create $WORKON_HOME on first run; return True if it was created.

    On POSIX, when neither WORKON_HOME nor XDG_DATA_HOME is customised,
    also create a ~/.virtualenvs symlink pointing at it.
    """
    if not workon_home.exists() and own(workon_home):
        workon_home.mkdir(parents=True)
        link = expandpath('~/.virtualenvs')
        if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
            'XDG_DATA_HOME' not in os.environ and not link.exists():
            link.symlink_to(str(workon_home))
        return True
    else:
        return False
pew_site = Path(__file__).parent
def supported_shell():
    """Return the $SHELL basename if pew ships helpers for it, else None."""
    name = Path(os.environ.get('SHELL', '')).stem
    if name in ('bash', 'zsh', 'fish'):
        return name
    return None
def shell_config_cmd(argv):
    "Prints the path for the current $SHELL helper file"
    shell = supported_shell()
    if shell:
        # init.<shell> ships inside the installed pew package.
        print(pew_site / 'shell_config' / ('init.' + shell))
    else:
        err('Completions and prompts are unavailable for %s' %
            repr(os.environ.get('SHELL', '')))
def deploy_completions():
    """Copy the bundled completion scripts to their system-wide locations."""
    completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
                   'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
                   'complete.fish': Path('/etc/fish/completions/pew.fish')}
    for comp, dest in completions.items():
        if not dest.parent.exists():
            dest.parent.mkdir(parents=True)
        shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
    """Return the project directory recorded for *env*, or None.

    The association is stored as a single line in the env's .project
    file; a stale entry (directory gone) is reported and treated as unset.
    """
    project_file = workon_home / env / '.project'
    if project_file.exists():
        with project_file.open() as f:
            project_dir = f.readline().strip()
            if os.path.exists(project_dir):
                return project_dir
            else:
                err('Corrupted or outdated:', project_file, '\nDirectory',
                    project_dir, "doesn't exist.")
def unsetenv(key):
    """Remove *key* from os.environ if present; no-op otherwise.

    pop() with a default replaces the original membership-check-then-delete
    pair (one lookup instead of two, and no race between them).
    """
    os.environ.pop(key, None)
def compute_path(env):
    """Return a PATH string with *env*'s bin directory prepended."""
    envdir = workon_home / env
    return os.pathsep.join([
        str(envdir / env_bin_dir),
        os.environ['PATH'],
    ])
def inve(env, command, *args, **kwargs):
    """Run a command in the given virtual environment.
    Pass additional keyword arguments to ``subprocess.check_call()``."""
    # we don't strictly need to restore the environment, since pew runs in
    # its own process, but it feels like the right thing to do
    with temp_environ():
        os.environ['VIRTUAL_ENV'] = str(workon_home / env)
        os.environ['PATH'] = compute_path(env)
        # A stray PYTHONHOME (or the macOS venv launcher variable) would
        # interfere with the environment's own interpreter.
        unsetenv('PYTHONHOME')
        unsetenv('__PYVENV_LAUNCHER__')
        try:
            return check_call([command] + list(args), shell=windows, **kwargs)
        # need to have shell=True on windows, otherwise the PYTHONPATH
        # won't inherit the PATH
        except OSError as e:
            # errno 2 == ENOENT: the command itself was not found.
            if e.errno == 2:
                err('Unable to find', command)
                return 2
            else:
                raise
def fork_shell(env, shellcmd, cwd):
    """Spawn *shellcmd* inside *env* and wait for it to exit."""
    or_ctrld = '' if windows else "or 'Ctrl+D' "
    err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
        "to return.", sep='')
    # Warn when a virtualenv is already active: the new one nests on top.
    if 'VIRTUAL_ENV' in os.environ:
        err("Be aware that this environment will be nested on top "
            "of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
    return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
    """Spawn bash inside *env*, replaying ~/.bashrc with the env's PATH pinned.

    The user's rc file is copied into a temp file and an export that
    re-asserts the environment's PATH is appended, so bash's own startup
    cannot undo pew's PATH setup.
    """
    # bash is a special little snowflake, and prevent_path_errors cannot work there
    # https://github.com/berdario/pew/issues/58#issuecomment-102182346
    bashrcpath = expandpath('~/.bashrc')
    if bashrcpath.exists():
        with NamedTemporaryFile('w+') as rcfile:
            with bashrcpath.open() as bashrc:
                rcfile.write(bashrc.read())
            rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
            rcfile.flush()
            return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
    else:
        return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
    """Spawn a Cmder session (cmd.exe plus Cmder's init.bat) inside *env*."""
    shell_cmd = ['cmd']
    cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
    if expandpath(cmderrc_path).exists():
        shell_cmd += ['/k', cmderrc_path]
    if cwd:
        # Presumably Cmder reads CMDER_START as its start directory —
        # TODO confirm against Cmder's docs.
        os.environ['CMDER_START'] = cwd
    return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
    """Best-effort detection of the user's shell executable.

    Prefers $SHELL; on Windows falls back to Cmder (if CMDER_ROOT is set),
    then shellingham detection, then %COMSPEC%; elsewhere defaults to sh.
    """
    shell = os.environ.get('SHELL', None)
    if not shell:
        if 'CMDER_ROOT' in os.environ:
            shell = 'Cmder'
        elif windows:
            try:
                _, shell = shellingham.detect_shell()
            except shellingham.ShellDetectionFailure:
                shell = os.environ.get('COMSPEC', 'cmd.exe')
        else:
            shell = 'sh'
    return shell
def shell(env, cwd=None):
    """Open an interactive subshell inside *env*.

    bash and Cmder need dedicated forks; for other POSIX-style shells a
    PATH sanity check is run first via prevent_path_errors.
    """
    env = str(env)
    shell = _detect_shell()
    shell_name = Path(shell).stem
    if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
        # On Windows the PATH is usually set with System Utility
        # so we won't worry about trying to check mistakes there
        shell_check = (sys.executable + ' -c "from pew.pew import '
                       'prevent_path_errors; prevent_path_errors()"')
        try:
            inve(env, shell, '-c', shell_check)
        except CalledProcessError:
            # The check already printed its diagnostic; don't fork a shell.
            return
    if shell_name == 'bash':
        return fork_bash(env, cwd)
    elif shell_name == 'Cmder':
        return fork_cmder(env, cwd)
    else:
        return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
                 requirements=None, rest=[]):
    """Create $WORKON_HOME/envname with virtualenv, then do initial setup.

    Optionally pins the interpreter (*python*), associates a *project*
    directory, installs a *requirements* file and/or extra *packages*;
    *rest* is passed through to virtualenv verbatim.
    """
    if python:
        rest = ["--python=%s" % python] + rest
    path = (workon_home / envname).absolute()
    try:
        check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
    except (CalledProcessError, KeyboardInterrupt):
        # Don't leave a half-built environment behind.
        rmvirtualenvs([envname])
        raise
    else:
        if project:
            setvirtualenvproject(envname, project.absolute())
        if requirements:
            inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
        if packages:
            inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
    """Build the argument parser shared by the new/mkproject/mktmpenv commands."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--python')
    parser.add_argument(
        '-i', action='append', dest='packages',
        help='Install a package after the environment is created. '
             'This option may be repeated.')
    parser.add_argument(
        '-r', dest='requirements',
        help='Provide a pip requirements file to install a base set of '
             'packages into the new environment.')
    parser.add_argument(
        '-d', '--dont-activate', action='store_false', default=True,
        dest='activate',
        help="After creation, continue with the existing shell (don't "
             "activate the new environment).")
    return parser
def new_cmd(argv):
    """Create a new environment, in $WORKON_HOME."""
    parser = mkvirtualenv_argparser()
    parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
    parser.add_argument('envname')
    # Unknown options in *rest* are forwarded to virtualenv itself.
    args, rest = parser.parse_known_args(argv)
    project = expandpath(args.project) if args.project else None
    mkvirtualenv(args.envname, args.python, args.packages, project,
                 args.requirements, rest)
    if args.activate:
        shell(args.envname)
def rmvirtualenvs(envs):
error_happened = False
for env in envs:
env = workon_home / env
if os.environ.get('VIRTUAL_ENV') == str(env):
err("ERROR: You cannot remove the active environment (%s)." % env)
error_happened = True
break
try:
shutil.rmtree(str(env))
except OSError as e:
err("Error while trying to remove the {0} env: \n{1}".format
(env, e.strerror))
error_happened = True
return error_happened
def rm_cmd(argv):
"""Remove one or more environment, from $WORKON_HOME."""
if len(argv) < 1:
sys.exit("Please specify an environment")
return rmvirtualenvs(argv)
def packages(site_packages):
nodes = site_packages.iterdir()
return set([x.stem.split('-')[0] for x in nodes]) - set(['__pycache__'])
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
return sorted(set(env.parts[-3] for env in
workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def ls_cmd(argv):
    """List available environments."""
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-b', '--brief', action='store_false')
    group.add_argument('-l', '--long', action='store_true')
    opts = parser.parse_args(argv)
    lsvirtualenv(opts.long)
def parse_envname(argv, no_arg_callback):
if len(argv) < 1 or argv[0] is None:
no_arg_callback()
env = argv[0]
if env.startswith('/'):
sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
if not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
else:
return env
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
if not env:
sys.exit('ERROR: no virtualenv active')
else:
env_python = workon_home / env / env_bin_dir / 'python'
return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
"""Add the specified directories to the Python path for the currently active virtualenv.
This will be done by placing the directory names in a path file named
"virtualenv_path_extensions.pth" inside the virtualenv's site-packages
directory; if this file does not exists, it will be created first.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='remove', action='store_true')
parser.add_argument('dirs', nargs='+')
args = parser.parse_args(argv)
extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
if not extra_paths.exists():
with extra_paths.open('w') as extra:
extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
def rewrite(f):
with extra_paths.open('r+') as extra:
to_write = f(extra.readlines())
extra.seek(0)
extra.truncate()
extra.writelines(to_write)
if args.remove:
rewrite(lambda ls: [line for line in ls if line not in new_paths])
else:
rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
print(sitepackages_dir())
def lssitepackages_cmd(argv):
"""Show the content of the site-packages directory of the current virtualenv."""
site = sitepackages_dir()
print(*sorted(site.iterdir()), sep=os.linesep)
extra_paths = site / '_virtualenv_path_extensions.pth'
if extra_paths.exists():
print('from _virtualenv_path_extensions.pth:')
with extra_paths.open() as extra:
print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
"""Toggle the current virtualenv between having and not having access to the global site-packages."""
quiet = argv == ['-q']
site = sitepackages_dir()
ngsp_file = site.parent / 'no-global-site-packages.txt'
if ngsp_file.exists():
ngsp_file.unlink()
if not quiet:
print('Enabled global site-packages')
else:
with ngsp_file.open('w'):
if not quiet:
print('Disabled global site-packages')
def cp_cmd(argv):
"""Duplicate the named virtualenv to make a new one."""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target', nargs='?')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
args = parser.parse_args(argv)
target_name = copy_virtualenv_project(args.source, args.target)
if args.activate:
shell(target_name)
def copy_virtualenv_project(source, target):
source = expandpath(source)
if not source.exists():
source = workon_home / source
if not source.exists():
sys.exit('Please provide a valid virtualenv to copy')
target_name = target or source.name
target = workon_home / target_name
if target.exists():
sys.exit('%s virtualenv already exists in %s.' % (
target_name, workon_home
))
print('Copying {0} in {1}'.format(source, target_name))
clone_virtualenv(str(source), str(target))
return target_name
def rename_cmd(argv):
"""Rename a virtualenv"""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target')
pargs = parser.parse_args(argv)
copy_virtualenv_project(pargs.source, pargs.target)
return rmvirtualenvs([pargs.source])
def setvirtualenvproject(env, project):
print('Setting project for {0} to {1}'.format(env, project))
with (workon_home / env / '.project').open('wb') as prj:
prj.write(str(project).encode())
def setproject_cmd(argv):
"""Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
args = dict(enumerate(argv))
project = os.path.abspath(args.get(1, '.'))
env = args.get(0, os.environ.get('VIRTUAL_ENV'))
if not env:
sys.exit('pew setproject [virtualenv] [project_path]')
if not (workon_home / env).exists():
sys.exit("Environment '%s' doesn't exist." % env)
if not os.path.isdir(project):
sys.exit('pew setproject: %s does not exist' % project)
setvirtualenvproject(env, project)
def getproject_cmd(argv):
"""Print a virtualenv's project directory, if set.
If called without providing a virtualenv name as argument, print the
current virtualenv's project directory.
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Print an environment's project directory.",
)
parser.add_argument(
'envname',
nargs='?',
default=os.environ.get('VIRTUAL_ENV'),
help=(
'The name of the environment to return the project directory '
'for. If omitted, will use the currently active environment.'
),
)
args = parser.parse_args(argv)
# Now, do the actual work
if not args.envname:
sys.exit('ERROR: no virtualenv active')
if not (workon_home / args.envname).exists():
sys.exit("ERROR: Environment '{0}' does not exist."
.format(args.envname))
project_dir = get_project_dir(args.envname)
if project_dir is None:
sys.exit("ERROR: no project directory set for Environment '{0}'"
.format(args.envname))
print(project_dir)
def mkproject_cmd(argv):
    """Create a new project directory and its associated virtualenv."""
    # Fast path: '-l'/'--list' anywhere on the command line just lists the
    # available template scripts and returns, without requiring the
    # positional envname that full argument parsing would demand.
    if '-l' in argv or '--list' in argv:
        # Strip the 'template_' prefix (9 chars) to get the template name.
        templates = [t.name[9:] for t in workon_home.glob("template_*")]
        print("Available project templates:", *templates, sep='\n')
        return
    parser = mkvirtualenv_argparser()
    parser.add_argument('envname')
    parser.add_argument(
        '-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
    parser.add_argument(
        '-l', '--list', action='store_true', help='List available templates.')
    args, rest = parser.parse_known_args(argv)
    # Projects live under $PROJECT_HOME (defaults to the current directory).
    projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
    if not projects_home.exists():
        sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
    project = (projects_home / args.envname).absolute()
    if project.exists():
        sys.exit('Project %s already exists.' % args.envname)
    # Create the env first (this records `project` as its associated
    # directory), then create the project directory itself.
    mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
                 args.requirements, rest)
    project.mkdir()
    # Each template is an executable named 'template_<name>' in workon_home,
    # run inside the new env with the env name and project path as arguments.
    for template_name in args.templates:
        template = workon_home / ("template_" + template_name)
        inve(args.envname, str(template), args.envname, str(project))
    if args.activate:
        shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
    """Create a temporary virtualenv that is deleted when the shell exits.

    The env gets a random 64-bit hex name that does not collide with an
    existing one; it is always removed afterwards, even if creation of the
    shell fails.
    """
    parser = mkvirtualenv_argparser()
    # '.' resolves to workon_home itself, which always exists, so the loop
    # body runs at least once and picks a real random name.
    env = '.'
    while (workon_home / env).exists():
        # Python 3 hex() has no trailing 'L' suffix; the historical [2:-1]
        # slice silently dropped the last hex digit. Format directly instead.
        env = '%x' % random.getrandbits(64)
    args, rest = parser.parse_known_args(argv)
    mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
                 rest=rest)
    print('This is a temporary environment. It will be deleted when you exit')
    try:
        if args.activate:
            # only used for testing on windows
            shell(env)
    finally:
        # Always clean up, but do NOT `return` from inside the finally
        # clause: that would swallow any in-flight exception.
        error_happened = rmvirtualenvs([env])
    return error_happened
def wipeenv_cmd(argv):
    """Uninstall every removable package from the current (or named) env."""
    env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
    if not env:
        sys.exit('ERROR: no virtualenv active')
    if not (workon_home / env).exists():
        sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
    env_pip = str(workon_home / env / env_bin_dir / 'pip')
    frozen = set(invoke(env_pip, 'freeze').out.splitlines())
    # Only 'name==version' entries can be uninstalled by name; anything
    # else (editable installs, VCS urls, ...) is reported but left alone.
    pinned = {line for line in frozen if len(line.split("==")) == 2}
    skipped = sorted(frozen - pinned)
    removable = sorted({line.split("==")[0] for line in pinned}
                       - {'distribute', 'wsgiref'})
    if not removable:
        print("Nothing to remove")
        return
    print("Ignoring:\n %s" % "\n ".join(skipped))
    print("Uninstalling packages:\n %s" % "\n ".join(removable))
    return inve(env, 'pip', 'uninstall', '-y', *removable)
def inall_cmd(argv):
    """Run the given command inside every virtualenv, one after another."""
    failed = False
    for env in lsenvs():
        print("\n%s:" % env)
        try:
            inve(env, *argv)
        except CalledProcessError as exc:
            # Keep going through the remaining envs, but remember the failure.
            failed = True
            err(exc)
    # Non-zero exit status iff at least one env's command failed.
    sys.exit(failed)
def in_cmd(argv):
    """Run a command inside the given virtualenv."""
    if len(argv) == 1:
        # Only an env name was given: behave exactly like `pew workon`.
        return workon_cmd(argv)
    def no_target():
        sys.exit('You must provide a valid virtualenv to target')
    parse_envname(argv, no_target)
    return inve(*argv)
def restore_cmd(argv):
    """Repair a broken virtualenv by reinstalling its exact Python on top."""
    if not argv:
        sys.exit('You must provide a valid virtualenv to target')
    env_path = workon_home / argv[0]
    interpreter = 'python.exe' if windows else 'python'
    # Resolve the interpreter symlink to recover the exact version name
    # (e.g. 'python3.7') the env was originally built with.
    exact_py = (env_path / env_bin_dir / interpreter).resolve().name
    return check_call([sys.executable, "-m", "virtualenv",
                       str(env_path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
    """Print the path of the named virtualenv's directory."""
    def no_target():
        sys.exit('You must provide a valid virtualenv to target')
    env = parse_envname(argv, no_target)
    print(workon_home / env)
def install_cmd(argv):
    '''Use Pythonz to download and build the specified Python version'''
    installer = InstallCommand()
    options, versions = installer.parser.parse_args(argv)
    # Exactly one version must be requested per invocation.
    if len(versions) != 1:
        installer.parser.print_help()
        sys.exit(1)
    try:
        actual_installer = PythonInstaller.get_installer(versions[0], options)
        return actual_installer.install()
    except AlreadyInstalledError as already:
        # Not an error for our purposes: just report and return None.
        print(already)
def uninstall_cmd(argv):
    '''Use Pythonz to uninstall the specified Python version'''
    command = UninstallCommand()
    return command.run(argv)
def list_pythons_cmd(argv):
    '''List the pythons installed by Pythonz (or all the installable ones)'''
    lister = ListPythons()
    return lister.run(argv)
def locate_python_cmd(argv):
    '''Locate the path for the python version installed by Pythonz'''
    locator = LocatePython()
    return locator.run(argv)
def version_cmd(argv):
    """Prints current pew version"""
    import pkg_resources
    try:
        version = pkg_resources.get_distribution('pew').version
    except pkg_resources.DistributionNotFound:
        # Running from a source tree rather than an installed package.
        version = 'unknown'
        print('Setuptools has some issues here, failed to get our own package.',
              file=sys.stderr)
    print(version)
def prevent_path_errors():
    """Exit with a diagnostic when an activated virtualenv is not on $PATH."""
    if 'VIRTUAL_ENV' not in os.environ:
        return
    if check_path():
        return
    sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
    """On the very first run, offer to hook pew into the user's shell rc file."""
    shell = supported_shell()
    if shell:
        # fish has its own command-substitution syntax.
        if shell == 'fish':
            source_cmd = 'source (pew shell_config)'
        else:
            source_cmd = 'source "$(pew shell_config)"'
        rc_files = {'bash': '~/.bashrc',
                    'zsh': '~/.zshrc',
                    'fish': '~/.config/fish/config.fish'}
        rcpath = expandpath(rc_files[shell])
        if rcpath.exists():
            update_config_file(rcpath, source_cmd)
        else:
            print("It seems that you're running pew for the first time\n"
                  "If you want source shell competions and update your prompt, "
                  "Add the following line to your shell config file:\n %s" % source_cmd)
    print('\nWill now continue with the command:', *sys.argv[1:])
    input('[enter]')
def update_config_file(rcpath, source_cmd):
    """Offer to append *source_cmd* to *rcpath* unless it is already there."""
    with rcpath.open('r+') as rcfile:
        already_present = source_cmd in (line.strip() for line in rcfile.readlines())
        if already_present:
            return
        # Keep prompting until we get y / n / empty (empty defaults to No).
        choice = 'X'
        while choice not in ('y', '', 'n'):
            choice = input("It seems that you're running pew for the first time\n"
                           "do you want to modify %s to source completions and"
                           " update your prompt? [y/N]\n> " % rcpath).lower()
        if choice == 'y':
            # File position is at EOF after readlines(), so this appends.
            rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
            print('Done')
        else:
            print('\nOk, if you want to do it manually, just add\n %s\nat'
                  ' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
    """Pretty-print available subcommands with their one-line summaries."""
    pad = max(map(len, cmds)) + 3
    columns, _ = get_terminal_size()
    print('Available commands:\n')
    for name in sorted(cmds):
        doc = cmds[name].__doc__
        if not doc:
            # Undocumented command: print just its name.
            print(' ' + name)
            continue
        summary = doc.splitlines()[0]
        print(textwrap.fill(summary,
                            columns or 1000,
                            initial_indent=(' {0}: '.format(name)).ljust(pad),
                            subsequent_indent=pad * ' '))
def pew():
    """Entry point: dispatch sys.argv[1] to the matching *_cmd function."""
    # First run ever: workon_home was just created; offer interactive setup
    # only when attached to a terminal.
    first_run = makedirs_and_symlink_if_needed(workon_home)
    if first_run and sys.stdin.isatty():
        first_run_setup()
    # Command table is built by reflection: every module-level name ending
    # in '_cmd' becomes a subcommand (name minus the '_cmd' suffix).
    cmds = dict((cmd[:-4], fun)
                for cmd, fun in globals().items() if cmd.endswith('_cmd'))
    if sys.argv[1:]:
        if sys.argv[1] in cmds:
            command = cmds[sys.argv[1]]
            try:
                return command(sys.argv[2:])
            except CalledProcessError as e:
                # Propagate the failed subprocess's exit status as our own.
                return e.returncode
            except KeyboardInterrupt:
                # Ctrl-C is not an error: fall through and return None.
                pass
        else:
            err("ERROR: command", sys.argv[1], "does not exist.")
            print_commands(cmds)
            sys.exit(1)
    else:
        # No subcommand given: show the help listing.
        print_commands(cmds)
|
berdario/pew | pew/pew.py | add_cmd | python | def add_cmd(argv):
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='remove', action='store_true')
parser.add_argument('dirs', nargs='+')
args = parser.parse_args(argv)
extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
if not extra_paths.exists():
with extra_paths.open('w') as extra:
extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
def rewrite(f):
with extra_paths.open('r+') as extra:
to_write = f(extra.readlines())
extra.seek(0)
extra.truncate()
extra.writelines(to_write)
if args.remove:
rewrite(lambda ls: [line for line in ls if line not in new_paths])
else:
rewrite(lambda lines: lines[0:1] + new_paths + lines[1:]) | Add the specified directories to the Python path for the currently active virtualenv.
This will be done by placing the directory names in a path file named
"virtualenv_path_extensions.pth" inside the virtualenv's site-packages
directory; if this file does not exists, it will be created first. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L402-L433 | [
"def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):\n if not env:\n sys.exit('ERROR: no virtualenv active')\n else:\n env_python = workon_home / env / env_bin_dir / 'python'\n return Path(invoke(str(env_python), '-c', 'import distutils; \\\nprint(distutils.sysconfig.get_python_lib())').out)\n",
"def rewrite(f):\n with extra_paths.open('r+') as extra:\n to_write = f(extra.readlines())\n extra.seek(0)\n extra.truncate()\n extra.writelines(to_write)\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
    """Create workon_home on first use; return True iff it was just created.

    On POSIX, when the location was not customised via WORKON_HOME or
    XDG_DATA_HOME, also plant a ~/.virtualenvs symlink pointing at it.
    """
    if workon_home.exists() or not own(workon_home):
        return False
    workon_home.mkdir(parents=True)
    link = expandpath('~/.virtualenvs')
    using_defaults = ('WORKON_HOME' not in os.environ
                      and 'XDG_DATA_HOME' not in os.environ)
    if os.name == 'posix' and using_defaults and not link.exists():
        link.symlink_to(str(workon_home))
    return True
pew_site = Path(__file__).parent
def supported_shell():
    """Return the $SHELL basename if pew has helpers for it, else None."""
    name = Path(os.environ.get('SHELL', '')).stem
    return name if name in ('bash', 'zsh', 'fish') else None
def shell_config_cmd(argv):
    "Prints the path for the current $SHELL helper file"
    shell = supported_shell()
    if not shell:
        err('Completions and prompts are unavailable for %s' %
            repr(os.environ.get('SHELL', '')))
        return
    print(pew_site / 'shell_config' / ('init.' + shell))
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
    """Return the project directory recorded for *env*, or None.

    The path is read from the env's .project file; a recorded path that no
    longer exists on disk is reported and treated as absent.
    """
    project_file = workon_home / env / '.project'
    if not project_file.exists():
        return None
    with project_file.open() as f:
        project_dir = f.readline().strip()
    if os.path.exists(project_dir):
        return project_dir
    err('Corrupted or outdated:', project_file, '\nDirectory',
        project_dir, "doesn't exist.")
def unsetenv(key):
    """Remove *key* from os.environ if present (no-op otherwise)."""
    os.environ.pop(key, None)
def compute_path(env):
    """Return $PATH with the env's bin/Scripts directory prepended."""
    env_bin = workon_home / env / env_bin_dir
    return os.pathsep.join([str(env_bin), os.environ['PATH']])
def inve(env, command, *args, **kwargs):
    """Run *command* (with *args*) inside the given virtual environment.

    Additional keyword arguments are passed through to
    ``subprocess.check_call()``. Returns the check_call result, or 2 when
    the command executable cannot be found.
    """
    # we don't strictly need to restore the environment, since pew runs in
    # its own process, but it feels like the right thing to do
    with temp_environ():
        os.environ['VIRTUAL_ENV'] = str(workon_home / env)
        os.environ['PATH'] = compute_path(env)
        # These variables would make the child resolve the wrong interpreter.
        unsetenv('PYTHONHOME')
        unsetenv('__PYVENV_LAUNCHER__')
        try:
            return check_call([command] + list(args), shell=windows, **kwargs)
        # need to have shell=True on windows, otherwise the PYTHONPATH
        # won't inherit the PATH
        except OSError as e:
            if e.errno == 2:
                # ENOENT: the command itself was not found on PATH.
                err('Unable to find', command)
                return 2
            else:
                raise
def fork_shell(env, shellcmd, cwd):
or_ctrld = '' if windows else "or 'Ctrl+D' "
err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
"to return.", sep='')
if 'VIRTUAL_ENV' in os.environ:
err("Be aware that this environment will be nested on top "
"of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
# bash is a special little snowflake, and prevent_path_errors cannot work there
# https://github.com/berdario/pew/issues/58#issuecomment-102182346
bashrcpath = expandpath('~/.bashrc')
if bashrcpath.exists():
with NamedTemporaryFile('w+') as rcfile:
with bashrcpath.open() as bashrc:
rcfile.write(bashrc.read())
rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
rcfile.flush()
return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
else:
return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
shell_cmd = ['cmd']
cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
if expandpath(cmderrc_path).exists():
shell_cmd += ['/k', cmderrc_path]
if cwd:
os.environ['CMDER_START'] = cwd
return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
env = str(env)
shell = _detect_shell()
shell_name = Path(shell).stem
if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
# On Windows the PATH is usually set with System Utility
# so we won't worry about trying to check mistakes there
shell_check = (sys.executable + ' -c "from pew.pew import '
'prevent_path_errors; prevent_path_errors()"')
try:
inve(env, shell, '-c', shell_check)
except CalledProcessError:
return
if shell_name == 'bash':
return fork_bash(env, cwd)
elif shell_name == 'Cmder':
return fork_cmder(env, cwd)
else:
return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=None, project=None,
                 requirements=None, rest=None):
    """Create a virtualenv named *envname* under workon_home.

    Args:
        envname: name of the environment directory to create.
        python: optional interpreter passed to virtualenv as --python.
        packages: optional list of packages to pip-install afterwards.
        project: optional Path to record as the env's associated project.
        requirements: optional pip requirements file to install from.
        rest: extra arguments forwarded verbatim to virtualenv.

    Raises whatever virtualenv raised; a half-built env is removed first.
    """
    # None sentinels instead of mutable [] defaults (shared-state hazard).
    rest = list(rest) if rest else []
    packages = packages or []
    if python:
        rest = ["--python=%s" % python] + rest
    path = (workon_home / envname).absolute()
    try:
        check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
    except (CalledProcessError, KeyboardInterrupt):
        # Never leave a partially-created environment behind.
        rmvirtualenvs([envname])
        raise
    if project:
        setvirtualenvproject(envname, project.absolute())
    if requirements:
        inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
    if packages:
        inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
    """Build the argument parser shared by the env-creating subcommands."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--python')
    parser.add_argument(
        '-i', action='append', dest='packages',
        help='Install a package after the environment is created. '
             'This option may be repeated.')
    parser.add_argument(
        '-r', dest='requirements',
        help='Provide a pip requirements file to install a base set of '
             'packages into the new environment.')
    parser.add_argument(
        '-d', '--dont-activate', action='store_false',
        default=True, dest='activate',
        help="After creation, continue with the existing shell "
             "(don't activate the new environment).")
    return parser
def new_cmd(argv):
"""Create a new environment, in $WORKON_HOME."""
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname)
def rmvirtualenvs(envs):
    """Delete the given env directories; return True if anything failed."""
    error_happened = False
    active = os.environ.get('VIRTUAL_ENV')
    for name in envs:
        env = workon_home / name
        if active == str(env):
            # Refuse to delete the env we are currently running inside.
            err("ERROR: You cannot remove the active environment (%s)." % env)
            error_happened = True
            break
        try:
            shutil.rmtree(str(env))
        except OSError as e:
            err("Error while trying to remove the {0} env: \n{1}".format
                (env, e.strerror))
            error_happened = True
    return error_happened
def rm_cmd(argv):
"""Remove one or more environment, from $WORKON_HOME."""
if len(argv) < 1:
sys.exit("Please specify an environment")
return rmvirtualenvs(argv)
def packages(site_packages):
    """Return the distinct top-level package names found in *site_packages*."""
    names = {entry.stem.split('-')[0] for entry in site_packages.iterdir()}
    return names - {'__pycache__'}
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
return sorted(set(env.parts[-3] for env in
workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def ls_cmd(argv):
"""List available environments."""
parser = argparse.ArgumentParser()
p_group = parser.add_mutually_exclusive_group()
p_group.add_argument('-b', '--brief', action='store_false')
p_group.add_argument('-l', '--long', action='store_true')
args = parser.parse_args(argv)
lsvirtualenv(args.long)
def parse_envname(argv, no_arg_callback):
    """Return the validated env name from argv[0]; call the callback when absent."""
    if not argv or argv[0] is None:
        no_arg_callback()
    env = argv[0]
    # Absolute paths are not valid env names.
    if env.startswith('/'):
        sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
    if (workon_home / env).exists():
        return env
    sys.exit("ERROR: Environment '{0}' does not exist. Create it with "
             "'pew new {0}'.".format(env))
def workon_cmd(argv):
"""List or change working virtual environments."""
parser = argparse.ArgumentParser(prog='pew workon')
parser.add_argument('envname', nargs='?')
parser.add_argument(
'-n', '--no-cd', action='store_true',
help=('Do not change working directory to project directory after '
'activating virtualenv.')
)
args = parser.parse_args(argv)
def list_and_exit():
lsvirtualenv(False)
sys.exit(0)
env = parse_envname([args.envname], list_and_exit)
# Check if the virtualenv has an associated project directory and in
# this case, use it as the current working directory.
project_dir = get_project_dir(env)
if project_dir is None or args.no_cd:
project_dir = os.getcwd()
return shell(env, cwd=project_dir)
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
if not env:
sys.exit('ERROR: no virtualenv active')
else:
env_python = workon_home / env / env_bin_dir / 'python'
return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def sitepackages_dir_cmd(argv):
print(sitepackages_dir())
def lssitepackages_cmd(argv):
"""Show the content of the site-packages directory of the current virtualenv."""
site = sitepackages_dir()
print(*sorted(site.iterdir()), sep=os.linesep)
extra_paths = site / '_virtualenv_path_extensions.pth'
if extra_paths.exists():
print('from _virtualenv_path_extensions.pth:')
with extra_paths.open() as extra:
print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
"""Toggle the current virtualenv between having and not having access to the global site-packages."""
quiet = argv == ['-q']
site = sitepackages_dir()
ngsp_file = site.parent / 'no-global-site-packages.txt'
if ngsp_file.exists():
ngsp_file.unlink()
if not quiet:
print('Enabled global site-packages')
else:
with ngsp_file.open('w'):
if not quiet:
print('Disabled global site-packages')
def cp_cmd(argv):
"""Duplicate the named virtualenv to make a new one."""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target', nargs='?')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
args = parser.parse_args(argv)
target_name = copy_virtualenv_project(args.source, args.target)
if args.activate:
shell(target_name)
def copy_virtualenv_project(source, target):
source = expandpath(source)
if not source.exists():
source = workon_home / source
if not source.exists():
sys.exit('Please provide a valid virtualenv to copy')
target_name = target or source.name
target = workon_home / target_name
if target.exists():
sys.exit('%s virtualenv already exists in %s.' % (
target_name, workon_home
))
print('Copying {0} in {1}'.format(source, target_name))
clone_virtualenv(str(source), str(target))
return target_name
def rename_cmd(argv):
"""Rename a virtualenv"""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target')
pargs = parser.parse_args(argv)
copy_virtualenv_project(pargs.source, pargs.target)
return rmvirtualenvs([pargs.source])
def setvirtualenvproject(env, project):
print('Setting project for {0} to {1}'.format(env, project))
with (workon_home / env / '.project').open('wb') as prj:
prj.write(str(project).encode())
def setproject_cmd(argv):
"""Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
args = dict(enumerate(argv))
project = os.path.abspath(args.get(1, '.'))
env = args.get(0, os.environ.get('VIRTUAL_ENV'))
if not env:
sys.exit('pew setproject [virtualenv] [project_path]')
if not (workon_home / env).exists():
sys.exit("Environment '%s' doesn't exist." % env)
if not os.path.isdir(project):
sys.exit('pew setproject: %s does not exist' % project)
setvirtualenvproject(env, project)
def getproject_cmd(argv):
"""Print a virtualenv's project directory, if set.
If called without providing a virtualenv name as argument, print the
current virtualenv's project directory.
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Print an environment's project directory.",
)
parser.add_argument(
'envname',
nargs='?',
default=os.environ.get('VIRTUAL_ENV'),
help=(
'The name of the environment to return the project directory '
'for. If omitted, will use the currently active environment.'
),
)
args = parser.parse_args(argv)
# Now, do the actual work
if not args.envname:
sys.exit('ERROR: no virtualenv active')
if not (workon_home / args.envname).exists():
sys.exit("ERROR: Environment '{0}' does not exist."
.format(args.envname))
project_dir = get_project_dir(args.envname)
if project_dir is None:
sys.exit("ERROR: no project directory set for Environment '{0}'"
.format(args.envname))
print(project_dir)
def mkproject_cmd(argv):
"""Create a new project directory and its associated virtualenv."""
if '-l' in argv or '--list' in argv:
templates = [t.name[9:] for t in workon_home.glob("template_*")]
print("Available project templates:", *templates, sep='\n')
return
parser = mkvirtualenv_argparser()
parser.add_argument('envname')
parser.add_argument(
'-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
parser.add_argument(
'-l', '--list', action='store_true', help='List available templates.')
args, rest = parser.parse_known_args(argv)
projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
if not projects_home.exists():
sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
project = (projects_home / args.envname).absolute()
if project.exists():
sys.exit('Project %s already exists.' % args.envname)
mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
args.requirements, rest)
project.mkdir()
for template_name in args.templates:
template = workon_home / ("template_" + template_name)
inve(args.envname, str(template), args.envname, str(project))
if args.activate:
shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
"""Create a temporary virtualenv."""
parser = mkvirtualenv_argparser()
env = '.'
while (workon_home / env).exists():
env = hex(random.getrandbits(64))[2:-1]
args, rest = parser.parse_known_args(argv)
mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
rest=rest)
print('This is a temporary environment. It will be deleted when you exit')
try:
if args.activate:
# only used for testing on windows
shell(env)
finally:
return rmvirtualenvs([env])
def wipeenv_cmd(argv):
"""Remove all installed packages from the current (or supplied) env."""
env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
if not env:
sys.exit('ERROR: no virtualenv active')
elif not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
else:
env_pip = str(workon_home / env / env_bin_dir / 'pip')
all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
ignored = sorted(all_pkgs - pkgs)
pkgs = set(p.split("==")[0] for p in pkgs)
to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
if to_remove:
print("Ignoring:\n %s" % "\n ".join(ignored))
print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
return inve(env, 'pip', 'uninstall', '-y', *to_remove)
else:
print("Nothing to remove")
def inall_cmd(argv):
"""Run a command in each virtualenv."""
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors)
def in_cmd(argv):
"""Run a command in the given virtualenv."""
if len(argv) == 1:
return workon_cmd(argv)
parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
return inve(*argv)
def restore_cmd(argv):
"""Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
if len(argv) < 1:
sys.exit('You must provide a valid virtualenv to target')
env = argv[0]
path = workon_home / env
py = path / env_bin_dir / ('python.exe' if windows else 'python')
exact_py = py.resolve().name
return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
"""Print the path for the virtualenv directory"""
env = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
print(workon_home / env)
def install_cmd(argv):
'''Use Pythonz to download and build the specified Python version'''
installer = InstallCommand()
options, versions = installer.parser.parse_args(argv)
if len(versions) != 1:
installer.parser.print_help()
sys.exit(1)
else:
try:
actual_installer = PythonInstaller.get_installer(versions[0], options)
return actual_installer.install()
except AlreadyInstalledError as e:
print(e)
def uninstall_cmd(argv):
'''Use Pythonz to uninstall the specified Python version'''
return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
'''List the pythons installed by Pythonz (or all the installable ones)'''
return ListPythons().run(argv)
def locate_python_cmd(argv):
'''Locate the path for the python version installed by Pythonz'''
return LocatePython().run(argv)
def version_cmd(argv):
"""Prints current pew version"""
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('pew').version
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
print(__version__)
def prevent_path_errors():
if 'VIRTUAL_ENV' in os.environ and not check_path():
sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
shell = supported_shell()
if shell:
if shell == 'fish':
source_cmd = 'source (pew shell_config)'
else:
source_cmd = 'source "$(pew shell_config)"'
rcpath = expandpath({'bash': '~/.bashrc'
, 'zsh': '~/.zshrc'
, 'fish': '~/.config/fish/config.fish'}[shell])
if rcpath.exists():
update_config_file(rcpath, source_cmd)
else:
print("It seems that you're running pew for the first time\n"
"If you want source shell competions and update your prompt, "
"Add the following line to your shell config file:\n %s" % source_cmd)
print('\nWill now continue with the command:', *sys.argv[1:])
input('[enter]')
def update_config_file(rcpath, source_cmd):
with rcpath.open('r+') as rcfile:
if source_cmd not in (line.strip() for line in rcfile.readlines()):
choice = 'X'
while choice not in ('y', '', 'n'):
choice = input("It seems that you're running pew for the first time\n"
"do you want to modify %s to source completions and"
" update your prompt? [y/N]\n> " % rcpath).lower()
if choice == 'y':
rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
print('Done')
else:
print('\nOk, if you want to do it manually, just add\n %s\nat'
' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
longest = max(map(len, cmds)) + 3
columns, _ = get_terminal_size()
print('Available commands:\n')
for cmd, fun in sorted(cmds.items()):
if fun.__doc__:
print(textwrap.fill(
fun.__doc__.splitlines()[0],
columns or 1000,
initial_indent=(' {0}: '.format(cmd)).ljust(longest),
subsequent_indent=longest * ' '))
else:
print(' ' + cmd)
def pew():
first_run = makedirs_and_symlink_if_needed(workon_home)
if first_run and sys.stdin.isatty():
first_run_setup()
cmds = dict((cmd[:-4], fun)
for cmd, fun in globals().items() if cmd.endswith('_cmd'))
if sys.argv[1:]:
if sys.argv[1] in cmds:
command = cmds[sys.argv[1]]
try:
return command(sys.argv[2:])
except CalledProcessError as e:
return e.returncode
except KeyboardInterrupt:
pass
else:
err("ERROR: command", sys.argv[1], "does not exist.")
print_commands(cmds)
sys.exit(1)
else:
print_commands(cmds)
|
berdario/pew | pew/pew.py | lssitepackages_cmd | python | def lssitepackages_cmd(argv):
site = sitepackages_dir()
print(*sorted(site.iterdir()), sep=os.linesep)
extra_paths = site / '_virtualenv_path_extensions.pth'
if extra_paths.exists():
print('from _virtualenv_path_extensions.pth:')
with extra_paths.open() as extra:
print(''.join(extra.readlines())) | Show the content of the site-packages directory of the current virtualenv. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L440-L448 | [
"def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):\n if not env:\n sys.exit('ERROR: no virtualenv active')\n else:\n env_python = workon_home / env / env_bin_dir / 'python'\n return Path(invoke(str(env_python), '-c', 'import distutils; \\\nprint(distutils.sysconfig.get_python_lib())').out)\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
if not workon_home.exists() and own(workon_home):
workon_home.mkdir(parents=True)
link = expandpath('~/.virtualenvs')
if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
'XDG_DATA_HOME' not in os.environ and not link.exists():
link.symlink_to(str(workon_home))
return True
else:
return False
pew_site = Path(__file__).parent
def supported_shell():
shell = Path(os.environ.get('SHELL', '')).stem
if shell in ('bash', 'zsh', 'fish'):
return shell
def shell_config_cmd(argv):
"Prints the path for the current $SHELL helper file"
shell = supported_shell()
if shell:
print(pew_site / 'shell_config' / ('init.' + shell))
else:
err('Completions and prompts are unavailable for %s' %
repr(os.environ.get('SHELL', '')))
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
project_file = workon_home / env / '.project'
if project_file.exists():
with project_file.open() as f:
project_dir = f.readline().strip()
if os.path.exists(project_dir):
return project_dir
else:
err('Corrupted or outdated:', project_file, '\nDirectory',
project_dir, "doesn't exist.")
def unsetenv(key):
if key in os.environ:
del os.environ[key]
def compute_path(env):
envdir = workon_home / env
return os.pathsep.join([
str(envdir / env_bin_dir),
os.environ['PATH'],
])
def inve(env, command, *args, **kwargs):
"""Run a command in the given virtual environment.
Pass additional keyword arguments to ``subprocess.check_call()``."""
# we don't strictly need to restore the environment, since pew runs in
# its own process, but it feels like the right thing to do
with temp_environ():
os.environ['VIRTUAL_ENV'] = str(workon_home / env)
os.environ['PATH'] = compute_path(env)
unsetenv('PYTHONHOME')
unsetenv('__PYVENV_LAUNCHER__')
try:
return check_call([command] + list(args), shell=windows, **kwargs)
# need to have shell=True on windows, otherwise the PYTHONPATH
# won't inherit the PATH
except OSError as e:
if e.errno == 2:
err('Unable to find', command)
return 2
else:
raise
def fork_shell(env, shellcmd, cwd):
or_ctrld = '' if windows else "or 'Ctrl+D' "
err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
"to return.", sep='')
if 'VIRTUAL_ENV' in os.environ:
err("Be aware that this environment will be nested on top "
"of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
# bash is a special little snowflake, and prevent_path_errors cannot work there
# https://github.com/berdario/pew/issues/58#issuecomment-102182346
bashrcpath = expandpath('~/.bashrc')
if bashrcpath.exists():
with NamedTemporaryFile('w+') as rcfile:
with bashrcpath.open() as bashrc:
rcfile.write(bashrc.read())
rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
rcfile.flush()
return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
else:
return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
shell_cmd = ['cmd']
cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
if expandpath(cmderrc_path).exists():
shell_cmd += ['/k', cmderrc_path]
if cwd:
os.environ['CMDER_START'] = cwd
return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
env = str(env)
shell = _detect_shell()
shell_name = Path(shell).stem
if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
# On Windows the PATH is usually set with System Utility
# so we won't worry about trying to check mistakes there
shell_check = (sys.executable + ' -c "from pew.pew import '
'prevent_path_errors; prevent_path_errors()"')
try:
inve(env, shell, '-c', shell_check)
except CalledProcessError:
return
if shell_name == 'bash':
return fork_bash(env, cwd)
elif shell_name == 'Cmder':
return fork_cmder(env, cwd)
else:
return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
requirements=None, rest=[]):
if python:
rest = ["--python=%s" % python] + rest
path = (workon_home / envname).absolute()
try:
check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
except (CalledProcessError, KeyboardInterrupt):
rmvirtualenvs([envname])
raise
else:
if project:
setvirtualenvproject(envname, project.absolute())
if requirements:
inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
if packages:
inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python')
parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
return parser
def new_cmd(argv):
"""Create a new environment, in $WORKON_HOME."""
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname)
def rmvirtualenvs(envs):
error_happened = False
for env in envs:
env = workon_home / env
if os.environ.get('VIRTUAL_ENV') == str(env):
err("ERROR: You cannot remove the active environment (%s)." % env)
error_happened = True
break
try:
shutil.rmtree(str(env))
except OSError as e:
err("Error while trying to remove the {0} env: \n{1}".format
(env, e.strerror))
error_happened = True
return error_happened
def rm_cmd(argv):
"""Remove one or more environment, from $WORKON_HOME."""
if len(argv) < 1:
sys.exit("Please specify an environment")
return rmvirtualenvs(argv)
def packages(site_packages):
nodes = site_packages.iterdir()
return set([x.stem.split('-')[0] for x in nodes]) - set(['__pycache__'])
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
return sorted(set(env.parts[-3] for env in
workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def ls_cmd(argv):
"""List available environments."""
parser = argparse.ArgumentParser()
p_group = parser.add_mutually_exclusive_group()
p_group.add_argument('-b', '--brief', action='store_false')
p_group.add_argument('-l', '--long', action='store_true')
args = parser.parse_args(argv)
lsvirtualenv(args.long)
def parse_envname(argv, no_arg_callback):
if len(argv) < 1 or argv[0] is None:
no_arg_callback()
env = argv[0]
if env.startswith('/'):
sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
if not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
else:
return env
def workon_cmd(argv):
"""List or change working virtual environments."""
parser = argparse.ArgumentParser(prog='pew workon')
parser.add_argument('envname', nargs='?')
parser.add_argument(
'-n', '--no-cd', action='store_true',
help=('Do not change working directory to project directory after '
'activating virtualenv.')
)
args = parser.parse_args(argv)
def list_and_exit():
lsvirtualenv(False)
sys.exit(0)
env = parse_envname([args.envname], list_and_exit)
# Check if the virtualenv has an associated project directory and in
# this case, use it as the current working directory.
project_dir = get_project_dir(env)
if project_dir is None or args.no_cd:
project_dir = os.getcwd()
return shell(env, cwd=project_dir)
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
if not env:
sys.exit('ERROR: no virtualenv active')
else:
env_python = workon_home / env / env_bin_dir / 'python'
return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
"""Add the specified directories to the Python path for the currently active virtualenv.
This will be done by placing the directory names in a path file named
"virtualenv_path_extensions.pth" inside the virtualenv's site-packages
directory; if this file does not exists, it will be created first.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='remove', action='store_true')
parser.add_argument('dirs', nargs='+')
args = parser.parse_args(argv)
extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
if not extra_paths.exists():
with extra_paths.open('w') as extra:
extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
def rewrite(f):
with extra_paths.open('r+') as extra:
to_write = f(extra.readlines())
extra.seek(0)
extra.truncate()
extra.writelines(to_write)
if args.remove:
rewrite(lambda ls: [line for line in ls if line not in new_paths])
else:
rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
print(sitepackages_dir())
def toggleglobalsitepackages_cmd(argv):
"""Toggle the current virtualenv between having and not having access to the global site-packages."""
quiet = argv == ['-q']
site = sitepackages_dir()
ngsp_file = site.parent / 'no-global-site-packages.txt'
if ngsp_file.exists():
ngsp_file.unlink()
if not quiet:
print('Enabled global site-packages')
else:
with ngsp_file.open('w'):
if not quiet:
print('Disabled global site-packages')
def cp_cmd(argv):
"""Duplicate the named virtualenv to make a new one."""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target', nargs='?')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
args = parser.parse_args(argv)
target_name = copy_virtualenv_project(args.source, args.target)
if args.activate:
shell(target_name)
def copy_virtualenv_project(source, target):
source = expandpath(source)
if not source.exists():
source = workon_home / source
if not source.exists():
sys.exit('Please provide a valid virtualenv to copy')
target_name = target or source.name
target = workon_home / target_name
if target.exists():
sys.exit('%s virtualenv already exists in %s.' % (
target_name, workon_home
))
print('Copying {0} in {1}'.format(source, target_name))
clone_virtualenv(str(source), str(target))
return target_name
def rename_cmd(argv):
"""Rename a virtualenv"""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target')
pargs = parser.parse_args(argv)
copy_virtualenv_project(pargs.source, pargs.target)
return rmvirtualenvs([pargs.source])
def setvirtualenvproject(env, project):
print('Setting project for {0} to {1}'.format(env, project))
with (workon_home / env / '.project').open('wb') as prj:
prj.write(str(project).encode())
def setproject_cmd(argv):
"""Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
args = dict(enumerate(argv))
project = os.path.abspath(args.get(1, '.'))
env = args.get(0, os.environ.get('VIRTUAL_ENV'))
if not env:
sys.exit('pew setproject [virtualenv] [project_path]')
if not (workon_home / env).exists():
sys.exit("Environment '%s' doesn't exist." % env)
if not os.path.isdir(project):
sys.exit('pew setproject: %s does not exist' % project)
setvirtualenvproject(env, project)
def getproject_cmd(argv):
"""Print a virtualenv's project directory, if set.
If called without providing a virtualenv name as argument, print the
current virtualenv's project directory.
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Print an environment's project directory.",
)
parser.add_argument(
'envname',
nargs='?',
default=os.environ.get('VIRTUAL_ENV'),
help=(
'The name of the environment to return the project directory '
'for. If omitted, will use the currently active environment.'
),
)
args = parser.parse_args(argv)
# Now, do the actual work
if not args.envname:
sys.exit('ERROR: no virtualenv active')
if not (workon_home / args.envname).exists():
sys.exit("ERROR: Environment '{0}' does not exist."
.format(args.envname))
project_dir = get_project_dir(args.envname)
if project_dir is None:
sys.exit("ERROR: no project directory set for Environment '{0}'"
.format(args.envname))
print(project_dir)
def mkproject_cmd(argv):
"""Create a new project directory and its associated virtualenv."""
if '-l' in argv or '--list' in argv:
templates = [t.name[9:] for t in workon_home.glob("template_*")]
print("Available project templates:", *templates, sep='\n')
return
parser = mkvirtualenv_argparser()
parser.add_argument('envname')
parser.add_argument(
'-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
parser.add_argument(
'-l', '--list', action='store_true', help='List available templates.')
args, rest = parser.parse_known_args(argv)
projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
if not projects_home.exists():
sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
project = (projects_home / args.envname).absolute()
if project.exists():
sys.exit('Project %s already exists.' % args.envname)
mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
args.requirements, rest)
project.mkdir()
for template_name in args.templates:
template = workon_home / ("template_" + template_name)
inve(args.envname, str(template), args.envname, str(project))
if args.activate:
shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
"""Create a temporary virtualenv."""
parser = mkvirtualenv_argparser()
env = '.'
while (workon_home / env).exists():
env = hex(random.getrandbits(64))[2:-1]
args, rest = parser.parse_known_args(argv)
mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
rest=rest)
print('This is a temporary environment. It will be deleted when you exit')
try:
if args.activate:
# only used for testing on windows
shell(env)
finally:
return rmvirtualenvs([env])
def wipeenv_cmd(argv):
"""Remove all installed packages from the current (or supplied) env."""
env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
if not env:
sys.exit('ERROR: no virtualenv active')
elif not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
else:
env_pip = str(workon_home / env / env_bin_dir / 'pip')
all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
ignored = sorted(all_pkgs - pkgs)
pkgs = set(p.split("==")[0] for p in pkgs)
to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
if to_remove:
print("Ignoring:\n %s" % "\n ".join(ignored))
print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
return inve(env, 'pip', 'uninstall', '-y', *to_remove)
else:
print("Nothing to remove")
def inall_cmd(argv):
"""Run a command in each virtualenv."""
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors)
def in_cmd(argv):
"""Run a command in the given virtualenv."""
if len(argv) == 1:
return workon_cmd(argv)
parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
return inve(*argv)
def restore_cmd(argv):
"""Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
if len(argv) < 1:
sys.exit('You must provide a valid virtualenv to target')
env = argv[0]
path = workon_home / env
py = path / env_bin_dir / ('python.exe' if windows else 'python')
exact_py = py.resolve().name
return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
"""Print the path for the virtualenv directory"""
env = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
print(workon_home / env)
def install_cmd(argv):
'''Use Pythonz to download and build the specified Python version'''
installer = InstallCommand()
options, versions = installer.parser.parse_args(argv)
if len(versions) != 1:
installer.parser.print_help()
sys.exit(1)
else:
try:
actual_installer = PythonInstaller.get_installer(versions[0], options)
return actual_installer.install()
except AlreadyInstalledError as e:
print(e)
def uninstall_cmd(argv):
'''Use Pythonz to uninstall the specified Python version'''
return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
'''List the pythons installed by Pythonz (or all the installable ones)'''
return ListPythons().run(argv)
def locate_python_cmd(argv):
'''Locate the path for the python version installed by Pythonz'''
return LocatePython().run(argv)
def version_cmd(argv):
"""Prints current pew version"""
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('pew').version
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
print(__version__)
def prevent_path_errors():
if 'VIRTUAL_ENV' in os.environ and not check_path():
sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
shell = supported_shell()
if shell:
if shell == 'fish':
source_cmd = 'source (pew shell_config)'
else:
source_cmd = 'source "$(pew shell_config)"'
rcpath = expandpath({'bash': '~/.bashrc'
, 'zsh': '~/.zshrc'
, 'fish': '~/.config/fish/config.fish'}[shell])
if rcpath.exists():
update_config_file(rcpath, source_cmd)
else:
print("It seems that you're running pew for the first time\n"
"If you want source shell competions and update your prompt, "
"Add the following line to your shell config file:\n %s" % source_cmd)
print('\nWill now continue with the command:', *sys.argv[1:])
input('[enter]')
def update_config_file(rcpath, source_cmd):
with rcpath.open('r+') as rcfile:
if source_cmd not in (line.strip() for line in rcfile.readlines()):
choice = 'X'
while choice not in ('y', '', 'n'):
choice = input("It seems that you're running pew for the first time\n"
"do you want to modify %s to source completions and"
" update your prompt? [y/N]\n> " % rcpath).lower()
if choice == 'y':
rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
print('Done')
else:
print('\nOk, if you want to do it manually, just add\n %s\nat'
' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
longest = max(map(len, cmds)) + 3
columns, _ = get_terminal_size()
print('Available commands:\n')
for cmd, fun in sorted(cmds.items()):
if fun.__doc__:
print(textwrap.fill(
fun.__doc__.splitlines()[0],
columns or 1000,
initial_indent=(' {0}: '.format(cmd)).ljust(longest),
subsequent_indent=longest * ' '))
else:
print(' ' + cmd)
def pew():
first_run = makedirs_and_symlink_if_needed(workon_home)
if first_run and sys.stdin.isatty():
first_run_setup()
cmds = dict((cmd[:-4], fun)
for cmd, fun in globals().items() if cmd.endswith('_cmd'))
if sys.argv[1:]:
if sys.argv[1] in cmds:
command = cmds[sys.argv[1]]
try:
return command(sys.argv[2:])
except CalledProcessError as e:
return e.returncode
except KeyboardInterrupt:
pass
else:
err("ERROR: command", sys.argv[1], "does not exist.")
print_commands(cmds)
sys.exit(1)
else:
print_commands(cmds)
|
berdario/pew | pew/pew.py | toggleglobalsitepackages_cmd | python | def toggleglobalsitepackages_cmd(argv):
quiet = argv == ['-q']
site = sitepackages_dir()
ngsp_file = site.parent / 'no-global-site-packages.txt'
if ngsp_file.exists():
ngsp_file.unlink()
if not quiet:
print('Enabled global site-packages')
else:
with ngsp_file.open('w'):
if not quiet:
print('Disabled global site-packages') | Toggle the current virtualenv between having and not having access to the global site-packages. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L451-L463 | [
"def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):\n if not env:\n sys.exit('ERROR: no virtualenv active')\n else:\n env_python = workon_home / env / env_bin_dir / 'python'\n return Path(invoke(str(env_python), '-c', 'import distutils; \\\nprint(distutils.sysconfig.get_python_lib())').out)\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
if not workon_home.exists() and own(workon_home):
workon_home.mkdir(parents=True)
link = expandpath('~/.virtualenvs')
if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
'XDG_DATA_HOME' not in os.environ and not link.exists():
link.symlink_to(str(workon_home))
return True
else:
return False
pew_site = Path(__file__).parent
def supported_shell():
shell = Path(os.environ.get('SHELL', '')).stem
if shell in ('bash', 'zsh', 'fish'):
return shell
def shell_config_cmd(argv):
"Prints the path for the current $SHELL helper file"
shell = supported_shell()
if shell:
print(pew_site / 'shell_config' / ('init.' + shell))
else:
err('Completions and prompts are unavailable for %s' %
repr(os.environ.get('SHELL', '')))
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
project_file = workon_home / env / '.project'
if project_file.exists():
with project_file.open() as f:
project_dir = f.readline().strip()
if os.path.exists(project_dir):
return project_dir
else:
err('Corrupted or outdated:', project_file, '\nDirectory',
project_dir, "doesn't exist.")
def unsetenv(key):
if key in os.environ:
del os.environ[key]
def compute_path(env):
envdir = workon_home / env
return os.pathsep.join([
str(envdir / env_bin_dir),
os.environ['PATH'],
])
def inve(env, command, *args, **kwargs):
"""Run a command in the given virtual environment.
Pass additional keyword arguments to ``subprocess.check_call()``."""
# we don't strictly need to restore the environment, since pew runs in
# its own process, but it feels like the right thing to do
with temp_environ():
os.environ['VIRTUAL_ENV'] = str(workon_home / env)
os.environ['PATH'] = compute_path(env)
unsetenv('PYTHONHOME')
unsetenv('__PYVENV_LAUNCHER__')
try:
return check_call([command] + list(args), shell=windows, **kwargs)
# need to have shell=True on windows, otherwise the PYTHONPATH
# won't inherit the PATH
except OSError as e:
if e.errno == 2:
err('Unable to find', command)
return 2
else:
raise
def fork_shell(env, shellcmd, cwd):
or_ctrld = '' if windows else "or 'Ctrl+D' "
err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
"to return.", sep='')
if 'VIRTUAL_ENV' in os.environ:
err("Be aware that this environment will be nested on top "
"of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
# bash is a special little snowflake, and prevent_path_errors cannot work there
# https://github.com/berdario/pew/issues/58#issuecomment-102182346
bashrcpath = expandpath('~/.bashrc')
if bashrcpath.exists():
with NamedTemporaryFile('w+') as rcfile:
with bashrcpath.open() as bashrc:
rcfile.write(bashrc.read())
rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
rcfile.flush()
return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
else:
return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
shell_cmd = ['cmd']
cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
if expandpath(cmderrc_path).exists():
shell_cmd += ['/k', cmderrc_path]
if cwd:
os.environ['CMDER_START'] = cwd
return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
env = str(env)
shell = _detect_shell()
shell_name = Path(shell).stem
if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
# On Windows the PATH is usually set with System Utility
# so we won't worry about trying to check mistakes there
shell_check = (sys.executable + ' -c "from pew.pew import '
'prevent_path_errors; prevent_path_errors()"')
try:
inve(env, shell, '-c', shell_check)
except CalledProcessError:
return
if shell_name == 'bash':
return fork_bash(env, cwd)
elif shell_name == 'Cmder':
return fork_cmder(env, cwd)
else:
return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
requirements=None, rest=[]):
if python:
rest = ["--python=%s" % python] + rest
path = (workon_home / envname).absolute()
try:
check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
except (CalledProcessError, KeyboardInterrupt):
rmvirtualenvs([envname])
raise
else:
if project:
setvirtualenvproject(envname, project.absolute())
if requirements:
inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
if packages:
inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python')
parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
return parser
def new_cmd(argv):
"""Create a new environment, in $WORKON_HOME."""
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname)
def rmvirtualenvs(envs):
error_happened = False
for env in envs:
env = workon_home / env
if os.environ.get('VIRTUAL_ENV') == str(env):
err("ERROR: You cannot remove the active environment (%s)." % env)
error_happened = True
break
try:
shutil.rmtree(str(env))
except OSError as e:
err("Error while trying to remove the {0} env: \n{1}".format
(env, e.strerror))
error_happened = True
return error_happened
def rm_cmd(argv):
"""Remove one or more environment, from $WORKON_HOME."""
if len(argv) < 1:
sys.exit("Please specify an environment")
return rmvirtualenvs(argv)
def packages(site_packages):
nodes = site_packages.iterdir()
return set([x.stem.split('-')[0] for x in nodes]) - set(['__pycache__'])
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
    """Return the sorted names of all environments in $WORKON_HOME."""
    # A directory counts as an env when it contains <env>/<bin-dir>/python*;
    # parts[-3] recovers the env name from each matched executable path.
    return sorted(set(env.parts[-3] for env in
                workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def ls_cmd(argv):
"""List available environments."""
parser = argparse.ArgumentParser()
p_group = parser.add_mutually_exclusive_group()
p_group.add_argument('-b', '--brief', action='store_false')
p_group.add_argument('-l', '--long', action='store_true')
args = parser.parse_args(argv)
lsvirtualenv(args.long)
def parse_envname(argv, no_arg_callback):
    """Validate and return the environment name supplied in *argv*.

    Calls *no_arg_callback* when no name was given; exits with an error
    message for absolute paths or names not present in $WORKON_HOME.
    """
    if not argv or argv[0] is None:
        no_arg_callback()
    env = argv[0]
    if env.startswith('/'):
        sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
    if (workon_home / env).exists():
        return env
    sys.exit("ERROR: Environment '{0}' does not exist. Create it with 'pew new {0}'.".format(env))
def workon_cmd(argv):
"""List or change working virtual environments."""
parser = argparse.ArgumentParser(prog='pew workon')
parser.add_argument('envname', nargs='?')
parser.add_argument(
'-n', '--no-cd', action='store_true',
help=('Do not change working directory to project directory after '
'activating virtualenv.')
)
args = parser.parse_args(argv)
def list_and_exit():
lsvirtualenv(False)
sys.exit(0)
env = parse_envname([args.envname], list_and_exit)
# Check if the virtualenv has an associated project directory and in
# this case, use it as the current working directory.
project_dir = get_project_dir(env)
if project_dir is None or args.no_cd:
project_dir = os.getcwd()
return shell(env, cwd=project_dir)
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
if not env:
sys.exit('ERROR: no virtualenv active')
else:
env_python = workon_home / env / env_bin_dir / 'python'
return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
"""Add the specified directories to the Python path for the currently active virtualenv.
This will be done by placing the directory names in a path file named
"virtualenv_path_extensions.pth" inside the virtualenv's site-packages
directory; if this file does not exists, it will be created first.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='remove', action='store_true')
parser.add_argument('dirs', nargs='+')
args = parser.parse_args(argv)
extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
if not extra_paths.exists():
with extra_paths.open('w') as extra:
extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
def rewrite(f):
with extra_paths.open('r+') as extra:
to_write = f(extra.readlines())
extra.seek(0)
extra.truncate()
extra.writelines(to_write)
if args.remove:
rewrite(lambda ls: [line for line in ls if line not in new_paths])
else:
rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
print(sitepackages_dir())
def lssitepackages_cmd(argv):
"""Show the content of the site-packages directory of the current virtualenv."""
site = sitepackages_dir()
print(*sorted(site.iterdir()), sep=os.linesep)
extra_paths = site / '_virtualenv_path_extensions.pth'
if extra_paths.exists():
print('from _virtualenv_path_extensions.pth:')
with extra_paths.open() as extra:
print(''.join(extra.readlines()))
def cp_cmd(argv):
"""Duplicate the named virtualenv to make a new one."""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target', nargs='?')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
args = parser.parse_args(argv)
target_name = copy_virtualenv_project(args.source, args.target)
if args.activate:
shell(target_name)
def copy_virtualenv_project(source, target):
source = expandpath(source)
if not source.exists():
source = workon_home / source
if not source.exists():
sys.exit('Please provide a valid virtualenv to copy')
target_name = target or source.name
target = workon_home / target_name
if target.exists():
sys.exit('%s virtualenv already exists in %s.' % (
target_name, workon_home
))
print('Copying {0} in {1}'.format(source, target_name))
clone_virtualenv(str(source), str(target))
return target_name
def rename_cmd(argv):
    """Rename a virtualenv"""
    # A rename is implemented as copy-then-delete, so a failure while
    # copying leaves the source environment intact.
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('target')
    args = parser.parse_args(argv)
    copy_virtualenv_project(args.source, args.target)
    return rmvirtualenvs([args.source])
def setvirtualenvproject(env, project):
    """Record *project* as the project directory associated with *env*.

    The path is written (as bytes) to the env's ``.project`` marker file.
    """
    print('Setting project for {0} to {1}'.format(env, project))
    project_file = workon_home / env / '.project'
    with project_file.open('wb') as marker:
        marker.write(str(project).encode())
def setproject_cmd(argv):
"""Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
args = dict(enumerate(argv))
project = os.path.abspath(args.get(1, '.'))
env = args.get(0, os.environ.get('VIRTUAL_ENV'))
if not env:
sys.exit('pew setproject [virtualenv] [project_path]')
if not (workon_home / env).exists():
sys.exit("Environment '%s' doesn't exist." % env)
if not os.path.isdir(project):
sys.exit('pew setproject: %s does not exist' % project)
setvirtualenvproject(env, project)
def getproject_cmd(argv):
"""Print a virtualenv's project directory, if set.
If called without providing a virtualenv name as argument, print the
current virtualenv's project directory.
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Print an environment's project directory.",
)
parser.add_argument(
'envname',
nargs='?',
default=os.environ.get('VIRTUAL_ENV'),
help=(
'The name of the environment to return the project directory '
'for. If omitted, will use the currently active environment.'
),
)
args = parser.parse_args(argv)
# Now, do the actual work
if not args.envname:
sys.exit('ERROR: no virtualenv active')
if not (workon_home / args.envname).exists():
sys.exit("ERROR: Environment '{0}' does not exist."
.format(args.envname))
project_dir = get_project_dir(args.envname)
if project_dir is None:
sys.exit("ERROR: no project directory set for Environment '{0}'"
.format(args.envname))
print(project_dir)
def mkproject_cmd(argv):
"""Create a new project directory and its associated virtualenv."""
if '-l' in argv or '--list' in argv:
templates = [t.name[9:] for t in workon_home.glob("template_*")]
print("Available project templates:", *templates, sep='\n')
return
parser = mkvirtualenv_argparser()
parser.add_argument('envname')
parser.add_argument(
'-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
parser.add_argument(
'-l', '--list', action='store_true', help='List available templates.')
args, rest = parser.parse_known_args(argv)
projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
if not projects_home.exists():
sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
project = (projects_home / args.envname).absolute()
if project.exists():
sys.exit('Project %s already exists.' % args.envname)
mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
args.requirements, rest)
project.mkdir()
for template_name in args.templates:
template = workon_home / ("template_" + template_name)
inve(args.envname, str(template), args.envname, str(project))
if args.activate:
shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
"""Create a temporary virtualenv."""
parser = mkvirtualenv_argparser()
env = '.'
while (workon_home / env).exists():
env = hex(random.getrandbits(64))[2:-1]
args, rest = parser.parse_known_args(argv)
mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
rest=rest)
print('This is a temporary environment. It will be deleted when you exit')
try:
if args.activate:
# only used for testing on windows
shell(env)
finally:
return rmvirtualenvs([env])
def wipeenv_cmd(argv):
"""Remove all installed packages from the current (or supplied) env."""
env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
if not env:
sys.exit('ERROR: no virtualenv active')
elif not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
else:
env_pip = str(workon_home / env / env_bin_dir / 'pip')
all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
ignored = sorted(all_pkgs - pkgs)
pkgs = set(p.split("==")[0] for p in pkgs)
to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
if to_remove:
print("Ignoring:\n %s" % "\n ".join(ignored))
print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
return inve(env, 'pip', 'uninstall', '-y', *to_remove)
else:
print("Nothing to remove")
def inall_cmd(argv):
"""Run a command in each virtualenv."""
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors)
def in_cmd(argv):
"""Run a command in the given virtualenv."""
if len(argv) == 1:
return workon_cmd(argv)
parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
return inve(*argv)
def restore_cmd(argv):
"""Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
if len(argv) < 1:
sys.exit('You must provide a valid virtualenv to target')
env = argv[0]
path = workon_home / env
py = path / env_bin_dir / ('python.exe' if windows else 'python')
exact_py = py.resolve().name
return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
"""Print the path for the virtualenv directory"""
env = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
print(workon_home / env)
def install_cmd(argv):
'''Use Pythonz to download and build the specified Python version'''
installer = InstallCommand()
options, versions = installer.parser.parse_args(argv)
if len(versions) != 1:
installer.parser.print_help()
sys.exit(1)
else:
try:
actual_installer = PythonInstaller.get_installer(versions[0], options)
return actual_installer.install()
except AlreadyInstalledError as e:
print(e)
def uninstall_cmd(argv):
'''Use Pythonz to uninstall the specified Python version'''
return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
'''List the pythons installed by Pythonz (or all the installable ones)'''
return ListPythons().run(argv)
def locate_python_cmd(argv):
'''Locate the path for the python version installed by Pythonz'''
return LocatePython().run(argv)
def version_cmd(argv):
    """Prints current pew version"""
    # Imported lazily: pkg_resources (setuptools) is slow to import and is
    # only needed by this one command.
    import pkg_resources
    try:
        __version__ = pkg_resources.get_distribution('pew').version
    except pkg_resources.DistributionNotFound:
        # Happens when pew runs from a source checkout that was never
        # installed, so no distribution metadata exists.
        __version__ = 'unknown'
        print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
    print(__version__)
def prevent_path_errors():
if 'VIRTUAL_ENV' in os.environ and not check_path():
sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
shell = supported_shell()
if shell:
if shell == 'fish':
source_cmd = 'source (pew shell_config)'
else:
source_cmd = 'source "$(pew shell_config)"'
rcpath = expandpath({'bash': '~/.bashrc'
, 'zsh': '~/.zshrc'
, 'fish': '~/.config/fish/config.fish'}[shell])
if rcpath.exists():
update_config_file(rcpath, source_cmd)
else:
print("It seems that you're running pew for the first time\n"
"If you want source shell competions and update your prompt, "
"Add the following line to your shell config file:\n %s" % source_cmd)
print('\nWill now continue with the command:', *sys.argv[1:])
input('[enter]')
def update_config_file(rcpath, source_cmd):
with rcpath.open('r+') as rcfile:
if source_cmd not in (line.strip() for line in rcfile.readlines()):
choice = 'X'
while choice not in ('y', '', 'n'):
choice = input("It seems that you're running pew for the first time\n"
"do you want to modify %s to source completions and"
" update your prompt? [y/N]\n> " % rcpath).lower()
if choice == 'y':
rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
print('Done')
else:
print('\nOk, if you want to do it manually, just add\n %s\nat'
' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
longest = max(map(len, cmds)) + 3
columns, _ = get_terminal_size()
print('Available commands:\n')
for cmd, fun in sorted(cmds.items()):
if fun.__doc__:
print(textwrap.fill(
fun.__doc__.splitlines()[0],
columns or 1000,
initial_indent=(' {0}: '.format(cmd)).ljust(longest),
subsequent_indent=longest * ' '))
else:
print(' ' + cmd)
def pew():
    """Command-line entry point: dispatch ``sys.argv`` to a ``*_cmd`` function."""
    first_run = makedirs_and_symlink_if_needed(workon_home)
    # Only offer the interactive shell-config setup on a real terminal.
    if first_run and sys.stdin.isatty():
        first_run_setup()
    # Build the command table from every module-level name ending in
    # '_cmd' (e.g. the function 'new_cmd' is exposed as 'pew new').
    cmds = dict((cmd[:-4], fun)
                for cmd, fun in globals().items() if cmd.endswith('_cmd'))
    if sys.argv[1:]:
        if sys.argv[1] in cmds:
            command = cmds[sys.argv[1]]
            try:
                return command(sys.argv[2:])
            except CalledProcessError as e:
                # Propagate the child process's exit status as pew's own.
                return e.returncode
            except KeyboardInterrupt:
                pass
        else:
            # Unknown subcommand: show the available ones and fail.
            err("ERROR: command", sys.argv[1], "does not exist.")
            print_commands(cmds)
            sys.exit(1)
    else:
        # No subcommand given: just list what is available.
        print_commands(cmds)
|
berdario/pew | pew/pew.py | cp_cmd | python | def cp_cmd(argv):
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target', nargs='?')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
args = parser.parse_args(argv)
target_name = copy_virtualenv_project(args.source, args.target)
if args.activate:
shell(target_name) | Duplicate the named virtualenv to make a new one. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L466-L479 | [
"def shell(env, cwd=None):\n env = str(env)\n shell = _detect_shell()\n shell_name = Path(shell).stem\n if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):\n # On Windows the PATH is usually set with System Utility\n # so we won't worry about trying to check mistakes there\n shell_check = (sys.executable + ' -c \"from pew.pew import '\n 'prevent_path_errors; prevent_path_errors()\"')\n try:\n inve(env, shell, '-c', shell_check)\n except CalledProcessError:\n return\n if shell_name == 'bash':\n return fork_bash(env, cwd)\n elif shell_name == 'Cmder':\n return fork_cmder(env, cwd)\n else:\n return fork_shell(env, [shell], cwd)\n",
"def copy_virtualenv_project(source, target):\n source = expandpath(source)\n if not source.exists():\n source = workon_home / source\n if not source.exists():\n sys.exit('Please provide a valid virtualenv to copy')\n\n target_name = target or source.name\n\n target = workon_home / target_name\n\n if target.exists():\n sys.exit('%s virtualenv already exists in %s.' % (\n target_name, workon_home\n ))\n\n print('Copying {0} in {1}'.format(source, target_name))\n clone_virtualenv(str(source), str(target))\n return target_name\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
if not workon_home.exists() and own(workon_home):
workon_home.mkdir(parents=True)
link = expandpath('~/.virtualenvs')
if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
'XDG_DATA_HOME' not in os.environ and not link.exists():
link.symlink_to(str(workon_home))
return True
else:
return False
pew_site = Path(__file__).parent
def supported_shell():
    """Return the current $SHELL's base name if pew ships helpers for it.

    Returns 'bash', 'zsh' or 'fish'; None for any other (or unset) shell.
    """
    name = Path(os.environ.get('SHELL', '')).stem
    return name if name in ('bash', 'zsh', 'fish') else None
def shell_config_cmd(argv):
"Prints the path for the current $SHELL helper file"
shell = supported_shell()
if shell:
print(pew_site / 'shell_config' / ('init.' + shell))
else:
err('Completions and prompts are unavailable for %s' %
repr(os.environ.get('SHELL', '')))
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
    """Return the project directory recorded for *env*, or None.

    The association is stored as the first line of the env's ``.project``
    file. Returns None (with a warning) when the recorded directory no
    longer exists, and None silently when no ``.project`` file is present.
    """
    project_file = workon_home / env / '.project'
    if project_file.exists():
        with project_file.open() as f:
            project_dir = f.readline().strip()
        if os.path.exists(project_dir):
            return project_dir
        else:
            # The marker file points at a directory that has since been
            # moved or deleted — warn rather than fail.
            err('Corrupted or outdated:', project_file, '\nDirectory',
                project_dir, "doesn't exist.")
def unsetenv(key):
    """Remove *key* from ``os.environ`` if present (no-op otherwise)."""
    # pop with a default replaces the original check-then-delete, which
    # looked the key up twice; behavior for absent keys is unchanged.
    os.environ.pop(key, None)
def compute_path(env):
    """Return $PATH with *env*'s bin/Scripts directory prepended."""
    env_bin = workon_home / env / env_bin_dir
    return os.pathsep.join([str(env_bin), os.environ['PATH']])
def inve(env, command, *args, **kwargs):
    """Run a command in the given virtual environment.

    Pass additional keyword arguments to ``subprocess.check_call()``."""
    # we don't strictly need to restore the environment, since pew runs in
    # its own process, but it feels like the right thing to do
    with temp_environ():
        # Activate the env for the child: point VIRTUAL_ENV at it and put
        # its bin dir first on PATH.
        os.environ['VIRTUAL_ENV'] = str(workon_home / env)
        os.environ['PATH'] = compute_path(env)
        # These would override the env's own interpreter selection.
        unsetenv('PYTHONHOME')
        unsetenv('__PYVENV_LAUNCHER__')
        try:
            return check_call([command] + list(args), shell=windows, **kwargs)
        # need to have shell=True on windows, otherwise the PYTHONPATH
        # won't inherit the PATH
        except OSError as e:
            # errno 2 is ENOENT: the command itself was not found on PATH.
            if e.errno == 2:
                err('Unable to find', command)
                return 2
            else:
                raise
def fork_shell(env, shellcmd, cwd):
or_ctrld = '' if windows else "or 'Ctrl+D' "
err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
"to return.", sep='')
if 'VIRTUAL_ENV' in os.environ:
err("Be aware that this environment will be nested on top "
"of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
# bash is a special little snowflake, and prevent_path_errors cannot work there
# https://github.com/berdario/pew/issues/58#issuecomment-102182346
bashrcpath = expandpath('~/.bashrc')
if bashrcpath.exists():
with NamedTemporaryFile('w+') as rcfile:
with bashrcpath.open() as bashrc:
rcfile.write(bashrc.read())
rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
rcfile.flush()
return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
else:
return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
shell_cmd = ['cmd']
cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
if expandpath(cmderrc_path).exists():
shell_cmd += ['/k', cmderrc_path]
if cwd:
os.environ['CMDER_START'] = cwd
return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
    """Best-effort detection of the user's shell executable.

    Priority: $SHELL if set; 'Cmder' when $CMDER_ROOT is present; on
    Windows, shellingham detection falling back to %COMSPEC%/cmd.exe;
    otherwise plain 'sh'.
    """
    shell = os.environ.get('SHELL', None)
    if not shell:
        if 'CMDER_ROOT' in os.environ:
            shell = 'Cmder'
        elif windows:
            try:
                _, shell = shellingham.detect_shell()
            except shellingham.ShellDetectionFailure:
                shell = os.environ.get('COMSPEC', 'cmd.exe')
        else:
            shell = 'sh'
    return shell
def shell(env, cwd=None):
env = str(env)
shell = _detect_shell()
shell_name = Path(shell).stem
if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
# On Windows the PATH is usually set with System Utility
# so we won't worry about trying to check mistakes there
shell_check = (sys.executable + ' -c "from pew.pew import '
'prevent_path_errors; prevent_path_errors()"')
try:
inve(env, shell, '-c', shell_check)
except CalledProcessError:
return
if shell_name == 'bash':
return fork_bash(env, cwd)
elif shell_name == 'Cmder':
return fork_cmder(env, cwd)
else:
return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
requirements=None, rest=[]):
if python:
rest = ["--python=%s" % python] + rest
path = (workon_home / envname).absolute()
try:
check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
except (CalledProcessError, KeyboardInterrupt):
rmvirtualenvs([envname])
raise
else:
if project:
setvirtualenvproject(envname, project.absolute())
if requirements:
inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
if packages:
inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python')
parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
return parser
def new_cmd(argv):
"""Create a new environment, in $WORKON_HOME."""
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname)
def rmvirtualenvs(envs):
error_happened = False
for env in envs:
env = workon_home / env
if os.environ.get('VIRTUAL_ENV') == str(env):
err("ERROR: You cannot remove the active environment (%s)." % env)
error_happened = True
break
try:
shutil.rmtree(str(env))
except OSError as e:
err("Error while trying to remove the {0} env: \n{1}".format
(env, e.strerror))
error_happened = True
return error_happened
def rm_cmd(argv):
"""Remove one or more environment, from $WORKON_HOME."""
if len(argv) < 1:
sys.exit("Please specify an environment")
return rmvirtualenvs(argv)
def packages(site_packages):
    """List distribution names in *site_packages*, version tags stripped.

    Keeps the part of each entry's stem before the first '-', and drops
    the ``__pycache__`` entry.
    """
    result = set()
    for node in site_packages.iterdir():
        result.add(node.stem.split('-')[0])
    result.discard('__pycache__')
    return result
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
return sorted(set(env.parts[-3] for env in
workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def ls_cmd(argv):
"""List available environments."""
parser = argparse.ArgumentParser()
p_group = parser.add_mutually_exclusive_group()
p_group.add_argument('-b', '--brief', action='store_false')
p_group.add_argument('-l', '--long', action='store_true')
args = parser.parse_args(argv)
lsvirtualenv(args.long)
def parse_envname(argv, no_arg_callback):
if len(argv) < 1 or argv[0] is None:
no_arg_callback()
env = argv[0]
if env.startswith('/'):
sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
if not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
else:
return env
def workon_cmd(argv):
"""List or change working virtual environments."""
parser = argparse.ArgumentParser(prog='pew workon')
parser.add_argument('envname', nargs='?')
parser.add_argument(
'-n', '--no-cd', action='store_true',
help=('Do not change working directory to project directory after '
'activating virtualenv.')
)
args = parser.parse_args(argv)
def list_and_exit():
lsvirtualenv(False)
sys.exit(0)
env = parse_envname([args.envname], list_and_exit)
# Check if the virtualenv has an associated project directory and in
# this case, use it as the current working directory.
project_dir = get_project_dir(env)
if project_dir is None or args.no_cd:
project_dir = os.getcwd()
return shell(env, cwd=project_dir)
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
if not env:
sys.exit('ERROR: no virtualenv active')
else:
env_python = workon_home / env / env_bin_dir / 'python'
return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
"""Add the specified directories to the Python path for the currently active virtualenv.
This will be done by placing the directory names in a path file named
"virtualenv_path_extensions.pth" inside the virtualenv's site-packages
directory; if this file does not exists, it will be created first.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='remove', action='store_true')
parser.add_argument('dirs', nargs='+')
args = parser.parse_args(argv)
extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
if not extra_paths.exists():
with extra_paths.open('w') as extra:
extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
def rewrite(f):
with extra_paths.open('r+') as extra:
to_write = f(extra.readlines())
extra.seek(0)
extra.truncate()
extra.writelines(to_write)
if args.remove:
rewrite(lambda ls: [line for line in ls if line not in new_paths])
else:
rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
print(sitepackages_dir())
def lssitepackages_cmd(argv):
"""Show the content of the site-packages directory of the current virtualenv."""
site = sitepackages_dir()
print(*sorted(site.iterdir()), sep=os.linesep)
extra_paths = site / '_virtualenv_path_extensions.pth'
if extra_paths.exists():
print('from _virtualenv_path_extensions.pth:')
with extra_paths.open() as extra:
print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
"""Toggle the current virtualenv between having and not having access to the global site-packages."""
quiet = argv == ['-q']
site = sitepackages_dir()
ngsp_file = site.parent / 'no-global-site-packages.txt'
if ngsp_file.exists():
ngsp_file.unlink()
if not quiet:
print('Enabled global site-packages')
else:
with ngsp_file.open('w'):
if not quiet:
print('Disabled global site-packages')
def copy_virtualenv_project(source, target):
source = expandpath(source)
if not source.exists():
source = workon_home / source
if not source.exists():
sys.exit('Please provide a valid virtualenv to copy')
target_name = target or source.name
target = workon_home / target_name
if target.exists():
sys.exit('%s virtualenv already exists in %s.' % (
target_name, workon_home
))
print('Copying {0} in {1}'.format(source, target_name))
clone_virtualenv(str(source), str(target))
return target_name
def rename_cmd(argv):
"""Rename a virtualenv"""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target')
pargs = parser.parse_args(argv)
copy_virtualenv_project(pargs.source, pargs.target)
return rmvirtualenvs([pargs.source])
def setvirtualenvproject(env, project):
print('Setting project for {0} to {1}'.format(env, project))
with (workon_home / env / '.project').open('wb') as prj:
prj.write(str(project).encode())
def setproject_cmd(argv):
"""Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
args = dict(enumerate(argv))
project = os.path.abspath(args.get(1, '.'))
env = args.get(0, os.environ.get('VIRTUAL_ENV'))
if not env:
sys.exit('pew setproject [virtualenv] [project_path]')
if not (workon_home / env).exists():
sys.exit("Environment '%s' doesn't exist." % env)
if not os.path.isdir(project):
sys.exit('pew setproject: %s does not exist' % project)
setvirtualenvproject(env, project)
def getproject_cmd(argv):
"""Print a virtualenv's project directory, if set.
If called without providing a virtualenv name as argument, print the
current virtualenv's project directory.
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Print an environment's project directory.",
)
parser.add_argument(
'envname',
nargs='?',
default=os.environ.get('VIRTUAL_ENV'),
help=(
'The name of the environment to return the project directory '
'for. If omitted, will use the currently active environment.'
),
)
args = parser.parse_args(argv)
# Now, do the actual work
if not args.envname:
sys.exit('ERROR: no virtualenv active')
if not (workon_home / args.envname).exists():
sys.exit("ERROR: Environment '{0}' does not exist."
.format(args.envname))
project_dir = get_project_dir(args.envname)
if project_dir is None:
sys.exit("ERROR: no project directory set for Environment '{0}'"
.format(args.envname))
print(project_dir)
def mkproject_cmd(argv):
"""Create a new project directory and its associated virtualenv."""
if '-l' in argv or '--list' in argv:
templates = [t.name[9:] for t in workon_home.glob("template_*")]
print("Available project templates:", *templates, sep='\n')
return
parser = mkvirtualenv_argparser()
parser.add_argument('envname')
parser.add_argument(
'-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
parser.add_argument(
'-l', '--list', action='store_true', help='List available templates.')
args, rest = parser.parse_known_args(argv)
projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
if not projects_home.exists():
sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
project = (projects_home / args.envname).absolute()
if project.exists():
sys.exit('Project %s already exists.' % args.envname)
mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
args.requirements, rest)
project.mkdir()
for template_name in args.templates:
template = workon_home / ("template_" + template_name)
inve(args.envname, str(template), args.envname, str(project))
if args.activate:
shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
"""Create a temporary virtualenv."""
parser = mkvirtualenv_argparser()
env = '.'
while (workon_home / env).exists():
env = hex(random.getrandbits(64))[2:-1]
args, rest = parser.parse_known_args(argv)
mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
rest=rest)
print('This is a temporary environment. It will be deleted when you exit')
try:
if args.activate:
# only used for testing on windows
shell(env)
finally:
return rmvirtualenvs([env])
def wipeenv_cmd(argv):
"""Remove all installed packages from the current (or supplied) env."""
env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
if not env:
sys.exit('ERROR: no virtualenv active')
elif not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
else:
env_pip = str(workon_home / env / env_bin_dir / 'pip')
all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
ignored = sorted(all_pkgs - pkgs)
pkgs = set(p.split("==")[0] for p in pkgs)
to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
if to_remove:
print("Ignoring:\n %s" % "\n ".join(ignored))
print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
return inve(env, 'pip', 'uninstall', '-y', *to_remove)
else:
print("Nothing to remove")
def inall_cmd(argv):
"""Run a command in each virtualenv."""
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors)
def in_cmd(argv):
"""Run a command in the given virtualenv."""
if len(argv) == 1:
return workon_cmd(argv)
parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
return inve(*argv)
def restore_cmd(argv):
"""Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
if len(argv) < 1:
sys.exit('You must provide a valid virtualenv to target')
env = argv[0]
path = workon_home / env
py = path / env_bin_dir / ('python.exe' if windows else 'python')
exact_py = py.resolve().name
return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
"""Print the path for the virtualenv directory"""
env = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
print(workon_home / env)
def install_cmd(argv):
'''Use Pythonz to download and build the specified Python version'''
installer = InstallCommand()
options, versions = installer.parser.parse_args(argv)
if len(versions) != 1:
installer.parser.print_help()
sys.exit(1)
else:
try:
actual_installer = PythonInstaller.get_installer(versions[0], options)
return actual_installer.install()
except AlreadyInstalledError as e:
print(e)
def uninstall_cmd(argv):
'''Use Pythonz to uninstall the specified Python version'''
return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
'''List the pythons installed by Pythonz (or all the installable ones)'''
return ListPythons().run(argv)
def locate_python_cmd(argv):
'''Locate the path for the python version installed by Pythonz'''
return LocatePython().run(argv)
def version_cmd(argv):
"""Prints current pew version"""
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('pew').version
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
print(__version__)
def prevent_path_errors():
if 'VIRTUAL_ENV' in os.environ and not check_path():
sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
shell = supported_shell()
if shell:
if shell == 'fish':
source_cmd = 'source (pew shell_config)'
else:
source_cmd = 'source "$(pew shell_config)"'
rcpath = expandpath({'bash': '~/.bashrc'
, 'zsh': '~/.zshrc'
, 'fish': '~/.config/fish/config.fish'}[shell])
if rcpath.exists():
update_config_file(rcpath, source_cmd)
else:
print("It seems that you're running pew for the first time\n"
"If you want source shell competions and update your prompt, "
"Add the following line to your shell config file:\n %s" % source_cmd)
print('\nWill now continue with the command:', *sys.argv[1:])
input('[enter]')
def update_config_file(rcpath, source_cmd):
with rcpath.open('r+') as rcfile:
if source_cmd not in (line.strip() for line in rcfile.readlines()):
choice = 'X'
while choice not in ('y', '', 'n'):
choice = input("It seems that you're running pew for the first time\n"
"do you want to modify %s to source completions and"
" update your prompt? [y/N]\n> " % rcpath).lower()
if choice == 'y':
rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
print('Done')
else:
print('\nOk, if you want to do it manually, just add\n %s\nat'
' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
longest = max(map(len, cmds)) + 3
columns, _ = get_terminal_size()
print('Available commands:\n')
for cmd, fun in sorted(cmds.items()):
if fun.__doc__:
print(textwrap.fill(
fun.__doc__.splitlines()[0],
columns or 1000,
initial_indent=(' {0}: '.format(cmd)).ljust(longest),
subsequent_indent=longest * ' '))
else:
print(' ' + cmd)
def pew():
first_run = makedirs_and_symlink_if_needed(workon_home)
if first_run and sys.stdin.isatty():
first_run_setup()
cmds = dict((cmd[:-4], fun)
for cmd, fun in globals().items() if cmd.endswith('_cmd'))
if sys.argv[1:]:
if sys.argv[1] in cmds:
command = cmds[sys.argv[1]]
try:
return command(sys.argv[2:])
except CalledProcessError as e:
return e.returncode
except KeyboardInterrupt:
pass
else:
err("ERROR: command", sys.argv[1], "does not exist.")
print_commands(cmds)
sys.exit(1)
else:
print_commands(cmds)
|
berdario/pew | pew/pew.py | rename_cmd | python | def rename_cmd(argv):
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target')
pargs = parser.parse_args(argv)
copy_virtualenv_project(pargs.source, pargs.target)
return rmvirtualenvs([pargs.source]) | Rename a virtualenv | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L503-L510 | [
"def rmvirtualenvs(envs):\n error_happened = False\n for env in envs:\n env = workon_home / env\n if os.environ.get('VIRTUAL_ENV') == str(env):\n err(\"ERROR: You cannot remove the active environment (%s).\" % env)\n error_happened = True\n break\n try:\n shutil.rmtree(str(env))\n except OSError as e:\n err(\"Error while trying to remove the {0} env: \\n{1}\".format\n (env, e.strerror))\n error_happened = True\n return error_happened\n",
"def copy_virtualenv_project(source, target):\n source = expandpath(source)\n if not source.exists():\n source = workon_home / source\n if not source.exists():\n sys.exit('Please provide a valid virtualenv to copy')\n\n target_name = target or source.name\n\n target = workon_home / target_name\n\n if target.exists():\n sys.exit('%s virtualenv already exists in %s.' % (\n target_name, workon_home\n ))\n\n print('Copying {0} in {1}'.format(source, target_name))\n clone_virtualenv(str(source), str(target))\n return target_name\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
if not workon_home.exists() and own(workon_home):
workon_home.mkdir(parents=True)
link = expandpath('~/.virtualenvs')
if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
'XDG_DATA_HOME' not in os.environ and not link.exists():
link.symlink_to(str(workon_home))
return True
else:
return False
pew_site = Path(__file__).parent
def supported_shell():
shell = Path(os.environ.get('SHELL', '')).stem
if shell in ('bash', 'zsh', 'fish'):
return shell
def shell_config_cmd(argv):
"Prints the path for the current $SHELL helper file"
shell = supported_shell()
if shell:
print(pew_site / 'shell_config' / ('init.' + shell))
else:
err('Completions and prompts are unavailable for %s' %
repr(os.environ.get('SHELL', '')))
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
project_file = workon_home / env / '.project'
if project_file.exists():
with project_file.open() as f:
project_dir = f.readline().strip()
if os.path.exists(project_dir):
return project_dir
else:
err('Corrupted or outdated:', project_file, '\nDirectory',
project_dir, "doesn't exist.")
def unsetenv(key):
if key in os.environ:
del os.environ[key]
def compute_path(env):
envdir = workon_home / env
return os.pathsep.join([
str(envdir / env_bin_dir),
os.environ['PATH'],
])
def inve(env, command, *args, **kwargs):
"""Run a command in the given virtual environment.
Pass additional keyword arguments to ``subprocess.check_call()``."""
# we don't strictly need to restore the environment, since pew runs in
# its own process, but it feels like the right thing to do
with temp_environ():
os.environ['VIRTUAL_ENV'] = str(workon_home / env)
os.environ['PATH'] = compute_path(env)
unsetenv('PYTHONHOME')
unsetenv('__PYVENV_LAUNCHER__')
try:
return check_call([command] + list(args), shell=windows, **kwargs)
# need to have shell=True on windows, otherwise the PYTHONPATH
# won't inherit the PATH
except OSError as e:
if e.errno == 2:
err('Unable to find', command)
return 2
else:
raise
def fork_shell(env, shellcmd, cwd):
or_ctrld = '' if windows else "or 'Ctrl+D' "
err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
"to return.", sep='')
if 'VIRTUAL_ENV' in os.environ:
err("Be aware that this environment will be nested on top "
"of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
# bash is a special little snowflake, and prevent_path_errors cannot work there
# https://github.com/berdario/pew/issues/58#issuecomment-102182346
bashrcpath = expandpath('~/.bashrc')
if bashrcpath.exists():
with NamedTemporaryFile('w+') as rcfile:
with bashrcpath.open() as bashrc:
rcfile.write(bashrc.read())
rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
rcfile.flush()
return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
else:
return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
shell_cmd = ['cmd']
cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
if expandpath(cmderrc_path).exists():
shell_cmd += ['/k', cmderrc_path]
if cwd:
os.environ['CMDER_START'] = cwd
return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
env = str(env)
shell = _detect_shell()
shell_name = Path(shell).stem
if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
# On Windows the PATH is usually set with System Utility
# so we won't worry about trying to check mistakes there
shell_check = (sys.executable + ' -c "from pew.pew import '
'prevent_path_errors; prevent_path_errors()"')
try:
inve(env, shell, '-c', shell_check)
except CalledProcessError:
return
if shell_name == 'bash':
return fork_bash(env, cwd)
elif shell_name == 'Cmder':
return fork_cmder(env, cwd)
else:
return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
requirements=None, rest=[]):
if python:
rest = ["--python=%s" % python] + rest
path = (workon_home / envname).absolute()
try:
check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
except (CalledProcessError, KeyboardInterrupt):
rmvirtualenvs([envname])
raise
else:
if project:
setvirtualenvproject(envname, project.absolute())
if requirements:
inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
if packages:
inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python')
parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
return parser
def new_cmd(argv):
"""Create a new environment, in $WORKON_HOME."""
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname)
def rmvirtualenvs(envs):
error_happened = False
for env in envs:
env = workon_home / env
if os.environ.get('VIRTUAL_ENV') == str(env):
err("ERROR: You cannot remove the active environment (%s)." % env)
error_happened = True
break
try:
shutil.rmtree(str(env))
except OSError as e:
err("Error while trying to remove the {0} env: \n{1}".format
(env, e.strerror))
error_happened = True
return error_happened
def rm_cmd(argv):
"""Remove one or more environment, from $WORKON_HOME."""
if len(argv) < 1:
sys.exit("Please specify an environment")
return rmvirtualenvs(argv)
def packages(site_packages):
nodes = site_packages.iterdir()
return set([x.stem.split('-')[0] for x in nodes]) - set(['__pycache__'])
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
return sorted(set(env.parts[-3] for env in
workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def ls_cmd(argv):
"""List available environments."""
parser = argparse.ArgumentParser()
p_group = parser.add_mutually_exclusive_group()
p_group.add_argument('-b', '--brief', action='store_false')
p_group.add_argument('-l', '--long', action='store_true')
args = parser.parse_args(argv)
lsvirtualenv(args.long)
def parse_envname(argv, no_arg_callback):
if len(argv) < 1 or argv[0] is None:
no_arg_callback()
env = argv[0]
if env.startswith('/'):
sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
if not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
else:
return env
def workon_cmd(argv):
"""List or change working virtual environments."""
parser = argparse.ArgumentParser(prog='pew workon')
parser.add_argument('envname', nargs='?')
parser.add_argument(
'-n', '--no-cd', action='store_true',
help=('Do not change working directory to project directory after '
'activating virtualenv.')
)
args = parser.parse_args(argv)
def list_and_exit():
lsvirtualenv(False)
sys.exit(0)
env = parse_envname([args.envname], list_and_exit)
# Check if the virtualenv has an associated project directory and in
# this case, use it as the current working directory.
project_dir = get_project_dir(env)
if project_dir is None or args.no_cd:
project_dir = os.getcwd()
return shell(env, cwd=project_dir)
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
if not env:
sys.exit('ERROR: no virtualenv active')
else:
env_python = workon_home / env / env_bin_dir / 'python'
return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
"""Add the specified directories to the Python path for the currently active virtualenv.
This will be done by placing the directory names in a path file named
"virtualenv_path_extensions.pth" inside the virtualenv's site-packages
directory; if this file does not exists, it will be created first.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='remove', action='store_true')
parser.add_argument('dirs', nargs='+')
args = parser.parse_args(argv)
extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
if not extra_paths.exists():
with extra_paths.open('w') as extra:
extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
def rewrite(f):
with extra_paths.open('r+') as extra:
to_write = f(extra.readlines())
extra.seek(0)
extra.truncate()
extra.writelines(to_write)
if args.remove:
rewrite(lambda ls: [line for line in ls if line not in new_paths])
else:
rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
print(sitepackages_dir())
def lssitepackages_cmd(argv):
"""Show the content of the site-packages directory of the current virtualenv."""
site = sitepackages_dir()
print(*sorted(site.iterdir()), sep=os.linesep)
extra_paths = site / '_virtualenv_path_extensions.pth'
if extra_paths.exists():
print('from _virtualenv_path_extensions.pth:')
with extra_paths.open() as extra:
print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
"""Toggle the current virtualenv between having and not having access to the global site-packages."""
quiet = argv == ['-q']
site = sitepackages_dir()
ngsp_file = site.parent / 'no-global-site-packages.txt'
if ngsp_file.exists():
ngsp_file.unlink()
if not quiet:
print('Enabled global site-packages')
else:
with ngsp_file.open('w'):
if not quiet:
print('Disabled global site-packages')
def cp_cmd(argv):
"""Duplicate the named virtualenv to make a new one."""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target', nargs='?')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
args = parser.parse_args(argv)
target_name = copy_virtualenv_project(args.source, args.target)
if args.activate:
shell(target_name)
def copy_virtualenv_project(source, target):
source = expandpath(source)
if not source.exists():
source = workon_home / source
if not source.exists():
sys.exit('Please provide a valid virtualenv to copy')
target_name = target or source.name
target = workon_home / target_name
if target.exists():
sys.exit('%s virtualenv already exists in %s.' % (
target_name, workon_home
))
print('Copying {0} in {1}'.format(source, target_name))
clone_virtualenv(str(source), str(target))
return target_name
def setvirtualenvproject(env, project):
print('Setting project for {0} to {1}'.format(env, project))
with (workon_home / env / '.project').open('wb') as prj:
prj.write(str(project).encode())
def setproject_cmd(argv):
"""Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
args = dict(enumerate(argv))
project = os.path.abspath(args.get(1, '.'))
env = args.get(0, os.environ.get('VIRTUAL_ENV'))
if not env:
sys.exit('pew setproject [virtualenv] [project_path]')
if not (workon_home / env).exists():
sys.exit("Environment '%s' doesn't exist." % env)
if not os.path.isdir(project):
sys.exit('pew setproject: %s does not exist' % project)
setvirtualenvproject(env, project)
def getproject_cmd(argv):
"""Print a virtualenv's project directory, if set.
If called without providing a virtualenv name as argument, print the
current virtualenv's project directory.
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Print an environment's project directory.",
)
parser.add_argument(
'envname',
nargs='?',
default=os.environ.get('VIRTUAL_ENV'),
help=(
'The name of the environment to return the project directory '
'for. If omitted, will use the currently active environment.'
),
)
args = parser.parse_args(argv)
# Now, do the actual work
if not args.envname:
sys.exit('ERROR: no virtualenv active')
if not (workon_home / args.envname).exists():
sys.exit("ERROR: Environment '{0}' does not exist."
.format(args.envname))
project_dir = get_project_dir(args.envname)
if project_dir is None:
sys.exit("ERROR: no project directory set for Environment '{0}'"
.format(args.envname))
print(project_dir)
def mkproject_cmd(argv):
"""Create a new project directory and its associated virtualenv."""
if '-l' in argv or '--list' in argv:
templates = [t.name[9:] for t in workon_home.glob("template_*")]
print("Available project templates:", *templates, sep='\n')
return
parser = mkvirtualenv_argparser()
parser.add_argument('envname')
parser.add_argument(
'-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
parser.add_argument(
'-l', '--list', action='store_true', help='List available templates.')
args, rest = parser.parse_known_args(argv)
projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
if not projects_home.exists():
sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
project = (projects_home / args.envname).absolute()
if project.exists():
sys.exit('Project %s already exists.' % args.envname)
mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
args.requirements, rest)
project.mkdir()
for template_name in args.templates:
template = workon_home / ("template_" + template_name)
inve(args.envname, str(template), args.envname, str(project))
if args.activate:
shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
"""Create a temporary virtualenv."""
parser = mkvirtualenv_argparser()
env = '.'
while (workon_home / env).exists():
env = hex(random.getrandbits(64))[2:-1]
args, rest = parser.parse_known_args(argv)
mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
rest=rest)
print('This is a temporary environment. It will be deleted when you exit')
try:
if args.activate:
# only used for testing on windows
shell(env)
finally:
return rmvirtualenvs([env])
def wipeenv_cmd(argv):
"""Remove all installed packages from the current (or supplied) env."""
env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
if not env:
sys.exit('ERROR: no virtualenv active')
elif not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
else:
env_pip = str(workon_home / env / env_bin_dir / 'pip')
all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
ignored = sorted(all_pkgs - pkgs)
pkgs = set(p.split("==")[0] for p in pkgs)
to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
if to_remove:
print("Ignoring:\n %s" % "\n ".join(ignored))
print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
return inve(env, 'pip', 'uninstall', '-y', *to_remove)
else:
print("Nothing to remove")
def inall_cmd(argv):
"""Run a command in each virtualenv."""
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors)
def in_cmd(argv):
"""Run a command in the given virtualenv."""
if len(argv) == 1:
return workon_cmd(argv)
parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
return inve(*argv)
def restore_cmd(argv):
"""Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
if len(argv) < 1:
sys.exit('You must provide a valid virtualenv to target')
env = argv[0]
path = workon_home / env
py = path / env_bin_dir / ('python.exe' if windows else 'python')
exact_py = py.resolve().name
return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
"""Print the path for the virtualenv directory"""
env = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
print(workon_home / env)
def install_cmd(argv):
'''Use Pythonz to download and build the specified Python version'''
installer = InstallCommand()
options, versions = installer.parser.parse_args(argv)
if len(versions) != 1:
installer.parser.print_help()
sys.exit(1)
else:
try:
actual_installer = PythonInstaller.get_installer(versions[0], options)
return actual_installer.install()
except AlreadyInstalledError as e:
print(e)
def uninstall_cmd(argv):
'''Use Pythonz to uninstall the specified Python version'''
return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
'''List the pythons installed by Pythonz (or all the installable ones)'''
return ListPythons().run(argv)
def locate_python_cmd(argv):
'''Locate the path for the python version installed by Pythonz'''
return LocatePython().run(argv)
def version_cmd(argv):
"""Prints current pew version"""
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('pew').version
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
print(__version__)
def prevent_path_errors():
if 'VIRTUAL_ENV' in os.environ and not check_path():
sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
shell = supported_shell()
if shell:
if shell == 'fish':
source_cmd = 'source (pew shell_config)'
else:
source_cmd = 'source "$(pew shell_config)"'
rcpath = expandpath({'bash': '~/.bashrc'
, 'zsh': '~/.zshrc'
, 'fish': '~/.config/fish/config.fish'}[shell])
if rcpath.exists():
update_config_file(rcpath, source_cmd)
else:
print("It seems that you're running pew for the first time\n"
"If you want source shell competions and update your prompt, "
"Add the following line to your shell config file:\n %s" % source_cmd)
print('\nWill now continue with the command:', *sys.argv[1:])
input('[enter]')
def update_config_file(rcpath, source_cmd):
with rcpath.open('r+') as rcfile:
if source_cmd not in (line.strip() for line in rcfile.readlines()):
choice = 'X'
while choice not in ('y', '', 'n'):
choice = input("It seems that you're running pew for the first time\n"
"do you want to modify %s to source completions and"
" update your prompt? [y/N]\n> " % rcpath).lower()
if choice == 'y':
rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
print('Done')
else:
print('\nOk, if you want to do it manually, just add\n %s\nat'
' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
longest = max(map(len, cmds)) + 3
columns, _ = get_terminal_size()
print('Available commands:\n')
for cmd, fun in sorted(cmds.items()):
if fun.__doc__:
print(textwrap.fill(
fun.__doc__.splitlines()[0],
columns or 1000,
initial_indent=(' {0}: '.format(cmd)).ljust(longest),
subsequent_indent=longest * ' '))
else:
print(' ' + cmd)
def pew():
first_run = makedirs_and_symlink_if_needed(workon_home)
if first_run and sys.stdin.isatty():
first_run_setup()
cmds = dict((cmd[:-4], fun)
for cmd, fun in globals().items() if cmd.endswith('_cmd'))
if sys.argv[1:]:
if sys.argv[1] in cmds:
command = cmds[sys.argv[1]]
try:
return command(sys.argv[2:])
except CalledProcessError as e:
return e.returncode
except KeyboardInterrupt:
pass
else:
err("ERROR: command", sys.argv[1], "does not exist.")
print_commands(cmds)
sys.exit(1)
else:
print_commands(cmds)
|
berdario/pew | pew/pew.py | setproject_cmd | python | def setproject_cmd(argv):
args = dict(enumerate(argv))
project = os.path.abspath(args.get(1, '.'))
env = args.get(0, os.environ.get('VIRTUAL_ENV'))
if not env:
sys.exit('pew setproject [virtualenv] [project_path]')
if not (workon_home / env).exists():
sys.exit("Environment '%s' doesn't exist." % env)
if not os.path.isdir(project):
sys.exit('pew setproject: %s does not exist' % project)
setvirtualenvproject(env, project) | Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L519-L531 | [
"def setvirtualenvproject(env, project):\n print('Setting project for {0} to {1}'.format(env, project))\n with (workon_home / env / '.project').open('wb') as prj:\n prj.write(str(project).encode())\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
if not workon_home.exists() and own(workon_home):
workon_home.mkdir(parents=True)
link = expandpath('~/.virtualenvs')
if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
'XDG_DATA_HOME' not in os.environ and not link.exists():
link.symlink_to(str(workon_home))
return True
else:
return False
pew_site = Path(__file__).parent
def supported_shell():
shell = Path(os.environ.get('SHELL', '')).stem
if shell in ('bash', 'zsh', 'fish'):
return shell
def shell_config_cmd(argv):
"Prints the path for the current $SHELL helper file"
shell = supported_shell()
if shell:
print(pew_site / 'shell_config' / ('init.' + shell))
else:
err('Completions and prompts are unavailable for %s' %
repr(os.environ.get('SHELL', '')))
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
project_file = workon_home / env / '.project'
if project_file.exists():
with project_file.open() as f:
project_dir = f.readline().strip()
if os.path.exists(project_dir):
return project_dir
else:
err('Corrupted or outdated:', project_file, '\nDirectory',
project_dir, "doesn't exist.")
def unsetenv(key):
if key in os.environ:
del os.environ[key]
def compute_path(env):
envdir = workon_home / env
return os.pathsep.join([
str(envdir / env_bin_dir),
os.environ['PATH'],
])
def inve(env, command, *args, **kwargs):
"""Run a command in the given virtual environment.
Pass additional keyword arguments to ``subprocess.check_call()``."""
# we don't strictly need to restore the environment, since pew runs in
# its own process, but it feels like the right thing to do
with temp_environ():
os.environ['VIRTUAL_ENV'] = str(workon_home / env)
os.environ['PATH'] = compute_path(env)
unsetenv('PYTHONHOME')
unsetenv('__PYVENV_LAUNCHER__')
try:
return check_call([command] + list(args), shell=windows, **kwargs)
# need to have shell=True on windows, otherwise the PYTHONPATH
# won't inherit the PATH
except OSError as e:
if e.errno == 2:
err('Unable to find', command)
return 2
else:
raise
def fork_shell(env, shellcmd, cwd):
or_ctrld = '' if windows else "or 'Ctrl+D' "
err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
"to return.", sep='')
if 'VIRTUAL_ENV' in os.environ:
err("Be aware that this environment will be nested on top "
"of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
# bash is a special little snowflake, and prevent_path_errors cannot work there
# https://github.com/berdario/pew/issues/58#issuecomment-102182346
bashrcpath = expandpath('~/.bashrc')
if bashrcpath.exists():
with NamedTemporaryFile('w+') as rcfile:
with bashrcpath.open() as bashrc:
rcfile.write(bashrc.read())
rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
rcfile.flush()
return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
else:
return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
shell_cmd = ['cmd']
cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
if expandpath(cmderrc_path).exists():
shell_cmd += ['/k', cmderrc_path]
if cwd:
os.environ['CMDER_START'] = cwd
return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
env = str(env)
shell = _detect_shell()
shell_name = Path(shell).stem
if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
# On Windows the PATH is usually set with System Utility
# so we won't worry about trying to check mistakes there
shell_check = (sys.executable + ' -c "from pew.pew import '
'prevent_path_errors; prevent_path_errors()"')
try:
inve(env, shell, '-c', shell_check)
except CalledProcessError:
return
if shell_name == 'bash':
return fork_bash(env, cwd)
elif shell_name == 'Cmder':
return fork_cmder(env, cwd)
else:
return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
requirements=None, rest=[]):
if python:
rest = ["--python=%s" % python] + rest
path = (workon_home / envname).absolute()
try:
check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
except (CalledProcessError, KeyboardInterrupt):
rmvirtualenvs([envname])
raise
else:
if project:
setvirtualenvproject(envname, project.absolute())
if requirements:
inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
if packages:
inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python')
parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
return parser
def new_cmd(argv):
"""Create a new environment, in $WORKON_HOME."""
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname)
def rmvirtualenvs(envs):
error_happened = False
for env in envs:
env = workon_home / env
if os.environ.get('VIRTUAL_ENV') == str(env):
err("ERROR: You cannot remove the active environment (%s)." % env)
error_happened = True
break
try:
shutil.rmtree(str(env))
except OSError as e:
err("Error while trying to remove the {0} env: \n{1}".format
(env, e.strerror))
error_happened = True
return error_happened
def rm_cmd(argv):
"""Remove one or more environment, from $WORKON_HOME."""
if len(argv) < 1:
sys.exit("Please specify an environment")
return rmvirtualenvs(argv)
def packages(site_packages):
nodes = site_packages.iterdir()
return set([x.stem.split('-')[0] for x in nodes]) - set(['__pycache__'])
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
return sorted(set(env.parts[-3] for env in
workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def ls_cmd(argv):
"""List available environments."""
parser = argparse.ArgumentParser()
p_group = parser.add_mutually_exclusive_group()
p_group.add_argument('-b', '--brief', action='store_false')
p_group.add_argument('-l', '--long', action='store_true')
args = parser.parse_args(argv)
lsvirtualenv(args.long)
def parse_envname(argv, no_arg_callback):
if len(argv) < 1 or argv[0] is None:
no_arg_callback()
env = argv[0]
if env.startswith('/'):
sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
if not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
else:
return env
def workon_cmd(argv):
"""List or change working virtual environments."""
parser = argparse.ArgumentParser(prog='pew workon')
parser.add_argument('envname', nargs='?')
parser.add_argument(
'-n', '--no-cd', action='store_true',
help=('Do not change working directory to project directory after '
'activating virtualenv.')
)
args = parser.parse_args(argv)
def list_and_exit():
lsvirtualenv(False)
sys.exit(0)
env = parse_envname([args.envname], list_and_exit)
# Check if the virtualenv has an associated project directory and in
# this case, use it as the current working directory.
project_dir = get_project_dir(env)
if project_dir is None or args.no_cd:
project_dir = os.getcwd()
return shell(env, cwd=project_dir)
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
if not env:
sys.exit('ERROR: no virtualenv active')
else:
env_python = workon_home / env / env_bin_dir / 'python'
return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
"""Add the specified directories to the Python path for the currently active virtualenv.
This will be done by placing the directory names in a path file named
"virtualenv_path_extensions.pth" inside the virtualenv's site-packages
directory; if this file does not exists, it will be created first.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='remove', action='store_true')
parser.add_argument('dirs', nargs='+')
args = parser.parse_args(argv)
extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
if not extra_paths.exists():
with extra_paths.open('w') as extra:
extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
def rewrite(f):
with extra_paths.open('r+') as extra:
to_write = f(extra.readlines())
extra.seek(0)
extra.truncate()
extra.writelines(to_write)
if args.remove:
rewrite(lambda ls: [line for line in ls if line not in new_paths])
else:
rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
print(sitepackages_dir())
def lssitepackages_cmd(argv):
"""Show the content of the site-packages directory of the current virtualenv."""
site = sitepackages_dir()
print(*sorted(site.iterdir()), sep=os.linesep)
extra_paths = site / '_virtualenv_path_extensions.pth'
if extra_paths.exists():
print('from _virtualenv_path_extensions.pth:')
with extra_paths.open() as extra:
print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
"""Toggle the current virtualenv between having and not having access to the global site-packages."""
quiet = argv == ['-q']
site = sitepackages_dir()
ngsp_file = site.parent / 'no-global-site-packages.txt'
if ngsp_file.exists():
ngsp_file.unlink()
if not quiet:
print('Enabled global site-packages')
else:
with ngsp_file.open('w'):
if not quiet:
print('Disabled global site-packages')
def cp_cmd(argv):
"""Duplicate the named virtualenv to make a new one."""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target', nargs='?')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
args = parser.parse_args(argv)
target_name = copy_virtualenv_project(args.source, args.target)
if args.activate:
shell(target_name)
def copy_virtualenv_project(source, target):
source = expandpath(source)
if not source.exists():
source = workon_home / source
if not source.exists():
sys.exit('Please provide a valid virtualenv to copy')
target_name = target or source.name
target = workon_home / target_name
if target.exists():
sys.exit('%s virtualenv already exists in %s.' % (
target_name, workon_home
))
print('Copying {0} in {1}'.format(source, target_name))
clone_virtualenv(str(source), str(target))
return target_name
def rename_cmd(argv):
"""Rename a virtualenv"""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target')
pargs = parser.parse_args(argv)
copy_virtualenv_project(pargs.source, pargs.target)
return rmvirtualenvs([pargs.source])
def setvirtualenvproject(env, project):
print('Setting project for {0} to {1}'.format(env, project))
with (workon_home / env / '.project').open('wb') as prj:
prj.write(str(project).encode())
def getproject_cmd(argv):
"""Print a virtualenv's project directory, if set.
If called without providing a virtualenv name as argument, print the
current virtualenv's project directory.
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Print an environment's project directory.",
)
parser.add_argument(
'envname',
nargs='?',
default=os.environ.get('VIRTUAL_ENV'),
help=(
'The name of the environment to return the project directory '
'for. If omitted, will use the currently active environment.'
),
)
args = parser.parse_args(argv)
# Now, do the actual work
if not args.envname:
sys.exit('ERROR: no virtualenv active')
if not (workon_home / args.envname).exists():
sys.exit("ERROR: Environment '{0}' does not exist."
.format(args.envname))
project_dir = get_project_dir(args.envname)
if project_dir is None:
sys.exit("ERROR: no project directory set for Environment '{0}'"
.format(args.envname))
print(project_dir)
def mkproject_cmd(argv):
"""Create a new project directory and its associated virtualenv."""
if '-l' in argv or '--list' in argv:
templates = [t.name[9:] for t in workon_home.glob("template_*")]
print("Available project templates:", *templates, sep='\n')
return
parser = mkvirtualenv_argparser()
parser.add_argument('envname')
parser.add_argument(
'-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
parser.add_argument(
'-l', '--list', action='store_true', help='List available templates.')
args, rest = parser.parse_known_args(argv)
projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
if not projects_home.exists():
sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
project = (projects_home / args.envname).absolute()
if project.exists():
sys.exit('Project %s already exists.' % args.envname)
mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
args.requirements, rest)
project.mkdir()
for template_name in args.templates:
template = workon_home / ("template_" + template_name)
inve(args.envname, str(template), args.envname, str(project))
if args.activate:
shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
"""Create a temporary virtualenv."""
parser = mkvirtualenv_argparser()
env = '.'
while (workon_home / env).exists():
env = hex(random.getrandbits(64))[2:-1]
args, rest = parser.parse_known_args(argv)
mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
rest=rest)
print('This is a temporary environment. It will be deleted when you exit')
try:
if args.activate:
# only used for testing on windows
shell(env)
finally:
return rmvirtualenvs([env])
def wipeenv_cmd(argv):
"""Remove all installed packages from the current (or supplied) env."""
env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
if not env:
sys.exit('ERROR: no virtualenv active')
elif not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
else:
env_pip = str(workon_home / env / env_bin_dir / 'pip')
all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
ignored = sorted(all_pkgs - pkgs)
pkgs = set(p.split("==")[0] for p in pkgs)
to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
if to_remove:
print("Ignoring:\n %s" % "\n ".join(ignored))
print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
return inve(env, 'pip', 'uninstall', '-y', *to_remove)
else:
print("Nothing to remove")
def inall_cmd(argv):
"""Run a command in each virtualenv."""
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors)
def in_cmd(argv):
"""Run a command in the given virtualenv."""
if len(argv) == 1:
return workon_cmd(argv)
parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
return inve(*argv)
def restore_cmd(argv):
"""Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
if len(argv) < 1:
sys.exit('You must provide a valid virtualenv to target')
env = argv[0]
path = workon_home / env
py = path / env_bin_dir / ('python.exe' if windows else 'python')
exact_py = py.resolve().name
return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
"""Print the path for the virtualenv directory"""
env = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
print(workon_home / env)
def install_cmd(argv):
'''Use Pythonz to download and build the specified Python version'''
installer = InstallCommand()
options, versions = installer.parser.parse_args(argv)
if len(versions) != 1:
installer.parser.print_help()
sys.exit(1)
else:
try:
actual_installer = PythonInstaller.get_installer(versions[0], options)
return actual_installer.install()
except AlreadyInstalledError as e:
print(e)
def uninstall_cmd(argv):
'''Use Pythonz to uninstall the specified Python version'''
return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
'''List the pythons installed by Pythonz (or all the installable ones)'''
return ListPythons().run(argv)
def locate_python_cmd(argv):
'''Locate the path for the python version installed by Pythonz'''
return LocatePython().run(argv)
def version_cmd(argv):
"""Prints current pew version"""
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('pew').version
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
print(__version__)
def prevent_path_errors():
if 'VIRTUAL_ENV' in os.environ and not check_path():
sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
shell = supported_shell()
if shell:
if shell == 'fish':
source_cmd = 'source (pew shell_config)'
else:
source_cmd = 'source "$(pew shell_config)"'
rcpath = expandpath({'bash': '~/.bashrc'
, 'zsh': '~/.zshrc'
, 'fish': '~/.config/fish/config.fish'}[shell])
if rcpath.exists():
update_config_file(rcpath, source_cmd)
else:
print("It seems that you're running pew for the first time\n"
"If you want source shell competions and update your prompt, "
"Add the following line to your shell config file:\n %s" % source_cmd)
print('\nWill now continue with the command:', *sys.argv[1:])
input('[enter]')
def update_config_file(rcpath, source_cmd):
with rcpath.open('r+') as rcfile:
if source_cmd not in (line.strip() for line in rcfile.readlines()):
choice = 'X'
while choice not in ('y', '', 'n'):
choice = input("It seems that you're running pew for the first time\n"
"do you want to modify %s to source completions and"
" update your prompt? [y/N]\n> " % rcpath).lower()
if choice == 'y':
rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
print('Done')
else:
print('\nOk, if you want to do it manually, just add\n %s\nat'
' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
longest = max(map(len, cmds)) + 3
columns, _ = get_terminal_size()
print('Available commands:\n')
for cmd, fun in sorted(cmds.items()):
if fun.__doc__:
print(textwrap.fill(
fun.__doc__.splitlines()[0],
columns or 1000,
initial_indent=(' {0}: '.format(cmd)).ljust(longest),
subsequent_indent=longest * ' '))
else:
print(' ' + cmd)
def pew():
first_run = makedirs_and_symlink_if_needed(workon_home)
if first_run and sys.stdin.isatty():
first_run_setup()
cmds = dict((cmd[:-4], fun)
for cmd, fun in globals().items() if cmd.endswith('_cmd'))
if sys.argv[1:]:
if sys.argv[1] in cmds:
command = cmds[sys.argv[1]]
try:
return command(sys.argv[2:])
except CalledProcessError as e:
return e.returncode
except KeyboardInterrupt:
pass
else:
err("ERROR: command", sys.argv[1], "does not exist.")
print_commands(cmds)
sys.exit(1)
else:
print_commands(cmds)
|
berdario/pew | pew/pew.py | getproject_cmd | python | def getproject_cmd(argv):
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Print an environment's project directory.",
)
parser.add_argument(
'envname',
nargs='?',
default=os.environ.get('VIRTUAL_ENV'),
help=(
'The name of the environment to return the project directory '
'for. If omitted, will use the currently active environment.'
),
)
args = parser.parse_args(argv)
# Now, do the actual work
if not args.envname:
sys.exit('ERROR: no virtualenv active')
if not (workon_home / args.envname).exists():
sys.exit("ERROR: Environment '{0}' does not exist."
.format(args.envname))
project_dir = get_project_dir(args.envname)
if project_dir is None:
sys.exit("ERROR: no project directory set for Environment '{0}'"
.format(args.envname))
print(project_dir) | Print a virtualenv's project directory, if set.
If called without providing a virtualenv name as argument, print the
current virtualenv's project directory. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L534-L564 | [
"def get_project_dir(env):\n project_file = workon_home / env / '.project'\n if project_file.exists():\n with project_file.open() as f:\n project_dir = f.readline().strip()\n if os.path.exists(project_dir):\n return project_dir\n else:\n err('Corrupted or outdated:', project_file, '\\nDirectory',\n project_dir, \"doesn't exist.\")\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
if not workon_home.exists() and own(workon_home):
workon_home.mkdir(parents=True)
link = expandpath('~/.virtualenvs')
if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
'XDG_DATA_HOME' not in os.environ and not link.exists():
link.symlink_to(str(workon_home))
return True
else:
return False
pew_site = Path(__file__).parent
def supported_shell():
shell = Path(os.environ.get('SHELL', '')).stem
if shell in ('bash', 'zsh', 'fish'):
return shell
def shell_config_cmd(argv):
"Prints the path for the current $SHELL helper file"
shell = supported_shell()
if shell:
print(pew_site / 'shell_config' / ('init.' + shell))
else:
err('Completions and prompts are unavailable for %s' %
repr(os.environ.get('SHELL', '')))
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
project_file = workon_home / env / '.project'
if project_file.exists():
with project_file.open() as f:
project_dir = f.readline().strip()
if os.path.exists(project_dir):
return project_dir
else:
err('Corrupted or outdated:', project_file, '\nDirectory',
project_dir, "doesn't exist.")
def unsetenv(key):
if key in os.environ:
del os.environ[key]
def compute_path(env):
envdir = workon_home / env
return os.pathsep.join([
str(envdir / env_bin_dir),
os.environ['PATH'],
])
def inve(env, command, *args, **kwargs):
"""Run a command in the given virtual environment.
Pass additional keyword arguments to ``subprocess.check_call()``."""
# we don't strictly need to restore the environment, since pew runs in
# its own process, but it feels like the right thing to do
with temp_environ():
os.environ['VIRTUAL_ENV'] = str(workon_home / env)
os.environ['PATH'] = compute_path(env)
unsetenv('PYTHONHOME')
unsetenv('__PYVENV_LAUNCHER__')
try:
return check_call([command] + list(args), shell=windows, **kwargs)
# need to have shell=True on windows, otherwise the PYTHONPATH
# won't inherit the PATH
except OSError as e:
if e.errno == 2:
err('Unable to find', command)
return 2
else:
raise
def fork_shell(env, shellcmd, cwd):
or_ctrld = '' if windows else "or 'Ctrl+D' "
err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
"to return.", sep='')
if 'VIRTUAL_ENV' in os.environ:
err("Be aware that this environment will be nested on top "
"of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
# bash is a special little snowflake, and prevent_path_errors cannot work there
# https://github.com/berdario/pew/issues/58#issuecomment-102182346
bashrcpath = expandpath('~/.bashrc')
if bashrcpath.exists():
with NamedTemporaryFile('w+') as rcfile:
with bashrcpath.open() as bashrc:
rcfile.write(bashrc.read())
rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
rcfile.flush()
return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
else:
return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
shell_cmd = ['cmd']
cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
if expandpath(cmderrc_path).exists():
shell_cmd += ['/k', cmderrc_path]
if cwd:
os.environ['CMDER_START'] = cwd
return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
env = str(env)
shell = _detect_shell()
shell_name = Path(shell).stem
if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
# On Windows the PATH is usually set with System Utility
# so we won't worry about trying to check mistakes there
shell_check = (sys.executable + ' -c "from pew.pew import '
'prevent_path_errors; prevent_path_errors()"')
try:
inve(env, shell, '-c', shell_check)
except CalledProcessError:
return
if shell_name == 'bash':
return fork_bash(env, cwd)
elif shell_name == 'Cmder':
return fork_cmder(env, cwd)
else:
return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
requirements=None, rest=[]):
if python:
rest = ["--python=%s" % python] + rest
path = (workon_home / envname).absolute()
try:
check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
except (CalledProcessError, KeyboardInterrupt):
rmvirtualenvs([envname])
raise
else:
if project:
setvirtualenvproject(envname, project.absolute())
if requirements:
inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
if packages:
inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python')
parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
return parser
def new_cmd(argv):
"""Create a new environment, in $WORKON_HOME."""
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname)
def rmvirtualenvs(envs):
    """Delete each named environment; return True if anything failed."""
    failed = False
    for name in envs:
        env_path = workon_home / name
        # Refuse to delete the environment we are currently inside of.
        if os.environ.get('VIRTUAL_ENV') == str(env_path):
            err("ERROR: You cannot remove the active environment (%s)." % env_path)
            failed = True
            break
        try:
            shutil.rmtree(str(env_path))
        except OSError as e:
            failed = True
            err("Error while trying to remove the {0} env: \n{1}".format(
                env_path, e.strerror))
    return failed
def rm_cmd(argv):
    """Remove one or more environment, from $WORKON_HOME."""
    # Guard clause: refuse to run without at least one env name.
    if not argv:
        sys.exit("Please specify an environment")
    return rmvirtualenvs(argv)
def packages(site_packages):
    """Return the distribution names found in a site-packages directory.

    Args:
        site_packages: a ``Path`` to a site-packages directory.

    Returns:
        A set of names derived from each entry's stem up to the first
        ``-`` (strips version suffixes like ``foo-1.0``), excluding the
        ``__pycache__`` directory.
    """
    # Set comprehension instead of a list built only to be turned into a set.
    return {p.stem.split('-')[0] for p in site_packages.iterdir()} - {'__pycache__'}
def showvirtualenv(env):
    """Print one env's name, python version and installed packages."""
    columns, _ = get_terminal_size()
    pkgs = sorted(packages(sitepackages_dir(env)))
    env_python = workon_home / env / env_bin_dir / 'python'
    # Width of the "<env>: " prefix; the package list is indented to match.
    l = len(env) + 2
    # Merge stdout and stderr ("python -V" may write to either).
    version = invoke(str(env_python), '-V')
    version = ' - '.join((version.out + version.err).splitlines())
    print(env, ': ', version, sep='')
    print(textwrap.fill(' '.join(pkgs),
                        width=columns-l,
                        initial_indent=(l * ' '),
                        subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
    """Show details about a single virtualenv."""
    try:
        showvirtualenv(argv[0])
    except IndexError:
        # No name given: fall back to the active env, if any.
        if 'VIRTUAL_ENV' not in os.environ:
            sys.exit('pew show [env]')
        showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
def lsenvs():
    """Return the sorted names of all envs that contain a python binary."""
    # Globbing '*/<bin>/python*' skips directories that are not actual
    # virtualenvs; parts[-3] recovers the env directory name from the match.
    return sorted(set(env.parts[-3] for env in
                  workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
    """List env names; with verbose=True print full details for each."""
    envs = lsenvs()
    if verbose:
        for name in envs:
            showvirtualenv(name)
    else:
        print_virtualenvs(*envs)
def ls_cmd(argv):
    """List available environments."""
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    # -b/--brief is accepted for symmetry, but brief output is already the
    # default; only -l/--long changes what is printed.
    group.add_argument('-b', '--brief', action='store_false')
    group.add_argument('-l', '--long', action='store_true')
    lsvirtualenv(parser.parse_args(argv).long)
def parse_envname(argv, no_arg_callback):
    """Validate and return the env name given in argv[0].

    ``no_arg_callback`` (expected to exit) is invoked when no name was
    supplied; absolute paths and unknown env names abort with an error.
    """
    if not argv or argv[0] is None:
        no_arg_callback()
    env = argv[0]
    if env.startswith('/'):
        sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
    if (workon_home / env).exists():
        return env
    sys.exit("ERROR: Environment '{0}' does not exist. Create it with 'pew new {0}'.".format(env))
def workon_cmd(argv):
    """List or change working virtual environments."""
    parser = argparse.ArgumentParser(prog='pew workon')
    parser.add_argument('envname', nargs='?')
    parser.add_argument(
        '-n', '--no-cd', action='store_true',
        help=('Do not change working directory to project directory after '
              'activating virtualenv.')
    )
    args = parser.parse_args(argv)
    # Fallback when no env name was given: print the env list and stop.
    def list_and_exit():
        lsvirtualenv(False)
        sys.exit(0)
    env = parse_envname([args.envname], list_and_exit)
    # Check if the virtualenv has an associated project directory and in
    # this case, use it as the current working directory.
    project_dir = get_project_dir(env)
    if project_dir is None or args.no_cd:
        project_dir = os.getcwd()
    return shell(env, cwd=project_dir)
def sitepackages_dir(env=None):
    """Return the site-packages Path of *env* (default: active virtualenv).

    Exits with an error when no env is given and none is active.
    """
    if env is None:
        # Look up the active env at call time. The original captured
        # os.environ.get('VIRTUAL_ENV') as the default at import time,
        # which goes stale if VIRTUAL_ENV changes while running.
        env = os.environ.get('VIRTUAL_ENV')
    if not env:
        sys.exit('ERROR: no virtualenv active')
    # Ask the env's own interpreter where its site-packages lives.
    env_python = workon_home / env / env_bin_dir / 'python'
    return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
    """Add the specified directories to the Python path for the currently active virtualenv.

    This will be done by placing the directory names in a path file named
    "_virtualenv_path_extensions.pth" inside the virtualenv's site-packages
    directory; if this file does not exist, it will be created first.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', dest='remove', action='store_true')
    parser.add_argument('dirs', nargs='+')
    args = parser.parse_args(argv)
    extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
    # One absolute path per line, newline-terminated (the .pth line format).
    new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
    if not extra_paths.exists():
        with extra_paths.open('w') as extra:
            # Two bootstrap lines executed when site processes the .pth:
            # they record len(sys.path) and later move the newly appended
            # entries toward the front of sys.path.
            extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
    # Rewrite the .pth in place using the given line-list transformation.
    def rewrite(f):
        with extra_paths.open('r+') as extra:
            to_write = f(extra.readlines())
            extra.seek(0)
            extra.truncate()
            extra.writelines(to_write)
    if args.remove:
        # -d: drop exactly the requested paths.
        rewrite(lambda ls: [line for line in ls if line not in new_paths])
    else:
        # Insert new paths right after the first bootstrap line.
        rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
    """Print the site-packages directory of the active virtualenv."""
    print(sitepackages_dir())
def lssitepackages_cmd(argv):
    """Show the content of the site-packages directory of the current virtualenv."""
    site = sitepackages_dir()
    print(*sorted(site.iterdir()), sep=os.linesep)
    # Also dump the extension .pth maintained by `pew add`, if present.
    extra_paths = site / '_virtualenv_path_extensions.pth'
    if extra_paths.exists():
        print('from _virtualenv_path_extensions.pth:')
        with extra_paths.open() as extra:
            print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
    """Toggle the current virtualenv between having and not having access to the global site-packages."""
    # The marker file's presence is what disables global site-packages.
    quiet = argv == ['-q']
    marker = sitepackages_dir().parent / 'no-global-site-packages.txt'
    if marker.exists():
        marker.unlink()
        message = 'Enabled global site-packages'
    else:
        marker.touch()
        message = 'Disabled global site-packages'
    if not quiet:
        print(message)
def cp_cmd(argv):
    """Duplicate the named virtualenv to make a new one."""
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    # Target defaults to the source's name (resolved by the helper below).
    parser.add_argument('target', nargs='?')
    parser.add_argument('-d', '--dont-activate', action='store_false',
                        default=True, dest='activate', help="After \
            creation, continue with the existing shell (don't \
            activate the new environment).")
    args = parser.parse_args(argv)
    target_name = copy_virtualenv_project(args.source, args.target)
    if args.activate:
        shell(target_name)
def copy_virtualenv_project(source, target):
    """Clone the env at *source* into $WORKON_HOME as *target*.

    *source* may be a filesystem path or the name of an env inside
    $WORKON_HOME; *target* defaults to the source's name. Exits when the
    source is missing or the target already exists. Returns the target name.
    """
    source = expandpath(source)
    if not source.exists():
        # Fall back to interpreting `source` as an env name.
        source = workon_home / source
        if not source.exists():
            sys.exit('Please provide a valid virtualenv to copy')
    target_name = target or source.name
    target = workon_home / target_name
    if target.exists():
        sys.exit('%s virtualenv already exists in %s.' % (
            target_name, workon_home
        ))
    print('Copying {0} in {1}'.format(source, target_name))
    clone_virtualenv(str(source), str(target))
    return target_name
def rename_cmd(argv):
    """Rename a virtualenv"""
    # Implemented as copy-then-delete: envs are plain directories, and the
    # clone step validates both names before anything is removed.
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('target')
    args = parser.parse_args(argv)
    copy_virtualenv_project(args.source, args.target)
    return rmvirtualenvs([args.source])
def setvirtualenvproject(env, project):
    """Record *project* as the project directory associated with *env*."""
    print('Setting project for {0} to {1}'.format(env, project))
    project_file = workon_home / env / '.project'
    # Written in binary mode: the path is stored as encoded bytes.
    with project_file.open('wb') as handle:
        handle.write(str(project).encode())
def setproject_cmd(argv):
    """Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
    # dict(enumerate(argv)) lets .get() supply positional defaults:
    # argv[0] = env (default: active env), argv[1] = project (default: cwd).
    args = dict(enumerate(argv))
    project = os.path.abspath(args.get(1, '.'))
    env = args.get(0, os.environ.get('VIRTUAL_ENV'))
    if not env:
        sys.exit('pew setproject [virtualenv] [project_path]')
    if not (workon_home / env).exists():
        sys.exit("Environment '%s' doesn't exist." % env)
    if not os.path.isdir(project):
        sys.exit('pew setproject: %s does not exist' % project)
    setvirtualenvproject(env, project)
def mkproject_cmd(argv):
    """Create a new project directory and its associated virtualenv."""
    # -l/--list is handled before argparse because parsing would otherwise
    # require the mandatory `envname` positional.
    if '-l' in argv or '--list' in argv:
        # [9:] strips the "template_" prefix (9 characters).
        templates = [t.name[9:] for t in workon_home.glob("template_*")]
        print("Available project templates:", *templates, sep='\n')
        return
    parser = mkvirtualenv_argparser()
    parser.add_argument('envname')
    parser.add_argument(
        '-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected.  They are applied in the order specified on the \
command line.')
    parser.add_argument(
        '-l', '--list', action='store_true', help='List available templates.')
    args, rest = parser.parse_known_args(argv)
    projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
    if not projects_home.exists():
        sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
    project = (projects_home / args.envname).absolute()
    if project.exists():
        sys.exit('Project %s already exists.' % args.envname)
    # Create the env first; if virtualenv fails, no project dir is left over.
    mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
                 args.requirements, rest)
    project.mkdir()
    # Templates are executables stored in $WORKON_HOME as "template_<name>",
    # invoked inside the new env with the env and project path as arguments.
    for template_name in args.templates:
        template = workon_home / ("template_" + template_name)
        inve(args.envname, str(template), args.envname, str(project))
    if args.activate:
        shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
    """Create a temporary virtualenv."""
    parser = mkvirtualenv_argparser()
    # Pick a random name that doesn't collide with an existing env. '.'
    # always "exists" (it is workon_home itself), so the loop runs at
    # least once.
    env = '.'
    while (workon_home / env).exists():
        # BUGFIX: the Python 2 leftover `hex(...)[2:-1]` stripped the
        # trailing 'L' of a long; on Python 3 it chopped the last hex
        # digit instead. format() yields the full 64-bit value.
        env = '{0:x}'.format(random.getrandbits(64))
    args, rest = parser.parse_known_args(argv)
    mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
                 rest=rest)
    print('This is a temporary environment. It will be deleted when you exit')
    try:
        if args.activate:
            # only used for testing on windows
            shell(env)
    finally:
        # NOTE: `return` inside `finally` (as in the original) also
        # suppresses any in-flight exception; kept deliberately so the
        # temp env removal result is always reported on exit.
        return rmvirtualenvs([env])
def wipeenv_cmd(argv):
    """Remove all installed packages from the current (or supplied) env."""
    env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
    if not env:
        sys.exit('ERROR: no virtualenv active')
    elif not (workon_home / env).exists():
        sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
    else:
        env_pip = str(workon_home / env / env_bin_dir / 'pip')
        all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
        # Only plain "name==version" freeze lines are uninstallable here;
        # anything else (e.g. editable/VCS entries) is reported as ignored.
        pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
        ignored = sorted(all_pkgs - pkgs)
        pkgs = set(p.split("==")[0] for p in pkgs)
        # distribute/wsgiref are kept — presumably removing them would
        # break the env itself (TODO confirm).
        to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
        if to_remove:
            print("Ignoring:\n %s" % "\n ".join(ignored))
            print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
            return inve(env, 'pip', 'uninstall', '-y', *to_remove)
        else:
            print("Nothing to remove")
def inall_cmd(argv):
    """Run a command in each virtualenv."""
    had_errors = False
    for env_name in lsenvs():
        print("\n%s:" % env_name)
        try:
            inve(env_name, *argv)
        except CalledProcessError as e:
            err(e)
            had_errors = True
    # True → exit status 1, False → 0.
    sys.exit(had_errors)
def in_cmd(argv):
    """Run a command in the given virtualenv."""
    # With only an env name and no command, behave like `pew workon`.
    if len(argv) == 1:
        return workon_cmd(argv)
    def missing_env():
        sys.exit('You must provide a valid virtualenv to target')
    parse_envname(argv, missing_env)
    return inve(*argv)
def restore_cmd(argv):
    """Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
    if len(argv) < 1:
        sys.exit('You must provide a valid virtualenv to target')
    env = argv[0]
    path = workon_home / env
    py = path / env_bin_dir / ('python.exe' if windows else 'python')
    # resolve() follows the symlink so we rebuild with the exact
    # interpreter name the env was created with (e.g. a versioned python).
    exact_py = py.resolve().name
    return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
    """Print the path for the virtualenv directory"""
    def missing_env():
        sys.exit('You must provide a valid virtualenv to target')
    print(workon_home / parse_envname(argv, missing_env))
def install_cmd(argv):
    '''Use Pythonz to download and build the specified Python version'''
    installer = InstallCommand()
    options, versions = installer.parser.parse_args(argv)
    # Exactly one version must be requested; otherwise show pythonz's help.
    if len(versions) != 1:
        installer.parser.print_help()
        sys.exit(1)
    else:
        try:
            actual_installer = PythonInstaller.get_installer(versions[0], options)
            return actual_installer.install()
        except AlreadyInstalledError as e:
            # Not a failure: just report that the version is already there.
            print(e)
def uninstall_cmd(argv):
    '''Use Pythonz to uninstall the specified Python version'''
    # Thin wrapper: pythonz's command object does its own argument parsing.
    return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
    '''List the pythons installed by Pythonz (or all the installable ones)'''
    # ListPythons also ensures the pythonz install directory exists.
    return ListPythons().run(argv)
def locate_python_cmd(argv):
    '''Locate the path for the python version installed by Pythonz'''
    # Thin wrapper around pythonz's locate command.
    return LocatePython().run(argv)
def version_cmd(argv):
    """Prints current pew version"""
    # Imported lazily: pkg_resources is only needed for this command.
    import pkg_resources
    try:
        version = pkg_resources.get_distribution('pew').version
    except pkg_resources.DistributionNotFound:
        print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
        version = 'unknown'
    print(version)
def prevent_path_errors():
    """Abort when a virtualenv is "active" but its bin dir is not on $PATH.

    Run inside the subshell spawned by shell() as a sanity check before
    handing control to the user.
    """
    if 'VIRTUAL_ENV' in os.environ and not check_path():
        sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
    """Interactive one-time setup: offer to source pew's shell helpers."""
    shell = supported_shell()
    if shell:
        # fish uses its own command-substitution syntax.
        if shell == 'fish':
            source_cmd = 'source (pew shell_config)'
        else:
            source_cmd = 'source "$(pew shell_config)"'
        rcpath = expandpath({'bash': '~/.bashrc'
                            , 'zsh': '~/.zshrc'
                            , 'fish': '~/.config/fish/config.fish'}[shell])
        if rcpath.exists():
            update_config_file(rcpath, source_cmd)
        else:
            # No rc file to edit: only print the instructions.
            print("It seems that you're running pew for the first time\n"
                  "If you want source shell competions and update your prompt, "
                  "Add the following line to your shell config file:\n %s" % source_cmd)
    print('\nWill now continue with the command:', *sys.argv[1:])
    input('[enter]')
def update_config_file(rcpath, source_cmd):
    """Offer to append *source_cmd* to the shell rc file at *rcpath*.

    Does nothing when the line is already present; otherwise prompts the
    user (y/N) before appending.
    """
    with rcpath.open('r+') as rcfile:
        already_there = source_cmd in (line.strip() for line in rcfile.readlines())
        if already_there:
            return
        choice = 'X'
        while choice not in ('y', '', 'n'):
            choice = input("It seems that you're running pew for the first time\n"
                           "do you want to modify %s to source completions and"
                           " update your prompt? [y/N]\n> " % rcpath).lower()
        if choice == 'y':
            rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
            print('Done')
        else:
            print('\nOk, if you want to do it manually, just add\n %s\nat'
                  ' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
    """Print the name and one-line summary of every pew subcommand."""
    pad = max(map(len, cmds)) + 3
    columns, _ = get_terminal_size()
    print('Available commands:\n')
    for name, func in sorted(cmds.items()):
        doc = func.__doc__
        if not doc:
            print(' ' + name)
            continue
        # Only the docstring's first line is shown, wrapped to the terminal.
        print(textwrap.fill(
            doc.splitlines()[0],
            columns or 1000,
            initial_indent=(' {0}: '.format(name)).ljust(pad),
            subsequent_indent=pad * ' '))
def pew():
    """CLI entry point: dispatch sys.argv[1] to the matching *_cmd function."""
    first_run = makedirs_and_symlink_if_needed(workon_home)
    # Only prompt for setup interactively.
    if first_run and sys.stdin.isatty():
        first_run_setup()
    # Subcommands are discovered by the `_cmd` suffix convention;
    # [:-4] strips that suffix to get the command name.
    cmds = dict((cmd[:-4], fun)
                for cmd, fun in globals().items() if cmd.endswith('_cmd'))
    if sys.argv[1:]:
        if sys.argv[1] in cmds:
            command = cmds[sys.argv[1]]
            try:
                return command(sys.argv[2:])
            except CalledProcessError as e:
                # Propagate the child's exit status as our own.
                return e.returncode
            except KeyboardInterrupt:
                pass
        else:
            err("ERROR: command", sys.argv[1], "does not exist.")
            print_commands(cmds)
            sys.exit(1)
    else:
        print_commands(cmds)
|
berdario/pew | pew/pew.py | mkproject_cmd | python | def mkproject_cmd(argv):
if '-l' in argv or '--list' in argv:
templates = [t.name[9:] for t in workon_home.glob("template_*")]
print("Available project templates:", *templates, sep='\n')
return
parser = mkvirtualenv_argparser()
parser.add_argument('envname')
parser.add_argument(
'-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
parser.add_argument(
'-l', '--list', action='store_true', help='List available templates.')
args, rest = parser.parse_known_args(argv)
projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
if not projects_home.exists():
sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
project = (projects_home / args.envname).absolute()
if project.exists():
sys.exit('Project %s already exists.' % args.envname)
mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
args.requirements, rest)
project.mkdir()
for template_name in args.templates:
template = workon_home / ("template_" + template_name)
inve(args.envname, str(template), args.envname, str(project))
if args.activate:
shell(args.envname, cwd=str(project)) | Create a new project directory and its associated virtualenv. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L567-L603 | [
"def shell(env, cwd=None):\n env = str(env)\n shell = _detect_shell()\n shell_name = Path(shell).stem\n if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):\n # On Windows the PATH is usually set with System Utility\n # so we won't worry about trying to check mistakes there\n shell_check = (sys.executable + ' -c \"from pew.pew import '\n 'prevent_path_errors; prevent_path_errors()\"')\n try:\n inve(env, shell, '-c', shell_check)\n except CalledProcessError:\n return\n if shell_name == 'bash':\n return fork_bash(env, cwd)\n elif shell_name == 'Cmder':\n return fork_cmder(env, cwd)\n else:\n return fork_shell(env, [shell], cwd)\n",
"def inve(env, command, *args, **kwargs):\n \"\"\"Run a command in the given virtual environment.\n\n Pass additional keyword arguments to ``subprocess.check_call()``.\"\"\"\n # we don't strictly need to restore the environment, since pew runs in\n # its own process, but it feels like the right thing to do\n with temp_environ():\n os.environ['VIRTUAL_ENV'] = str(workon_home / env)\n os.environ['PATH'] = compute_path(env)\n\n unsetenv('PYTHONHOME')\n unsetenv('__PYVENV_LAUNCHER__')\n\n try:\n return check_call([command] + list(args), shell=windows, **kwargs)\n # need to have shell=True on windows, otherwise the PYTHONPATH\n # won't inherit the PATH\n except OSError as e:\n if e.errno == 2:\n err('Unable to find', command)\n return 2\n else:\n raise\n",
"def mkvirtualenv(envname, python=None, packages=[], project=None,\n requirements=None, rest=[]):\n\n if python:\n rest = [\"--python=%s\" % python] + rest\n\n path = (workon_home / envname).absolute()\n\n try:\n check_call([sys.executable, \"-m\", \"virtualenv\", str(path)] + rest)\n except (CalledProcessError, KeyboardInterrupt):\n rmvirtualenvs([envname])\n raise\n else:\n if project:\n setvirtualenvproject(envname, project.absolute())\n if requirements:\n inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))\n if packages:\n inve(envname, 'pip', 'install', *packages)\n",
"def mkvirtualenv_argparser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--python')\n parser.add_argument('-i', action='append', dest='packages', help='Install \\\na package after the environment is created. This option may be repeated.')\n parser.add_argument('-r', dest='requirements', help='Provide a pip \\\nrequirements file to install a base set of packages into the new environment.')\n parser.add_argument('-d', '--dont-activate', action='store_false',\n default=True, dest='activate', help=\"After \\\n creation, continue with the existing shell (don't \\\n activate the new environment).\")\n return parser\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
    """Create $WORKON_HOME on first run; return True iff it was created.

    On POSIX, when the default location is in use (no WORKON_HOME or
    XDG_DATA_HOME override), also symlink ~/.virtualenvs to it.
    """
    if not workon_home.exists() and own(workon_home):
        workon_home.mkdir(parents=True)
        link = expandpath('~/.virtualenvs')
        if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
           'XDG_DATA_HOME' not in os.environ and not link.exists():
            link.symlink_to(str(workon_home))
        return True
    else:
        return False
pew_site = Path(__file__).parent
def supported_shell():
    """Return the current $SHELL's base name if pew ships config for it.

    Returns None for unsupported shells or when $SHELL is unset.
    """
    name = Path(os.environ.get('SHELL', '')).stem
    return name if name in ('bash', 'zsh', 'fish') else None
def shell_config_cmd(argv):
    "Prints the path for the current $SHELL helper file"
    shell = supported_shell()
    if shell:
        # Helper scripts ship with pew as shell_config/init.<shell>.
        print(pew_site / 'shell_config' / ('init.' + shell))
    else:
        err('Completions and prompts are unavailable for %s' %
            repr(os.environ.get('SHELL', '')))
def deploy_completions():
    """Copy pew's bundled completion scripts into the system-wide locations.

    Requires write access to the destination directories (typically root).
    """
    completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
                   'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
                   'complete.fish': Path('/etc/fish/completions/pew.fish')}
    for comp, dest in completions.items():
        if not dest.parent.exists():
            dest.parent.mkdir(parents=True)
        shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
    """Return the project directory recorded for *env*, or None.

    Reads the env's `.project` file; warns and returns None when the
    recorded directory no longer exists (or no project was ever set).
    """
    project_file = workon_home / env / '.project'
    if project_file.exists():
        with project_file.open() as f:
            # Only the first line is meaningful; it holds the stored path.
            project_dir = f.readline().strip()
            if os.path.exists(project_dir):
                return project_dir
            else:
                err('Corrupted or outdated:', project_file, '\nDirectory',
                    project_dir, "doesn't exist.")
def unsetenv(key):
    """Remove *key* from os.environ; no error when it is absent."""
    os.environ.pop(key, None)
def compute_path(env):
    """Return $PATH with the env's bin directory prepended."""
    envdir = workon_home / env
    return os.pathsep.join([
        str(envdir / env_bin_dir),
        os.environ['PATH'],
    ])
def inve(env, command, *args, **kwargs):
    """Run a command in the given virtual environment.

    Pass additional keyword arguments to ``subprocess.check_call()``."""
    # we don't strictly need to restore the environment, since pew runs in
    # its own process, but it feels like the right thing to do
    with temp_environ():
        os.environ['VIRTUAL_ENV'] = str(workon_home / env)
        os.environ['PATH'] = compute_path(env)

        # These would make the child resolve the wrong interpreter/stdlib.
        unsetenv('PYTHONHOME')
        unsetenv('__PYVENV_LAUNCHER__')

        try:
            return check_call([command] + list(args), shell=windows, **kwargs)
        # need to have shell=True on windows, otherwise the PYTHONPATH
        # won't inherit the PATH
        except OSError as e:
            # errno 2 == ENOENT: the command itself was not found.
            if e.errno == 2:
                err('Unable to find', command)
                return 2
            else:
                raise
def fork_shell(env, shellcmd, cwd):
    """Spawn *shellcmd* inside *env*, warning about nested activations."""
    or_ctrld = '' if windows else "or 'Ctrl+D' "
    err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
        "to return.", sep='')
    if 'VIRTUAL_ENV' in os.environ:
        err("Be aware that this environment will be nested on top "
            "of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
    return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
    """Spawn bash in *env*, via a temp rcfile that keeps the user's config."""
    # bash is a special little snowflake, and prevent_path_errors cannot work there
    # https://github.com/berdario/pew/issues/58#issuecomment-102182346
    bashrcpath = expandpath('~/.bashrc')
    if bashrcpath.exists():
        with NamedTemporaryFile('w+') as rcfile:
            # Copy the user's .bashrc, then re-export the env PATH so the
            # rcfile cannot clobber the activated environment.
            with bashrcpath.open() as bashrc:
                rcfile.write(bashrc.read())
            rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
            rcfile.flush()
            return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
    else:
        return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
    """Spawn a Cmder (Windows) shell inside *env*."""
    shell_cmd = ['cmd']
    cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
    # Only run Cmder's init script when it is actually installed.
    if expandpath(cmderrc_path).exists():
        shell_cmd += ['/k', cmderrc_path]
    if cwd:
        # Cmder reads its start directory from this variable.
        os.environ['CMDER_START'] = cwd
    return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
    """Launch an interactive subshell with *env* activated."""
    env = str(env)
    shell = _detect_shell()
    shell_name = Path(shell).stem
    # For shells that support `-c`, verify the activation works before
    # handing the user an interactive session; the listed shells either
    # lack `-c` semantics or are handled specially below.
    if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
        # On Windows the PATH is usually set with System Utility
        # so we won't worry about trying to check mistakes there
        shell_check = (sys.executable + ' -c "from pew.pew import '
                       'prevent_path_errors; prevent_path_errors()"')
        try:
            inve(env, shell, '-c', shell_check)
        except CalledProcessError:
            return
    if shell_name == 'bash':
        return fork_bash(env, cwd)
    elif shell_name == 'Cmder':
        return fork_cmder(env, cwd)
    else:
        return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
requirements=None, rest=[]):
if python:
rest = ["--python=%s" % python] + rest
path = (workon_home / envname).absolute()
try:
check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
except (CalledProcessError, KeyboardInterrupt):
rmvirtualenvs([envname])
raise
else:
if project:
setvirtualenvproject(envname, project.absolute())
if requirements:
inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
if packages:
inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python')
parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
return parser
def new_cmd(argv):
"""Create a new environment, in $WORKON_HOME."""
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname)
def rmvirtualenvs(envs):
error_happened = False
for env in envs:
env = workon_home / env
if os.environ.get('VIRTUAL_ENV') == str(env):
err("ERROR: You cannot remove the active environment (%s)." % env)
error_happened = True
break
try:
shutil.rmtree(str(env))
except OSError as e:
err("Error while trying to remove the {0} env: \n{1}".format
(env, e.strerror))
error_happened = True
return error_happened
def rm_cmd(argv):
"""Remove one or more environment, from $WORKON_HOME."""
if len(argv) < 1:
sys.exit("Please specify an environment")
return rmvirtualenvs(argv)
def packages(site_packages):
nodes = site_packages.iterdir()
return set([x.stem.split('-')[0] for x in nodes]) - set(['__pycache__'])
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
return sorted(set(env.parts[-3] for env in
workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def ls_cmd(argv):
"""List available environments."""
parser = argparse.ArgumentParser()
p_group = parser.add_mutually_exclusive_group()
p_group.add_argument('-b', '--brief', action='store_false')
p_group.add_argument('-l', '--long', action='store_true')
args = parser.parse_args(argv)
lsvirtualenv(args.long)
def parse_envname(argv, no_arg_callback):
if len(argv) < 1 or argv[0] is None:
no_arg_callback()
env = argv[0]
if env.startswith('/'):
sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
if not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
else:
return env
def workon_cmd(argv):
"""List or change working virtual environments."""
parser = argparse.ArgumentParser(prog='pew workon')
parser.add_argument('envname', nargs='?')
parser.add_argument(
'-n', '--no-cd', action='store_true',
help=('Do not change working directory to project directory after '
'activating virtualenv.')
)
args = parser.parse_args(argv)
def list_and_exit():
lsvirtualenv(False)
sys.exit(0)
env = parse_envname([args.envname], list_and_exit)
# Check if the virtualenv has an associated project directory and in
# this case, use it as the current working directory.
project_dir = get_project_dir(env)
if project_dir is None or args.no_cd:
project_dir = os.getcwd()
return shell(env, cwd=project_dir)
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
if not env:
sys.exit('ERROR: no virtualenv active')
else:
env_python = workon_home / env / env_bin_dir / 'python'
return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
"""Add the specified directories to the Python path for the currently active virtualenv.
This will be done by placing the directory names in a path file named
"virtualenv_path_extensions.pth" inside the virtualenv's site-packages
directory; if this file does not exists, it will be created first.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='remove', action='store_true')
parser.add_argument('dirs', nargs='+')
args = parser.parse_args(argv)
extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
if not extra_paths.exists():
with extra_paths.open('w') as extra:
extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
def rewrite(f):
with extra_paths.open('r+') as extra:
to_write = f(extra.readlines())
extra.seek(0)
extra.truncate()
extra.writelines(to_write)
if args.remove:
rewrite(lambda ls: [line for line in ls if line not in new_paths])
else:
rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
print(sitepackages_dir())
def lssitepackages_cmd(argv):
"""Show the content of the site-packages directory of the current virtualenv."""
site = sitepackages_dir()
print(*sorted(site.iterdir()), sep=os.linesep)
extra_paths = site / '_virtualenv_path_extensions.pth'
if extra_paths.exists():
print('from _virtualenv_path_extensions.pth:')
with extra_paths.open() as extra:
print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
"""Toggle the current virtualenv between having and not having access to the global site-packages."""
quiet = argv == ['-q']
site = sitepackages_dir()
ngsp_file = site.parent / 'no-global-site-packages.txt'
if ngsp_file.exists():
ngsp_file.unlink()
if not quiet:
print('Enabled global site-packages')
else:
with ngsp_file.open('w'):
if not quiet:
print('Disabled global site-packages')
def cp_cmd(argv):
"""Duplicate the named virtualenv to make a new one."""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target', nargs='?')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
args = parser.parse_args(argv)
target_name = copy_virtualenv_project(args.source, args.target)
if args.activate:
shell(target_name)
def copy_virtualenv_project(source, target):
source = expandpath(source)
if not source.exists():
source = workon_home / source
if not source.exists():
sys.exit('Please provide a valid virtualenv to copy')
target_name = target or source.name
target = workon_home / target_name
if target.exists():
sys.exit('%s virtualenv already exists in %s.' % (
target_name, workon_home
))
print('Copying {0} in {1}'.format(source, target_name))
clone_virtualenv(str(source), str(target))
return target_name
def rename_cmd(argv):
"""Rename a virtualenv"""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target')
pargs = parser.parse_args(argv)
copy_virtualenv_project(pargs.source, pargs.target)
return rmvirtualenvs([pargs.source])
def setvirtualenvproject(env, project):
print('Setting project for {0} to {1}'.format(env, project))
with (workon_home / env / '.project').open('wb') as prj:
prj.write(str(project).encode())
def setproject_cmd(argv):
"""Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
args = dict(enumerate(argv))
project = os.path.abspath(args.get(1, '.'))
env = args.get(0, os.environ.get('VIRTUAL_ENV'))
if not env:
sys.exit('pew setproject [virtualenv] [project_path]')
if not (workon_home / env).exists():
sys.exit("Environment '%s' doesn't exist." % env)
if not os.path.isdir(project):
sys.exit('pew setproject: %s does not exist' % project)
setvirtualenvproject(env, project)
def getproject_cmd(argv):
"""Print a virtualenv's project directory, if set.
If called without providing a virtualenv name as argument, print the
current virtualenv's project directory.
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Print an environment's project directory.",
)
parser.add_argument(
'envname',
nargs='?',
default=os.environ.get('VIRTUAL_ENV'),
help=(
'The name of the environment to return the project directory '
'for. If omitted, will use the currently active environment.'
),
)
args = parser.parse_args(argv)
# Now, do the actual work
if not args.envname:
sys.exit('ERROR: no virtualenv active')
if not (workon_home / args.envname).exists():
sys.exit("ERROR: Environment '{0}' does not exist."
.format(args.envname))
project_dir = get_project_dir(args.envname)
if project_dir is None:
sys.exit("ERROR: no project directory set for Environment '{0}'"
.format(args.envname))
print(project_dir)
def mktmpenv_cmd(argv):
    """Create a temporary virtualenv."""
    parser = mkvirtualenv_argparser()
    env = '.'
    # Pick an unused random name. '.' resolves to workon_home itself, which
    # always exists, so the loop body runs at least once.  The [2:-1] slice
    # strips the '0x' prefix and the trailing character — presumably the
    # Python 2 'L' long suffix; on Python 3 it merely drops one hex digit.
    # TODO(review): confirm the slice is still intended on Python 3.
    while (workon_home / env).exists():
        env = hex(random.getrandbits(64))[2:-1]
    args, rest = parser.parse_known_args(argv)
    mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
                 rest=rest)
    print('This is a temporary environment. It will be deleted when you exit')
    try:
        if args.activate:
            # only used for testing on windows
            shell(env)
    finally:
        # NOTE(review): a `return` inside `finally` suppresses any exception
        # raised by shell() above (including KeyboardInterrupt) — the env is
        # always removed, but errors are silently discarded.
        return rmvirtualenvs([env])
def wipeenv_cmd(argv):
"""Remove all installed packages from the current (or supplied) env."""
env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
if not env:
sys.exit('ERROR: no virtualenv active')
elif not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
else:
env_pip = str(workon_home / env / env_bin_dir / 'pip')
all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
ignored = sorted(all_pkgs - pkgs)
pkgs = set(p.split("==")[0] for p in pkgs)
to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
if to_remove:
print("Ignoring:\n %s" % "\n ".join(ignored))
print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
return inve(env, 'pip', 'uninstall', '-y', *to_remove)
else:
print("Nothing to remove")
def inall_cmd(argv):
"""Run a command in each virtualenv."""
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors)
def in_cmd(argv):
"""Run a command in the given virtualenv."""
if len(argv) == 1:
return workon_cmd(argv)
parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
return inve(*argv)
def restore_cmd(argv):
"""Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
if len(argv) < 1:
sys.exit('You must provide a valid virtualenv to target')
env = argv[0]
path = workon_home / env
py = path / env_bin_dir / ('python.exe' if windows else 'python')
exact_py = py.resolve().name
return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
"""Print the path for the virtualenv directory"""
env = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
print(workon_home / env)
def install_cmd(argv):
'''Use Pythonz to download and build the specified Python version'''
installer = InstallCommand()
options, versions = installer.parser.parse_args(argv)
if len(versions) != 1:
installer.parser.print_help()
sys.exit(1)
else:
try:
actual_installer = PythonInstaller.get_installer(versions[0], options)
return actual_installer.install()
except AlreadyInstalledError as e:
print(e)
def uninstall_cmd(argv):
'''Use Pythonz to uninstall the specified Python version'''
return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
'''List the pythons installed by Pythonz (or all the installable ones)'''
return ListPythons().run(argv)
def locate_python_cmd(argv):
'''Locate the path for the python version installed by Pythonz'''
return LocatePython().run(argv)
def version_cmd(argv):
"""Prints current pew version"""
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('pew').version
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
print(__version__)
def prevent_path_errors():
if 'VIRTUAL_ENV' in os.environ and not check_path():
sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
shell = supported_shell()
if shell:
if shell == 'fish':
source_cmd = 'source (pew shell_config)'
else:
source_cmd = 'source "$(pew shell_config)"'
rcpath = expandpath({'bash': '~/.bashrc'
, 'zsh': '~/.zshrc'
, 'fish': '~/.config/fish/config.fish'}[shell])
if rcpath.exists():
update_config_file(rcpath, source_cmd)
else:
print("It seems that you're running pew for the first time\n"
"If you want source shell competions and update your prompt, "
"Add the following line to your shell config file:\n %s" % source_cmd)
print('\nWill now continue with the command:', *sys.argv[1:])
input('[enter]')
def update_config_file(rcpath, source_cmd):
with rcpath.open('r+') as rcfile:
if source_cmd not in (line.strip() for line in rcfile.readlines()):
choice = 'X'
while choice not in ('y', '', 'n'):
choice = input("It seems that you're running pew for the first time\n"
"do you want to modify %s to source completions and"
" update your prompt? [y/N]\n> " % rcpath).lower()
if choice == 'y':
rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
print('Done')
else:
print('\nOk, if you want to do it manually, just add\n %s\nat'
' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
    """Print an aligned, wrapped one-line summary for every command in *cmds*.

    *cmds* maps command name -> function; a command's summary is the first
    line of its docstring (commands without a docstring are listed bare).
    """
    longest = max(map(len, cmds)) + 3
    columns, _ = get_terminal_size()
    print('Available commands:\n')
    for name in sorted(cmds):
        doc = cmds[name].__doc__
        if not doc:
            print(' ' + name)
            continue
        label = (' {0}: '.format(name)).ljust(longest)
        print(textwrap.fill(
            doc.splitlines()[0],
            columns or 1000,
            initial_indent=label,
            subsequent_indent=longest * ' '))
def pew():
    """Command-line entry point: dispatch ``sys.argv[1]`` to the matching *_cmd.

    On the very first run (and only on a TTY) offers to hook pew into the
    user's shell configuration.  Exit status: the child's return code for
    failed subprocesses, 1 for unknown commands.
    """
    first_run = makedirs_and_symlink_if_needed(workon_home)
    if first_run and sys.stdin.isatty():
        first_run_setup()
    # Command table: every module-level function named <command>_cmd.
    cmds = dict((cmd[:-4], fun)
                for cmd, fun in globals().items() if cmd.endswith('_cmd'))
    if sys.argv[1:]:
        if sys.argv[1] in cmds:
            command = cmds[sys.argv[1]]
            try:
                return command(sys.argv[2:])
            except CalledProcessError as e:
                # Propagate the failing child's exit status as our own.
                return e.returncode
            except KeyboardInterrupt:
                pass
        else:
            err("ERROR: command", sys.argv[1], "does not exist.")
            print_commands(cmds)
            sys.exit(1)
    else:
        print_commands(cmds)
|
berdario/pew | pew/pew.py | mktmpenv_cmd | python | def mktmpenv_cmd(argv):
parser = mkvirtualenv_argparser()
env = '.'
while (workon_home / env).exists():
env = hex(random.getrandbits(64))[2:-1]
args, rest = parser.parse_known_args(argv)
mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
rest=rest)
print('This is a temporary environment. It will be deleted when you exit')
try:
if args.activate:
# only used for testing on windows
shell(env)
finally:
return rmvirtualenvs([env]) | Create a temporary virtualenv. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L606-L623 | [
"def shell(env, cwd=None):\n env = str(env)\n shell = _detect_shell()\n shell_name = Path(shell).stem\n if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):\n # On Windows the PATH is usually set with System Utility\n # so we won't worry about trying to check mistakes there\n shell_check = (sys.executable + ' -c \"from pew.pew import '\n 'prevent_path_errors; prevent_path_errors()\"')\n try:\n inve(env, shell, '-c', shell_check)\n except CalledProcessError:\n return\n if shell_name == 'bash':\n return fork_bash(env, cwd)\n elif shell_name == 'Cmder':\n return fork_cmder(env, cwd)\n else:\n return fork_shell(env, [shell], cwd)\n",
"def mkvirtualenv(envname, python=None, packages=[], project=None,\n requirements=None, rest=[]):\n\n if python:\n rest = [\"--python=%s\" % python] + rest\n\n path = (workon_home / envname).absolute()\n\n try:\n check_call([sys.executable, \"-m\", \"virtualenv\", str(path)] + rest)\n except (CalledProcessError, KeyboardInterrupt):\n rmvirtualenvs([envname])\n raise\n else:\n if project:\n setvirtualenvproject(envname, project.absolute())\n if requirements:\n inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))\n if packages:\n inve(envname, 'pip', 'install', *packages)\n",
"def rmvirtualenvs(envs):\n error_happened = False\n for env in envs:\n env = workon_home / env\n if os.environ.get('VIRTUAL_ENV') == str(env):\n err(\"ERROR: You cannot remove the active environment (%s).\" % env)\n error_happened = True\n break\n try:\n shutil.rmtree(str(env))\n except OSError as e:\n err(\"Error while trying to remove the {0} env: \\n{1}\".format\n (env, e.strerror))\n error_happened = True\n return error_happened\n",
"def mkvirtualenv_argparser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--python')\n parser.add_argument('-i', action='append', dest='packages', help='Install \\\na package after the environment is created. This option may be repeated.')\n parser.add_argument('-r', dest='requirements', help='Provide a pip \\\nrequirements file to install a base set of packages into the new environment.')\n parser.add_argument('-d', '--dont-activate', action='store_false',\n default=True, dest='activate', help=\"After \\\n creation, continue with the existing shell (don't \\\n activate the new environment).\")\n return parser\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
# --- Module-level configuration --------------------------------------------
# Python 2 compatibility: use raw_input so input() never eval()s.
if sys.version_info[0] == 2:
    input = raw_input
# Shortcut for printing to stderr.
err = partial(print, file=sys.stderr)
# Default env location: ~/.virtualenvs on Windows, otherwise under
# $XDG_DATA_HOME (or ~/.local/share) following the XDG base-dir convention.
if windows:
    default_home = '~/.virtualenvs'
else:
    default_home = os.path.join(
        os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
# Root directory holding all managed virtualenvs; $WORKON_HOME overrides.
workon_home = expandpath(
    os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
    """Create *workon_home* on first use; return True iff it was created.

    On POSIX, when neither WORKON_HOME nor XDG_DATA_HOME is customized,
    also create a legacy ~/.virtualenvs symlink pointing at it.
    """
    if workon_home.exists() or not own(workon_home):
        return False
    workon_home.mkdir(parents=True)
    link = expandpath('~/.virtualenvs')
    wants_legacy_link = (
        os.name == 'posix'
        and 'WORKON_HOME' not in os.environ
        and 'XDG_DATA_HOME' not in os.environ
        and not link.exists()
    )
    if wants_legacy_link:
        link.symlink_to(str(workon_home))
    return True
# Directory of the installed pew package; used to locate bundled shell helpers.
pew_site = Path(__file__).parent
def supported_shell():
    """Return $SHELL's basename if pew ships helpers for it, else None."""
    current = Path(os.environ.get('SHELL', '')).stem
    return current if current in ('bash', 'zsh', 'fish') else None
def shell_config_cmd(argv):
    "Prints the path for the current $SHELL helper file"
    shell = supported_shell()
    if not shell:
        # Unsupported (or unset) shell: explain instead of printing a path.
        err('Completions and prompts are unavailable for %s' %
            repr(os.environ.get('SHELL', '')))
        return
    print(pew_site / 'shell_config' / ('init.' + shell))
def deploy_completions():
    """Install pew's shell completion files system-wide.

    Copies the bundled bash/zsh/fish completion scripts into the conventional
    system paths, creating parent directories as needed.  Writing to these
    locations typically requires root privileges.
    """
    completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
                   'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
                   'complete.fish': Path('/etc/fish/completions/pew.fish')}
    for comp, dest in completions.items():
        if not dest.parent.exists():
            dest.parent.mkdir(parents=True)
        shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
    """Return the project directory recorded for *env*, or None.

    Reads the first line of the env's ``.project`` file; warns and returns
    None when the file is missing or the recorded directory no longer exists.
    """
    project_file = workon_home / env / '.project'
    if not project_file.exists():
        return None
    with project_file.open() as f:
        project_dir = f.readline().strip()
        if os.path.exists(project_dir):
            return project_dir
        err('Corrupted or outdated:', project_file, '\nDirectory',
            project_dir, "doesn't exist.")
def unsetenv(key):
    """Remove *key* from os.environ; a no-op when it is absent."""
    os.environ.pop(key, None)
def compute_path(env):
    """Return a PATH value with *env*'s bin/Scripts directory prepended."""
    env_bin = workon_home / env / env_bin_dir
    return str(env_bin) + os.pathsep + os.environ['PATH']
def inve(env, command, *args, **kwargs):
    """Run *command* (with *args*) inside the virtual environment *env*.

    Pass additional keyword arguments to ``subprocess.check_call()``.
    Returns the call's result, or 2 when the executable cannot be found.
    """
    # we don't strictly need to restore the environment, since pew runs in
    # its own process, but it feels like the right thing to do
    with temp_environ():
        os.environ['VIRTUAL_ENV'] = str(workon_home / env)
        os.environ['PATH'] = compute_path(env)
        # Drop variables that would make the child interpreter resolve its
        # home/stdlib outside the virtualenv.
        unsetenv('PYTHONHOME')
        unsetenv('__PYVENV_LAUNCHER__')
        try:
            return check_call([command] + list(args), shell=windows, **kwargs)
        # need to have shell=True on windows, otherwise the PYTHONPATH
        # won't inherit the PATH
        except OSError as e:
            if e.errno == 2:
                # ENOENT: the command itself does not exist on PATH.
                err('Unable to find', command)
                return 2
            else:
                raise
def fork_shell(env, shellcmd, cwd):
    """Launch *shellcmd* inside *env* (blocking), warning about nested envs."""
    or_ctrld = '' if windows else "or 'Ctrl+D' "
    err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
        "to return.", sep='')
    if 'VIRTUAL_ENV' in os.environ:
        # An env is already active: the subshell will nest on top of it.
        err("Be aware that this environment will be nested on top "
            "of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
    return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
    """Launch bash inside *env*, using a temporary rcfile to pin $PATH.

    bash is a special little snowflake, and prevent_path_errors cannot work
    there: https://github.com/berdario/pew/issues/58#issuecomment-102182346
    """
    bashrcpath = expandpath('~/.bashrc')
    if bashrcpath.exists():
        with NamedTemporaryFile('w+') as rcfile:
            # Replay the user's ~/.bashrc, then force the env's PATH on top
            # so the rc file cannot clobber the virtualenv's bin directory.
            with bashrcpath.open() as bashrc:
                rcfile.write(bashrc.read())
            rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
            rcfile.flush()
            return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
    else:
        return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
    """Launch a Cmder-flavoured cmd.exe session inside *env* (Windows)."""
    shell_cmd = ['cmd']
    cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
    if expandpath(cmderrc_path).exists():
        # /k keeps the session open after running Cmder's init script.
        shell_cmd += ['/k', cmderrc_path]
    if cwd:
        # Presumably consumed by Cmder's init script to choose the start
        # directory — NOTE(review): confirm against Cmder docs.
        os.environ['CMDER_START'] = cwd
    return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
    """Open an interactive subshell inside *env*, optionally starting in *cwd*.

    For shells where the check can work, first runs a PATH sanity check in
    the env; a failing check aborts silently (the check prints its own
    diagnostic).  Then delegates to the bash/Cmder-specific helpers or to
    the generic fork_shell.
    """
    env = str(env)
    shell = _detect_shell()
    shell_name = Path(shell).stem
    if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
        # On Windows the PATH is usually set with System Utility
        # so we won't worry about trying to check mistakes there
        shell_check = (sys.executable + ' -c "from pew.pew import '
                       'prevent_path_errors; prevent_path_errors()"')
        try:
            inve(env, shell, '-c', shell_check)
        except CalledProcessError:
            # prevent_path_errors already printed the explanation.
            return
    if shell_name == 'bash':
        return fork_bash(env, cwd)
    elif shell_name == 'Cmder':
        return fork_cmder(env, cwd)
    else:
        return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=None, project=None,
                 requirements=None, rest=None):
    """Create the virtualenv *envname* under workon_home.

    Args:
        envname: name of the environment to create.
        python: optional interpreter, forwarded to virtualenv as --python.
        packages: optional list of packages to pip-install afterwards.
        project: optional Path of a project directory to associate.
        requirements: optional pip requirements file to install from.
        rest: extra command-line arguments forwarded to virtualenv.

    Raises:
        CalledProcessError: when virtualenv fails; the half-created env is
            removed before re-raising (likewise on KeyboardInterrupt).

    Note: ``packages`` and ``rest`` used to default to the mutable literal
    ``[]``; they were never mutated in place, but None sentinels are the
    safe, conventional form and behave identically.
    """
    packages = packages or []
    rest = list(rest) if rest else []
    if python:
        rest = ["--python=%s" % python] + rest
    path = (workon_home / envname).absolute()
    try:
        check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
    except (CalledProcessError, KeyboardInterrupt):
        # Don't leave a broken, half-created environment behind.
        rmvirtualenvs([envname])
        raise
    else:
        if project:
            setvirtualenvproject(envname, project.absolute())
        if requirements:
            inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
        if packages:
            inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
    """Return the argparse parser shared by `new`, `mkproject` and `mktmpenv`.

    Options: -p/--python (interpreter), -i (repeatable package to install),
    -r (pip requirements file), -d/--dont-activate (sets ``activate=False``).
    Callers use parse_known_args() and forward unknown options to virtualenv.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--python')
    parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
    parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
    parser.add_argument('-d', '--dont-activate', action='store_false',
                        default=True, dest='activate', help="After \
                        creation, continue with the existing shell (don't \
                        activate the new environment).")
    return parser
def new_cmd(argv):
    """Create a new environment, in $WORKON_HOME."""
    parser = mkvirtualenv_argparser()
    parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
    parser.add_argument('envname')
    # Unknown options in `rest` are forwarded verbatim to virtualenv.
    args, rest = parser.parse_known_args(argv)
    project = expandpath(args.project) if args.project else None
    mkvirtualenv(args.envname, args.python, args.packages, project,
                 args.requirements, rest)
    if args.activate:
        shell(args.envname)
def rmvirtualenvs(envs):
    """Delete each named env from workon_home; return True if anything failed.

    Refuses to delete the currently active environment (and stops there).
    """
    error_happened = False
    for name in envs:
        env_path = workon_home / name
        if os.environ.get('VIRTUAL_ENV') == str(env_path):
            err("ERROR: You cannot remove the active environment (%s)." % env_path)
            error_happened = True
            break
        try:
            shutil.rmtree(str(env_path))
        except OSError as exc:
            err("Error while trying to remove the {0} env: \n{1}".format
                (env_path, exc.strerror))
            error_happened = True
    return error_happened
def rm_cmd(argv):
    """Remove one or more environment, from $WORKON_HOME."""
    if not argv:
        sys.exit("Please specify an environment")
    return rmvirtualenvs(argv)
def packages(site_packages):
    """Return the distinct top-level distribution names in *site_packages*."""
    stems = {entry.stem.split('-')[0] for entry in site_packages.iterdir()}
    return stems - {'__pycache__'}
def showvirtualenv(env):
    """Print *env*'s Python version and a wrapped list of installed packages."""
    columns, _ = get_terminal_size()
    pkgs = sorted(packages(sitepackages_dir(env)))
    env_python = workon_home / env / env_bin_dir / 'python'
    # Width of the left-hand label ("<env>: ") used for the hanging indent.
    l = len(env) + 2
    version = invoke(str(env_python), '-V')
    # Merge stdout and stderr: `python -V` has historically written its
    # version to either stream depending on the interpreter version.
    version = ' - '.join((version.out + version.err).splitlines())
    print(env, ': ', version, sep='')
    print(textwrap.fill(' '.join(pkgs),
                        width=columns-l,
                        initial_indent=(l * ' '),
                        subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
    """Show the named env, or the active one when no name was given."""
    try:
        showvirtualenv(argv[0])
    except IndexError:
        active = os.environ.get('VIRTUAL_ENV')
        if active is None:
            sys.exit('pew show [env]')
        showvirtualenv(Path(active).name)
def lsenvs():
    """Sorted names of every env in workon_home that has a python binary."""
    pattern = os.path.join('*', env_bin_dir, 'python*')
    names = {match.parts[-3] for match in workon_home.glob(pattern)}
    return sorted(names)
def lsvirtualenv(verbose):
    """List all envs: columnar names, or full details when *verbose*."""
    envs = lsenvs()
    if verbose:
        for env in envs:
            showvirtualenv(env)
    else:
        print_virtualenvs(*envs)
def ls_cmd(argv):
    """List available environments."""
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-b', '--brief', action='store_false')
    group.add_argument('-l', '--long', action='store_true')
    opts = parser.parse_args(argv)
    lsvirtualenv(opts.long)
def parse_envname(argv, no_arg_callback):
    """Validate and return the env name in argv[0].

    Invokes *no_arg_callback* when no name was supplied; exits with an error
    for absolute-path names or names of environments that do not exist.
    """
    if len(argv) < 1 or argv[0] is None:
        no_arg_callback()
    env = argv[0]
    if env.startswith('/'):
        sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
    if (workon_home / env).exists():
        return env
    sys.exit("ERROR: Environment '{0}' does not exist. Create it with 'pew new {0}'.".format(env))
def workon_cmd(argv):
    """List or change working virtual environments."""
    parser = argparse.ArgumentParser(prog='pew workon')
    parser.add_argument('envname', nargs='?')
    parser.add_argument(
        '-n', '--no-cd', action='store_true',
        help=('Do not change working directory to project directory after '
              'activating virtualenv.')
    )
    args = parser.parse_args(argv)
    def list_and_exit():
        # Called by parse_envname when no env name was given: just list envs.
        lsvirtualenv(False)
        sys.exit(0)
    env = parse_envname([args.envname], list_and_exit)
    # Check if the virtualenv has an associated project directory and in
    # this case, use it as the current working directory.
    project_dir = get_project_dir(env)
    if project_dir is None or args.no_cd:
        project_dir = os.getcwd()
    return shell(env, cwd=project_dir)
def sitepackages_dir(env=None):
    """Return the site-packages Path of *env* (default: the active virtualenv).

    Fix: the old signature ``env=os.environ.get('VIRTUAL_ENV')`` captured the
    environment variable once, at import time, so a virtualenv activated after
    this module was imported (e.g. inside temp_environ) was ignored.  Resolve
    VIRTUAL_ENV at call time instead.

    Exits with an error message when no env is given and none is active.
    """
    if env is None:
        env = os.environ.get('VIRTUAL_ENV')
    if not env:
        sys.exit('ERROR: no virtualenv active')
    env_python = workon_home / env / env_bin_dir / 'python'
    # Ask the env's own interpreter where its site-packages lives.
    return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
    """Add the specified directories to the Python path for the currently active virtualenv.

    This will be done by placing the directory names in a path file named
    "virtualenv_path_extensions.pth" inside the virtualenv's site-packages
    directory; if this file does not exists, it will be created first.

    With -d, the named directories are removed from the file instead.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', dest='remove', action='store_true')
    parser.add_argument('dirs', nargs='+')
    args = parser.parse_args(argv)
    extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
    new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
    if not extra_paths.exists():
        # Seed the .pth file with sys.path bookkeeping lines (executed by
        # site.py); added directories live between these two lines.
        with extra_paths.open('w') as extra:
            extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
    def rewrite(f):
        # Apply *f* to the file's lines and write the result back in place.
        with extra_paths.open('r+') as extra:
            to_write = f(extra.readlines())
            extra.seek(0)
            extra.truncate()
            extra.writelines(to_write)
    if args.remove:
        rewrite(lambda ls: [line for line in ls if line not in new_paths])
    else:
        # Insert after the prologue line, before everything else.
        rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
    # Print the active virtualenv's site-packages directory.
    # (Deliberately a comment, not a docstring: `pew` renders command
    # docstrings as CLI help, and this command currently lists bare.)
    print(sitepackages_dir())
def lssitepackages_cmd(argv):
    """Show the content of the site-packages directory of the current virtualenv."""
    site = sitepackages_dir()
    print(*sorted(site.iterdir()), sep=os.linesep)
    extra_paths = site / '_virtualenv_path_extensions.pth'
    if not extra_paths.exists():
        return
    # Also show any manually added path extensions.
    print('from _virtualenv_path_extensions.pth:')
    with extra_paths.open() as extra:
        print(extra.read())
def toggleglobalsitepackages_cmd(argv):
    """Toggle the current virtualenv between having and not having access to the global site-packages."""
    # -q suppresses the status message.
    quiet = argv == ['-q']
    site = sitepackages_dir()
    # NOTE(review): this marker file is presumably consumed by virtualenv's
    # site.py to disable global site-packages — confirm against virtualenv.
    ngsp_file = site.parent / 'no-global-site-packages.txt'
    if ngsp_file.exists():
        ngsp_file.unlink()
        if not quiet:
            print('Enabled global site-packages')
    else:
        # Creating the (empty) marker file re-isolates the env.
        with ngsp_file.open('w'):
            if not quiet:
                print('Disabled global site-packages')
def cp_cmd(argv):
    """Duplicate the named virtualenv to make a new one."""
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    # Target defaults to the source's own name (meaningful when source is a
    # path outside workon_home).
    parser.add_argument('target', nargs='?')
    parser.add_argument('-d', '--dont-activate', action='store_false',
                        default=True, dest='activate', help="After \
                        creation, continue with the existing shell (don't \
                        activate the new environment).")
    args = parser.parse_args(argv)
    target_name = copy_virtualenv_project(args.source, args.target)
    if args.activate:
        shell(target_name)
def copy_virtualenv_project(source, target):
    """Clone env *source* into workon_home as *target*; return the new name.

    *source* may be a filesystem path or the name of an existing env in
    workon_home.  Exits when the source is missing or the target exists.
    """
    source = expandpath(source)
    if not source.exists():
        # Not a path: fall back to a name inside workon_home.
        source = workon_home / source
        if not source.exists():
            sys.exit('Please provide a valid virtualenv to copy')
    target_name = target or source.name
    target = workon_home / target_name
    if target.exists():
        sys.exit('%s virtualenv already exists in %s.' % (
            target_name, workon_home
        ))
    print('Copying {0} in {1}'.format(source, target_name))
    # clone_virtualenv also fixes up the interpreter paths inside the copy.
    clone_virtualenv(str(source), str(target))
    return target_name
def rename_cmd(argv):
    """Rename a virtualenv"""
    parser = argparse.ArgumentParser()
    for positional in ('source', 'target'):
        parser.add_argument(positional)
    opts = parser.parse_args(argv)
    # A rename is a copy followed by removing the original.
    copy_virtualenv_project(opts.source, opts.target)
    return rmvirtualenvs([opts.source])
def setvirtualenvproject(env, project):
    """Record *project* as the project directory of *env* (its .project file)."""
    print('Setting project for {0} to {1}'.format(env, project))
    project_file = workon_home / env / '.project'
    with project_file.open('wb') as prj:
        prj.write(str(project).encode())
def setproject_cmd(argv):
    """Given a virtualenv directory and a project directory, set the virtualenv up to be associated with the project."""
    env = argv[0] if len(argv) > 0 else os.environ.get('VIRTUAL_ENV')
    project = os.path.abspath(argv[1] if len(argv) > 1 else '.')
    if not env:
        sys.exit('pew setproject [virtualenv] [project_path]')
    if not (workon_home / env).exists():
        sys.exit("Environment '%s' doesn't exist." % env)
    if not os.path.isdir(project):
        sys.exit('pew setproject: %s does not exist' % project)
    setvirtualenvproject(env, project)
def getproject_cmd(argv):
    """Print a virtualenv's project directory, if set.

    If called without providing a virtualenv name as argument, print the
    current virtualenv's project directory.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description="Print an environment's project directory.",
    )
    parser.add_argument(
        'envname',
        nargs='?',
        default=os.environ.get('VIRTUAL_ENV'),
        help=(
            'The name of the environment to return the project directory '
            'for. If omitted, will use the currently active environment.'
        ),
    )
    args = parser.parse_args(argv)
    # Now, do the actual work
    if not args.envname:
        sys.exit('ERROR: no virtualenv active')
    if not (workon_home / args.envname).exists():
        sys.exit("ERROR: Environment '{0}' does not exist."
                 .format(args.envname))
    project_dir = get_project_dir(args.envname)
    if project_dir is None:
        sys.exit("ERROR: no project directory set for Environment '{0}'"
                 .format(args.envname))
    print(project_dir)
def mkproject_cmd(argv):
    """Create a new project directory and its associated virtualenv.

    Templates are executable scripts named ``template_<name>`` stored in
    workon_home; each selected one is run inside the new env with the env
    name and the project path as arguments.
    """
    if '-l' in argv or '--list' in argv:
        # List mode: strip the "template_" prefix from the stored scripts.
        templates = [t.name[9:] for t in workon_home.glob("template_*")]
        print("Available project templates:", *templates, sep='\n')
        return
    parser = mkvirtualenv_argparser()
    parser.add_argument('envname')
    parser.add_argument(
        '-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
    parser.add_argument(
        '-l', '--list', action='store_true', help='List available templates.')
    args, rest = parser.parse_known_args(argv)
    projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
    if not projects_home.exists():
        sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
    project = (projects_home / args.envname).absolute()
    if project.exists():
        sys.exit('Project %s already exists.' % args.envname)
    mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
                 args.requirements, rest)
    project.mkdir()
    for template_name in args.templates:
        template = workon_home / ("template_" + template_name)
        # Run the template inside the new environment.
        inve(args.envname, str(template), args.envname, str(project))
    if args.activate:
        shell(args.envname, cwd=str(project))
def wipeenv_cmd(argv):
    """Remove all installed packages from the current (or supplied) env."""
    env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
    if not env:
        sys.exit('ERROR: no virtualenv active')
    elif not (workon_home / env).exists():
        sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
    else:
        env_pip = str(workon_home / env / env_bin_dir / 'pip')
        all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
        # Only plain `name==version` lines are uninstalled; editable/VCS
        # freeze lines are reported as ignored.
        pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
        ignored = sorted(all_pkgs - pkgs)
        pkgs = set(p.split("==")[0] for p in pkgs)
        # Keep legacy baseline packages the env itself depends on.
        to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
        if to_remove:
            print("Ignoring:\n %s" % "\n ".join(ignored))
            print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
            return inve(env, 'pip', 'uninstall', '-y', *to_remove)
        else:
            print("Nothing to remove")
def inall_cmd(argv):
    """Run a command in each virtualenv."""
    errors = False
    for env in lsenvs():
        print("\n%s:" % env)
        try:
            inve(env, *argv)
        except CalledProcessError as e:
            errors = True
            err(e)
    # Exit status 1 when any env failed, 0 otherwise (bool -> exit code).
    sys.exit(errors)
def in_cmd(argv):
    """Run a command in the given virtualenv."""
    if len(argv) == 1:
        # Only an env name given: behave like `pew workon`.
        return workon_cmd(argv)
    def missing_env():
        sys.exit('You must provide a valid virtualenv to target')
    parse_envname(argv, missing_env)
    return inve(*argv)
def restore_cmd(argv):
    """Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
    if not argv:
        sys.exit('You must provide a valid virtualenv to target')
    env_path = workon_home / argv[0]
    python_name = 'python.exe' if windows else 'python'
    # Resolve the symlink to learn the exact interpreter (e.g. python3.8).
    exact_py = (env_path / env_bin_dir / python_name).resolve().name
    return check_call([sys.executable, "-m", "virtualenv",
                       str(env_path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
    """Print the path for the virtualenv directory"""
    def missing_env():
        sys.exit('You must provide a valid virtualenv to target')
    env = parse_envname(argv, missing_env)
    print(workon_home / env)
def install_cmd(argv):
    '''Use Pythonz to download and build the specified Python version'''
    installer = InstallCommand()
    options, versions = installer.parser.parse_args(argv)
    if len(versions) != 1:
        # Exactly one version argument is required.
        installer.parser.print_help()
        sys.exit(1)
    else:
        try:
            actual_installer = PythonInstaller.get_installer(versions[0], options)
            return actual_installer.install()
        except AlreadyInstalledError as e:
            # Already installed is not an error here: report and return.
            print(e)
def uninstall_cmd(argv):
    '''Use Pythonz to uninstall the specified Python version'''
    command = UninstallCommand()
    return command.run(argv)
def list_pythons_cmd(argv):
    '''List the pythons installed by Pythonz (or all the installable ones)'''
    command = ListPythons()
    return command.run(argv)
def locate_python_cmd(argv):
    '''Locate the path for the python version installed by Pythonz'''
    command = LocatePython()
    return command.run(argv)
def version_cmd(argv):
    """Prints current pew version"""
    import pkg_resources

    try:
        version = pkg_resources.get_distribution('pew').version
    except pkg_resources.DistributionNotFound:
        version = 'unknown'
        print('Setuptools has some issues here, failed to get our own package.',
              file=sys.stderr)
    print(version)
def prevent_path_errors():
    """Abort with a diagnostic if an activated virtualenv is not on $PATH."""
    if 'VIRTUAL_ENV' not in os.environ:
        return
    if check_path():
        return
    sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
    """On first run, offer to source pew's shell config from the user's rc file."""
    shell = supported_shell()
    if shell:
        if shell == 'fish':
            source_cmd = 'source (pew shell_config)'
        else:
            source_cmd = 'source "$(pew shell_config)"'
        rcfiles = {'bash': '~/.bashrc',
                   'zsh': '~/.zshrc',
                   'fish': '~/.config/fish/config.fish'}
        rcpath = expandpath(rcfiles[shell])
        if not rcpath.exists():
            print("It seems that you're running pew for the first time\n"
                  "If you want source shell competions and update your prompt, "
                  "Add the following line to your shell config file:\n %s" % source_cmd)
        else:
            update_config_file(rcpath, source_cmd)
    print('\nWill now continue with the command:', *sys.argv[1:])
    input('[enter]')
def update_config_file(rcpath, source_cmd):
    """Append source_cmd to rcpath (with the user's consent) unless present."""
    with rcpath.open('r+') as rcfile:
        # After readlines() the file position is at EOF, so a later
        # write() appends.
        already_there = any(line.strip() == source_cmd
                            for line in rcfile.readlines())
        if already_there:
            return
        choice = 'X'
        while choice not in ('y', '', 'n'):
            choice = input("It seems that you're running pew for the first time\n"
                           "do you want to modify %s to source completions and"
                           " update your prompt? [y/N]\n> " % rcpath).lower()
        if choice == 'y':
            rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
            print('Done')
        else:
            print('\nOk, if you want to do it manually, just add\n %s\nat'
                  ' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
    """Pretty-print available commands with the first line of their docstring."""
    pad = max(map(len, cmds)) + 3
    columns, _ = get_terminal_size()
    width = columns or 1000
    print('Available commands:\n')
    for name, fun in sorted(cmds.items()):
        doc = fun.__doc__
        if not doc:
            print(' ' + name)
            continue
        print(textwrap.fill(
            doc.splitlines()[0],
            width,
            initial_indent=(' {0}: '.format(name)).ljust(pad),
            subsequent_indent=pad * ' '))
def pew():
    """Entry point: dispatch sys.argv[1] to the matching *_cmd function."""
    first_run = makedirs_and_symlink_if_needed(workon_home)
    if first_run and sys.stdin.isatty():
        first_run_setup()
    # Every module-level callable ending in _cmd is a subcommand.
    cmds = {name[:-4]: fun for name, fun in globals().items()
            if name.endswith('_cmd')}
    args = sys.argv[1:]
    if not args:
        print_commands(cmds)
        return
    if args[0] in cmds:
        try:
            return cmds[args[0]](args[1:])
        except CalledProcessError as e:
            return e.returncode
        except KeyboardInterrupt:
            pass
    else:
        err("ERROR: command", args[0], "does not exist.")
        print_commands(cmds)
        sys.exit(1)
|
berdario/pew | pew/pew.py | wipeenv_cmd | python | def wipeenv_cmd(argv):
env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
if not env:
sys.exit('ERROR: no virtualenv active')
elif not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
else:
env_pip = str(workon_home / env / env_bin_dir / 'pip')
all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
ignored = sorted(all_pkgs - pkgs)
pkgs = set(p.split("==")[0] for p in pkgs)
to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
if to_remove:
print("Ignoring:\n %s" % "\n ".join(ignored))
print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
return inve(env, 'pip', 'uninstall', '-y', *to_remove)
else:
print("Nothing to remove") | Remove all installed packages from the current (or supplied) env. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L626-L646 | [
"def invoke(*args, **kwargs):\n inp = kwargs.pop('inp', '').encode(encoding)\n popen = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, **kwargs)\n out, err = [o.strip().decode(encoding) for o in popen.communicate(inp)]\n return Result(popen.returncode, out, err)\n",
"def inve(env, command, *args, **kwargs):\n \"\"\"Run a command in the given virtual environment.\n\n Pass additional keyword arguments to ``subprocess.check_call()``.\"\"\"\n # we don't strictly need to restore the environment, since pew runs in\n # its own process, but it feels like the right thing to do\n with temp_environ():\n os.environ['VIRTUAL_ENV'] = str(workon_home / env)\n os.environ['PATH'] = compute_path(env)\n\n unsetenv('PYTHONHOME')\n unsetenv('__PYVENV_LAUNCHER__')\n\n try:\n return check_call([command] + list(args), shell=windows, **kwargs)\n # need to have shell=True on windows, otherwise the PYTHONPATH\n # won't inherit the PATH\n except OSError as e:\n if e.errno == 2:\n err('Unable to find', command)\n return 2\n else:\n raise\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
    # Try importing these packages if available
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
    """Create workon_home on first run; return True if it was created."""
    if workon_home.exists() or not own(workon_home):
        return False
    workon_home.mkdir(parents=True)
    # Keep the traditional ~/.virtualenvs location working as a symlink,
    # but only when the user hasn't customised any relevant env var.
    link = expandpath('~/.virtualenvs')
    if (os.name == 'posix' and 'WORKON_HOME' not in os.environ
            and 'XDG_DATA_HOME' not in os.environ and not link.exists()):
        link.symlink_to(str(workon_home))
    return True
pew_site = Path(__file__).parent
def supported_shell():
shell = Path(os.environ.get('SHELL', '')).stem
if shell in ('bash', 'zsh', 'fish'):
return shell
def shell_config_cmd(argv):
"Prints the path for the current $SHELL helper file"
shell = supported_shell()
if shell:
print(pew_site / 'shell_config' / ('init.' + shell))
else:
err('Completions and prompts are unavailable for %s' %
repr(os.environ.get('SHELL', '')))
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
    """Return the project directory recorded for *env*, or None."""
    project_file = workon_home / env / '.project'
    if not project_file.exists():
        return None
    with project_file.open() as f:
        project_dir = f.readline().strip()
    if os.path.exists(project_dir):
        return project_dir
    # The recorded directory vanished: warn and fall through to None.
    err('Corrupted or outdated:', project_file, '\nDirectory',
        project_dir, "doesn't exist.")
def unsetenv(key):
    """Remove *key* from os.environ; a no-op when it is absent."""
    os.environ.pop(key, None)
def compute_path(env):
    """Return $PATH with the env's bin/Scripts directory prepended."""
    env_bin = workon_home / env / env_bin_dir
    return os.pathsep.join([str(env_bin), os.environ['PATH']])
def inve(env, command, *args, **kwargs):
    """Run a command in the given virtual environment.

    Pass additional keyword arguments to ``subprocess.check_call()``."""
    # we don't strictly need to restore the environment, since pew runs in
    # its own process, but it feels like the right thing to do
    with temp_environ():
        os.environ['VIRTUAL_ENV'] = str(workon_home / env)
        os.environ['PATH'] = compute_path(env)

        unsetenv('PYTHONHOME')
        unsetenv('__PYVENV_LAUNCHER__')

        try:
            # need shell=True on windows, otherwise the PYTHONPATH
            # won't inherit the PATH
            return check_call([command] + list(args), shell=windows, **kwargs)
        except OSError as e:
            if e.errno != 2:
                raise
            err('Unable to find', command)
            return 2
def fork_shell(env, shellcmd, cwd):
or_ctrld = '' if windows else "or 'Ctrl+D' "
err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
"to return.", sep='')
if 'VIRTUAL_ENV' in os.environ:
err("Be aware that this environment will be nested on top "
"of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
# bash is a special little snowflake, and prevent_path_errors cannot work there
# https://github.com/berdario/pew/issues/58#issuecomment-102182346
bashrcpath = expandpath('~/.bashrc')
if bashrcpath.exists():
with NamedTemporaryFile('w+') as rcfile:
with bashrcpath.open() as bashrc:
rcfile.write(bashrc.read())
rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
rcfile.flush()
return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
else:
return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
shell_cmd = ['cmd']
cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
if expandpath(cmderrc_path).exists():
shell_cmd += ['/k', cmderrc_path]
if cwd:
os.environ['CMDER_START'] = cwd
return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
    """Best-effort detection of which shell to spawn for subshells."""
    shell = os.environ.get('SHELL', None)
    if shell:
        return shell
    if 'CMDER_ROOT' in os.environ:
        return 'Cmder'
    if not windows:
        return 'sh'
    # Plain Windows: ask shellingham, falling back to COMSPEC/cmd.exe.
    try:
        _, shell = shellingham.detect_shell()
    except shellingham.ShellDetectionFailure:
        shell = os.environ.get('COMSPEC', 'cmd.exe')
    return shell
def shell(env, cwd=None):
env = str(env)
shell = _detect_shell()
shell_name = Path(shell).stem
if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
# On Windows the PATH is usually set with System Utility
# so we won't worry about trying to check mistakes there
shell_check = (sys.executable + ' -c "from pew.pew import '
'prevent_path_errors; prevent_path_errors()"')
try:
inve(env, shell, '-c', shell_check)
except CalledProcessError:
return
if shell_name == 'bash':
return fork_bash(env, cwd)
elif shell_name == 'Cmder':
return fork_cmder(env, cwd)
else:
return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
requirements=None, rest=[]):
if python:
rest = ["--python=%s" % python] + rest
path = (workon_home / envname).absolute()
try:
check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
except (CalledProcessError, KeyboardInterrupt):
rmvirtualenvs([envname])
raise
else:
if project:
setvirtualenvproject(envname, project.absolute())
if requirements:
inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
if packages:
inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python')
parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
return parser
def new_cmd(argv):
"""Create a new environment, in $WORKON_HOME."""
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname)
def rmvirtualenvs(envs):
    """Delete the given envs; refuse (and stop) on the active one.

    Returns True when any removal failed."""
    error_happened = False
    for name in envs:
        env = workon_home / name
        # Never delete the environment we are currently running inside.
        if os.environ.get('VIRTUAL_ENV') == str(env):
            err("ERROR: You cannot remove the active environment (%s)." % env)
            error_happened = True
            break
        try:
            shutil.rmtree(str(env))
        except OSError as e:
            err("Error while trying to remove the {0} env: \n{1}".format
                (env, e.strerror))
            error_happened = True
    return error_happened
def rm_cmd(argv):
"""Remove one or more environment, from $WORKON_HOME."""
if len(argv) < 1:
sys.exit("Please specify an environment")
return rmvirtualenvs(argv)
def packages(site_packages):
    """Return the top-level package names found in a site-packages Path."""
    names = {entry.stem.split('-')[0] for entry in site_packages.iterdir()}
    return names - {'__pycache__'}
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
    """Return the sorted names of all valid virtualenvs in workon_home."""
    # An env is valid if it contains a python executable in its bin dir;
    # parts[-3] is the env directory name.
    pattern = os.path.join('*', env_bin_dir, 'python*')
    return sorted({env.parts[-3] for env in workon_home.glob(pattern)})
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def ls_cmd(argv):
"""List available environments."""
parser = argparse.ArgumentParser()
p_group = parser.add_mutually_exclusive_group()
p_group.add_argument('-b', '--brief', action='store_false')
p_group.add_argument('-l', '--long', action='store_true')
args = parser.parse_args(argv)
lsvirtualenv(args.long)
def parse_envname(argv, no_arg_callback):
if len(argv) < 1 or argv[0] is None:
no_arg_callback()
env = argv[0]
if env.startswith('/'):
sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
if not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
else:
return env
def workon_cmd(argv):
"""List or change working virtual environments."""
parser = argparse.ArgumentParser(prog='pew workon')
parser.add_argument('envname', nargs='?')
parser.add_argument(
'-n', '--no-cd', action='store_true',
help=('Do not change working directory to project directory after '
'activating virtualenv.')
)
args = parser.parse_args(argv)
def list_and_exit():
lsvirtualenv(False)
sys.exit(0)
env = parse_envname([args.envname], list_and_exit)
# Check if the virtualenv has an associated project directory and in
# this case, use it as the current working directory.
project_dir = get_project_dir(env)
if project_dir is None or args.no_cd:
project_dir = os.getcwd()
return shell(env, cwd=project_dir)
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
if not env:
sys.exit('ERROR: no virtualenv active')
else:
env_python = workon_home / env / env_bin_dir / 'python'
return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
"""Add the specified directories to the Python path for the currently active virtualenv.
This will be done by placing the directory names in a path file named
"virtualenv_path_extensions.pth" inside the virtualenv's site-packages
directory; if this file does not exists, it will be created first.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='remove', action='store_true')
parser.add_argument('dirs', nargs='+')
args = parser.parse_args(argv)
extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
if not extra_paths.exists():
with extra_paths.open('w') as extra:
extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
def rewrite(f):
with extra_paths.open('r+') as extra:
to_write = f(extra.readlines())
extra.seek(0)
extra.truncate()
extra.writelines(to_write)
if args.remove:
rewrite(lambda ls: [line for line in ls if line not in new_paths])
else:
rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
print(sitepackages_dir())
def lssitepackages_cmd(argv):
"""Show the content of the site-packages directory of the current virtualenv."""
site = sitepackages_dir()
print(*sorted(site.iterdir()), sep=os.linesep)
extra_paths = site / '_virtualenv_path_extensions.pth'
if extra_paths.exists():
print('from _virtualenv_path_extensions.pth:')
with extra_paths.open() as extra:
print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
    """Toggle the current virtualenv between having and not having access to the global site-packages."""
    quiet = argv == ['-q']
    # virtualenv disables global site-packages via this marker file.
    ngsp_file = sitepackages_dir().parent / 'no-global-site-packages.txt'
    if ngsp_file.exists():
        ngsp_file.unlink()
        if not quiet:
            print('Enabled global site-packages')
    else:
        with ngsp_file.open('w'):
            pass
        if not quiet:
            print('Disabled global site-packages')
def cp_cmd(argv):
"""Duplicate the named virtualenv to make a new one."""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target', nargs='?')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
args = parser.parse_args(argv)
target_name = copy_virtualenv_project(args.source, args.target)
if args.activate:
shell(target_name)
def copy_virtualenv_project(source, target):
source = expandpath(source)
if not source.exists():
source = workon_home / source
if not source.exists():
sys.exit('Please provide a valid virtualenv to copy')
target_name = target or source.name
target = workon_home / target_name
if target.exists():
sys.exit('%s virtualenv already exists in %s.' % (
target_name, workon_home
))
print('Copying {0} in {1}'.format(source, target_name))
clone_virtualenv(str(source), str(target))
return target_name
def rename_cmd(argv):
"""Rename a virtualenv"""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target')
pargs = parser.parse_args(argv)
copy_virtualenv_project(pargs.source, pargs.target)
return rmvirtualenvs([pargs.source])
def setvirtualenvproject(env, project):
    """Record *project* as the project directory associated with *env*."""
    print('Setting project for {0} to {1}'.format(env, project))
    project_file = workon_home / env / '.project'
    with project_file.open('wb') as prj:
        prj.write(str(project).encode())
def setproject_cmd(argv):
"""Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
args = dict(enumerate(argv))
project = os.path.abspath(args.get(1, '.'))
env = args.get(0, os.environ.get('VIRTUAL_ENV'))
if not env:
sys.exit('pew setproject [virtualenv] [project_path]')
if not (workon_home / env).exists():
sys.exit("Environment '%s' doesn't exist." % env)
if not os.path.isdir(project):
sys.exit('pew setproject: %s does not exist' % project)
setvirtualenvproject(env, project)
def getproject_cmd(argv):
"""Print a virtualenv's project directory, if set.
If called without providing a virtualenv name as argument, print the
current virtualenv's project directory.
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Print an environment's project directory.",
)
parser.add_argument(
'envname',
nargs='?',
default=os.environ.get('VIRTUAL_ENV'),
help=(
'The name of the environment to return the project directory '
'for. If omitted, will use the currently active environment.'
),
)
args = parser.parse_args(argv)
# Now, do the actual work
if not args.envname:
sys.exit('ERROR: no virtualenv active')
if not (workon_home / args.envname).exists():
sys.exit("ERROR: Environment '{0}' does not exist."
.format(args.envname))
project_dir = get_project_dir(args.envname)
if project_dir is None:
sys.exit("ERROR: no project directory set for Environment '{0}'"
.format(args.envname))
print(project_dir)
def mkproject_cmd(argv):
"""Create a new project directory and its associated virtualenv."""
if '-l' in argv or '--list' in argv:
templates = [t.name[9:] for t in workon_home.glob("template_*")]
print("Available project templates:", *templates, sep='\n')
return
parser = mkvirtualenv_argparser()
parser.add_argument('envname')
parser.add_argument(
'-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
parser.add_argument(
'-l', '--list', action='store_true', help='List available templates.')
args, rest = parser.parse_known_args(argv)
projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
if not projects_home.exists():
sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
project = (projects_home / args.envname).absolute()
if project.exists():
sys.exit('Project %s already exists.' % args.envname)
mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
args.requirements, rest)
project.mkdir()
for template_name in args.templates:
template = workon_home / ("template_" + template_name)
inve(args.envname, str(template), args.envname, str(project))
if args.activate:
shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
    """Create a temporary virtualenv.

    The environment gets a random unused name, is activated immediately
    (unless -d/--dont-activate was given) and is removed again when the
    subshell exits.
    """
    parser = mkvirtualenv_argparser()
    # '.' always exists under workon_home, so the loop runs at least once
    # and keeps drawing names until it finds a free one.
    env = '.'
    while (workon_home / env).exists():
        # '%x' works identically on Python 2 and 3; the previous
        # hex(...)[2:-1] slicing (written for Python 2's trailing 'L')
        # silently dropped the last hex digit on Python 3.
        env = '%x' % random.getrandbits(64)
    args, rest = parser.parse_known_args(argv)
    mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
                 rest=rest)
    print('This is a temporary environment. It will be deleted when you exit')
    try:
        if args.activate:
            # only used for testing on windows
            shell(env)
    finally:
        # Always clean up, but don't `return` from inside `finally`: that
        # would swallow any exception (including KeyboardInterrupt) raised
        # by shell().
        errors = rmvirtualenvs([env])
    return errors
def inall_cmd(argv):
"""Run a command in each virtualenv."""
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors)
def in_cmd(argv):
"""Run a command in the given virtualenv."""
if len(argv) == 1:
return workon_cmd(argv)
parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
return inve(*argv)
def restore_cmd(argv):
"""Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
if len(argv) < 1:
sys.exit('You must provide a valid virtualenv to target')
env = argv[0]
path = workon_home / env
py = path / env_bin_dir / ('python.exe' if windows else 'python')
exact_py = py.resolve().name
return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
"""Print the path for the virtualenv directory"""
env = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
print(workon_home / env)
def install_cmd(argv):
'''Use Pythonz to download and build the specified Python version'''
installer = InstallCommand()
options, versions = installer.parser.parse_args(argv)
if len(versions) != 1:
installer.parser.print_help()
sys.exit(1)
else:
try:
actual_installer = PythonInstaller.get_installer(versions[0], options)
return actual_installer.install()
except AlreadyInstalledError as e:
print(e)
def uninstall_cmd(argv):
'''Use Pythonz to uninstall the specified Python version'''
return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
'''List the pythons installed by Pythonz (or all the installable ones)'''
return ListPythons().run(argv)
def locate_python_cmd(argv):
'''Locate the path for the python version installed by Pythonz'''
return LocatePython().run(argv)
def version_cmd(argv):
"""Prints current pew version"""
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('pew').version
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
print(__version__)
def prevent_path_errors():
if 'VIRTUAL_ENV' in os.environ and not check_path():
sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
shell = supported_shell()
if shell:
if shell == 'fish':
source_cmd = 'source (pew shell_config)'
else:
source_cmd = 'source "$(pew shell_config)"'
rcpath = expandpath({'bash': '~/.bashrc'
, 'zsh': '~/.zshrc'
, 'fish': '~/.config/fish/config.fish'}[shell])
if rcpath.exists():
update_config_file(rcpath, source_cmd)
else:
print("It seems that you're running pew for the first time\n"
"If you want source shell competions and update your prompt, "
"Add the following line to your shell config file:\n %s" % source_cmd)
print('\nWill now continue with the command:', *sys.argv[1:])
input('[enter]')
def update_config_file(rcpath, source_cmd):
with rcpath.open('r+') as rcfile:
if source_cmd not in (line.strip() for line in rcfile.readlines()):
choice = 'X'
while choice not in ('y', '', 'n'):
choice = input("It seems that you're running pew for the first time\n"
"do you want to modify %s to source completions and"
" update your prompt? [y/N]\n> " % rcpath).lower()
if choice == 'y':
rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
print('Done')
else:
print('\nOk, if you want to do it manually, just add\n %s\nat'
' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
longest = max(map(len, cmds)) + 3
columns, _ = get_terminal_size()
print('Available commands:\n')
for cmd, fun in sorted(cmds.items()):
if fun.__doc__:
print(textwrap.fill(
fun.__doc__.splitlines()[0],
columns or 1000,
initial_indent=(' {0}: '.format(cmd)).ljust(longest),
subsequent_indent=longest * ' '))
else:
print(' ' + cmd)
def pew():
first_run = makedirs_and_symlink_if_needed(workon_home)
if first_run and sys.stdin.isatty():
first_run_setup()
cmds = dict((cmd[:-4], fun)
for cmd, fun in globals().items() if cmd.endswith('_cmd'))
if sys.argv[1:]:
if sys.argv[1] in cmds:
command = cmds[sys.argv[1]]
try:
return command(sys.argv[2:])
except CalledProcessError as e:
return e.returncode
except KeyboardInterrupt:
pass
else:
err("ERROR: command", sys.argv[1], "does not exist.")
print_commands(cmds)
sys.exit(1)
else:
print_commands(cmds)
|
berdario/pew | pew/pew.py | inall_cmd | python | def inall_cmd(argv):
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors) | Run a command in each virtualenv. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L649-L660 | [
"def inve(env, command, *args, **kwargs):\n \"\"\"Run a command in the given virtual environment.\n\n Pass additional keyword arguments to ``subprocess.check_call()``.\"\"\"\n # we don't strictly need to restore the environment, since pew runs in\n # its own process, but it feels like the right thing to do\n with temp_environ():\n os.environ['VIRTUAL_ENV'] = str(workon_home / env)\n os.environ['PATH'] = compute_path(env)\n\n unsetenv('PYTHONHOME')\n unsetenv('__PYVENV_LAUNCHER__')\n\n try:\n return check_call([command] + list(args), shell=windows, **kwargs)\n # need to have shell=True on windows, otherwise the PYTHONPATH\n # won't inherit the PATH\n except OSError as e:\n if e.errno == 2:\n err('Unable to find', command)\n return 2\n else:\n raise\n",
"def lsenvs():\n return sorted(set(env.parts[-3] for env in\n workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
    # Try importing these packages if available
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
if not workon_home.exists() and own(workon_home):
workon_home.mkdir(parents=True)
link = expandpath('~/.virtualenvs')
if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
'XDG_DATA_HOME' not in os.environ and not link.exists():
link.symlink_to(str(workon_home))
return True
else:
return False
pew_site = Path(__file__).parent
def supported_shell():
shell = Path(os.environ.get('SHELL', '')).stem
if shell in ('bash', 'zsh', 'fish'):
return shell
def shell_config_cmd(argv):
"Prints the path for the current $SHELL helper file"
shell = supported_shell()
if shell:
print(pew_site / 'shell_config' / ('init.' + shell))
else:
err('Completions and prompts are unavailable for %s' %
repr(os.environ.get('SHELL', '')))
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
project_file = workon_home / env / '.project'
if project_file.exists():
with project_file.open() as f:
project_dir = f.readline().strip()
if os.path.exists(project_dir):
return project_dir
else:
err('Corrupted or outdated:', project_file, '\nDirectory',
project_dir, "doesn't exist.")
def unsetenv(key):
    """Remove *key* from os.environ; a no-op when it is not set."""
    os.environ.pop(key, None)
def compute_path(env):
    """Return $PATH with the env's bin/Scripts directory prepended."""
    bindir = workon_home / env / env_bin_dir
    return str(bindir) + os.pathsep + os.environ['PATH']
def inve(env, command, *args, **kwargs):
    """Run a command in the given virtual environment.

    Pass additional keyword arguments to ``subprocess.check_call()``.

    Returns the command's exit status, or 2 (after printing a message)
    when the executable cannot be found.
    """
    # we don't strictly need to restore the environment, since pew runs in
    # its own process, but it feels like the right thing to do
    with temp_environ():
        os.environ['VIRTUAL_ENV'] = str(workon_home / env)
        os.environ['PATH'] = compute_path(env)

        # these variables would override the env's interpreter selection
        unsetenv('PYTHONHOME')
        unsetenv('__PYVENV_LAUNCHER__')

        try:
            return check_call([command] + list(args), shell=windows, **kwargs)
        # need to have shell=True on windows, otherwise the PYTHONPATH
        # won't inherit the PATH
        except OSError as e:
            if e.errno == 2:  # ENOENT: the command was not found on PATH
                err('Unable to find', command)
                return 2
            else:
                raise
def fork_shell(env, shellcmd, cwd):
    """Spawn *shellcmd* as an interactive subshell inside *env*."""
    or_ctrld = '' if windows else "or 'Ctrl+D' "
    err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
        "to return.", sep='')
    if 'VIRTUAL_ENV' in os.environ:
        # warn about nesting: the outer env's activation is still in effect
        err("Be aware that this environment will be nested on top "
            "of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
    return inve(env, *shellcmd, cwd=cwd)


def fork_bash(env, cwd):
    """Spawn a bash subshell, injecting the env's PATH via a temporary rcfile."""
    # bash is a special little snowflake, and prevent_path_errors cannot work there
    # https://github.com/berdario/pew/issues/58#issuecomment-102182346
    bashrcpath = expandpath('~/.bashrc')
    if bashrcpath.exists():
        with NamedTemporaryFile('w+') as rcfile:
            # replicate the user's rc, then force our PATH at the end so the
            # user's own PATH manipulations cannot shadow the env
            with bashrcpath.open() as bashrc:
                rcfile.write(bashrc.read())
            rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
            rcfile.flush()
            return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
    else:
        return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
    """Spawn a Cmder-flavoured cmd.exe subshell inside *env*."""
    cmd = ['cmd']
    init_script = r'%CMDER_ROOT%\vendor\init.bat'
    if expandpath(init_script).exists():
        cmd.extend(['/k', init_script])
    if cwd:
        os.environ['CMDER_START'] = cwd
    return fork_shell(env, cmd, cwd)
def _detect_shell():
    """Best-effort detection of the user's shell executable.

    Order: $SHELL, then Cmder, then (on Windows) shellingham's
    process-tree detection with %COMSPEC% as fallback, then plain 'sh'.
    """
    shell = os.environ.get('SHELL', None)
    if not shell:
        if 'CMDER_ROOT' in os.environ:
            shell = 'Cmder'
        elif windows:
            try:
                _, shell = shellingham.detect_shell()
            except shellingham.ShellDetectionFailure:
                shell = os.environ.get('COMSPEC', 'cmd.exe')
        else:
            shell = 'sh'
    return shell


def shell(env, cwd=None):
    """Launch an interactive subshell inside *env*, optionally starting in *cwd*."""
    env = str(env)
    shell = _detect_shell()
    shell_name = Path(shell).stem
    if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
        # On Windows the PATH is usually set with System Utility
        # so we won't worry about trying to check mistakes there
        shell_check = (sys.executable + ' -c "from pew.pew import '
                       'prevent_path_errors; prevent_path_errors()"')
        try:
            # sanity-check that the env activates correctly before forking
            inve(env, shell, '-c', shell_check)
        except CalledProcessError:
            return
    if shell_name == 'bash':
        return fork_bash(env, cwd)
    elif shell_name == 'Cmder':
        return fork_cmder(env, cwd)
    else:
        return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
                 requirements=None, rest=[]):
    """Create a virtualenv in $WORKON_HOME and perform the initial setup.

    *rest* is passed through to virtualenv verbatim.
    NOTE(review): the mutable defaults ([]) are shared between calls; safe
    only because they are never mutated here — confirm before changing.
    """
    if python:
        rest = ["--python=%s" % python] + rest
    path = (workon_home / envname).absolute()
    try:
        check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
    except (CalledProcessError, KeyboardInterrupt):
        # don't leave a half-created env behind
        rmvirtualenvs([envname])
        raise
    else:
        if project:
            setvirtualenvproject(envname, project.absolute())
        if requirements:
            inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
        if packages:
            inve(envname, 'pip', 'install', *packages)


def mkvirtualenv_argparser():
    """Build the option parser shared by the env-creating commands
    (new, mkproject, mktmpenv)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--python')
    parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
    parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
    parser.add_argument('-d', '--dont-activate', action='store_false',
                        default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
    return parser


def new_cmd(argv):
    """Create a new environment, in $WORKON_HOME."""
    parser = mkvirtualenv_argparser()
    parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
    parser.add_argument('envname')
    args, rest = parser.parse_known_args(argv)
    project = expandpath(args.project) if args.project else None
    mkvirtualenv(args.envname, args.python, args.packages, project,
                 args.requirements, rest)
    if args.activate:
        shell(args.envname)
def rmvirtualenvs(envs):
    """Delete the given env directories; return True when any removal failed.

    NOTE(review): hitting the active env aborts the whole loop (``break``),
    so any later envs in the list are left untouched — confirm this is the
    intended behavior rather than ``continue``.
    """
    error_happened = False
    for env in envs:
        env = workon_home / env
        if os.environ.get('VIRTUAL_ENV') == str(env):
            err("ERROR: You cannot remove the active environment (%s)." % env)
            error_happened = True
            break
        try:
            shutil.rmtree(str(env))
        except OSError as e:
            err("Error while trying to remove the {0} env: \n{1}".format
                (env, e.strerror))
            error_happened = True
    return error_happened
def rm_cmd(argv):
    """Remove one or more environment, from $WORKON_HOME."""
    if not argv:
        sys.exit("Please specify an environment")
    return rmvirtualenvs(argv)
def packages(site_packages):
    """Names of the packages installed under *site_packages*.

    Derives each name from the directory/file stem up to the first dash
    (e.g. 'foo-1.0.dist-info' -> 'foo'); '__pycache__' is excluded.
    """
    names = {entry.stem.split('-')[0] for entry in site_packages.iterdir()}
    return names - {'__pycache__'}
def showvirtualenv(env):
    """Print one env's name, Python version, and its installed packages."""
    columns, _ = get_terminal_size()
    pkgs = sorted(packages(sitepackages_dir(env)))
    env_python = workon_home / env / env_bin_dir / 'python'
    l = len(env) + 2  # width of the "env: " prefix; used to indent the package list
    version = invoke(str(env_python), '-V')
    # some interpreters print -V on stderr, hence out + err are combined
    version = ' - '.join((version.out + version.err).splitlines())
    print(env, ': ', version, sep='')
    print(textwrap.fill(' '.join(pkgs),
                        width=columns-l,
                        initial_indent=(l * ' '),
                        subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
    """Show the named environment, or the active one when no name is given."""
    if argv:
        showvirtualenv(argv[0])
    elif 'VIRTUAL_ENV' in os.environ:
        showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
    else:
        sys.exit('pew show [env]')
def lsenvs():
    """Return the sorted names of envs that contain a python executable."""
    # parts[-3] is the env name in <env>/<bin_dir>/<python*>
    return sorted(set(env.parts[-3] for env in
                      workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
    """List all envs: names only, or one detailed entry each when *verbose*."""
    if verbose:
        for name in lsenvs():
            showvirtualenv(name)
    else:
        print_virtualenvs(*lsenvs())
def ls_cmd(argv):
    """List available environments."""
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-b', '--brief', action='store_false')
    group.add_argument('-l', '--long', action='store_true')
    options = parser.parse_args(argv)
    lsvirtualenv(options.long)
def parse_envname(argv, no_arg_callback):
    """Validate and return the env name in argv[0].

    Invokes *no_arg_callback* when no name was supplied; exits on an
    absolute path or a nonexistent environment.
    """
    if not argv or argv[0] is None:
        no_arg_callback()

    env = argv[0]
    if env.startswith('/'):
        sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
    if (workon_home / env).exists():
        return env
    sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
def workon_cmd(argv):
    """List or change working virtual environments."""
    parser = argparse.ArgumentParser(prog='pew workon')
    parser.add_argument('envname', nargs='?')
    parser.add_argument(
        '-n', '--no-cd', action='store_true',
        help=('Do not change working directory to project directory after '
              'activating virtualenv.')
    )
    args = parser.parse_args(argv)

    def list_and_exit():
        # invoked by parse_envname when no env name was given
        lsvirtualenv(False)
        sys.exit(0)

    env = parse_envname([args.envname], list_and_exit)

    # Check if the virtualenv has an associated project directory and in
    # this case, use it as the current working directory.
    project_dir = get_project_dir(env)
    if project_dir is None or args.no_cd:
        project_dir = os.getcwd()

    return shell(env, cwd=project_dir)
def sitepackages_dir(env=None):
    """Return the site-packages Path of *env* (default: the active virtualenv).

    Exits with an error when no env is given and none is active.
    """
    # Resolve the default at call time: the previous
    # ``env=os.environ.get('VIRTUAL_ENV')`` default was evaluated once at
    # import, so activations happening after import were ignored.
    if env is None:
        env = os.environ.get('VIRTUAL_ENV')
    if not env:
        sys.exit('ERROR: no virtualenv active')
    else:
        env_python = workon_home / env / env_bin_dir / 'python'
        # ask the env's own interpreter: the layout depends on its version
        return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
    """Add the specified directories to the Python path for the currently active virtualenv.

    This will be done by placing the directory names in a path file named
    "virtualenv_path_extensions.pth" inside the virtualenv's site-packages
    directory; if this file does not exists, it will be created first.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', dest='remove', action='store_true')
    parser.add_argument('dirs', nargs='+')
    args = parser.parse_args(argv)

    extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
    new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
    if not extra_paths.exists():
        with extra_paths.open('w') as extra:
            # .pth bootstrap lines: the first records the original length of
            # sys.path, the second moves everything appended after it to the
            # front of sys.path at interpreter startup
            extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')

    def rewrite(f):
        # rewrite the .pth file in place through the line-list transform *f*
        with extra_paths.open('r+') as extra:
            to_write = f(extra.readlines())
            extra.seek(0)
            extra.truncate()
            extra.writelines(to_write)

    if args.remove:
        rewrite(lambda ls: [line for line in ls if line not in new_paths])
    else:
        # keep the first bootstrap line first; insert the new paths after it
        rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])


def sitepackages_dir_cmd(argv):
    """Print the site-packages directory of the active virtualenv."""
    print(sitepackages_dir())


def lssitepackages_cmd(argv):
    """Show the content of the site-packages directory of the current virtualenv."""
    site = sitepackages_dir()
    print(*sorted(site.iterdir()), sep=os.linesep)
    extra_paths = site / '_virtualenv_path_extensions.pth'
    if extra_paths.exists():
        print('from _virtualenv_path_extensions.pth:')
        with extra_paths.open() as extra:
            print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
    """Toggle the current virtualenv between having and not having access to the global site-packages."""
    quiet = argv == ['-q']
    site = sitepackages_dir()
    # virtualenv signals "no global site-packages" through this marker file
    ngsp_file = site.parent / 'no-global-site-packages.txt'
    if ngsp_file.exists():
        ngsp_file.unlink()
        if not quiet:
            print('Enabled global site-packages')
    else:
        with ngsp_file.open('w'):
            if not quiet:
                print('Disabled global site-packages')


def cp_cmd(argv):
    """Duplicate the named virtualenv to make a new one."""
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('target', nargs='?')
    parser.add_argument('-d', '--dont-activate', action='store_false',
                        default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
    args = parser.parse_args(argv)
    target_name = copy_virtualenv_project(args.source, args.target)
    if args.activate:
        shell(target_name)


def copy_virtualenv_project(source, target):
    """Clone *source* (a path, or an env name in $WORKON_HOME) as *target*.

    Returns the new env's name; exits when the source is missing or the
    target already exists.
    """
    source = expandpath(source)
    if not source.exists():
        # not a filesystem path: retry as an env name inside $WORKON_HOME
        source = workon_home / source
        if not source.exists():
            sys.exit('Please provide a valid virtualenv to copy')

    target_name = target or source.name

    target = workon_home / target_name

    if target.exists():
        sys.exit('%s virtualenv already exists in %s.' % (
            target_name, workon_home
        ))

    print('Copying {0} in {1}'.format(source, target_name))
    clone_virtualenv(str(source), str(target))
    return target_name
def rename_cmd(argv):
    """Rename a virtualenv"""
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('target')
    args = parser.parse_args(argv)
    # a rename is a copy followed by removing the original
    copy_virtualenv_project(args.source, args.target)
    return rmvirtualenvs([args.source])
def setvirtualenvproject(env, project):
    """Record *project* as the project directory of *env* (in its .project file)."""
    print('Setting project for {0} to {1}'.format(env, project))
    project_file = workon_home / env / '.project'
    with project_file.open('wb') as prj:
        prj.write(str(project).encode())
def setproject_cmd(argv):
    """Given a virtualenv directory and a project directory, set the
    virtualenv up to be associated with the project."""
    env = argv[0] if len(argv) > 0 else os.environ.get('VIRTUAL_ENV')
    project = os.path.abspath(argv[1] if len(argv) > 1 else '.')
    if not env:
        sys.exit('pew setproject [virtualenv] [project_path]')
    if not (workon_home / env).exists():
        sys.exit("Environment '%s' doesn't exist." % env)
    if not os.path.isdir(project):
        sys.exit('pew setproject: %s does not exist' % project)
    setvirtualenvproject(env, project)
def getproject_cmd(argv):
    """Print a virtualenv's project directory, if set.

    If called without providing a virtualenv name as argument, print the
    current virtualenv's project directory.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description="Print an environment's project directory.",
    )
    parser.add_argument(
        'envname',
        nargs='?',
        default=os.environ.get('VIRTUAL_ENV'),
        help=(
            'The name of the environment to return the project directory '
            'for. If omitted, will use the currently active environment.'
        ),
    )
    args = parser.parse_args(argv)

    # Now, do the actual work
    if not args.envname:
        sys.exit('ERROR: no virtualenv active')
    if not (workon_home / args.envname).exists():
        sys.exit("ERROR: Environment '{0}' does not exist."
                 .format(args.envname))
    project_dir = get_project_dir(args.envname)
    if project_dir is None:
        sys.exit("ERROR: no project directory set for Environment '{0}'"
                 .format(args.envname))
    print(project_dir)


def mkproject_cmd(argv):
    """Create a new project directory and its associated virtualenv."""
    # handle -l/--list up front, so listing works without an envname argument
    if '-l' in argv or '--list' in argv:
        # name[9:] strips the 'template_' prefix
        templates = [t.name[9:] for t in workon_home.glob("template_*")]
        print("Available project templates:", *templates, sep='\n')
        return

    parser = mkvirtualenv_argparser()
    parser.add_argument('envname')
    parser.add_argument(
        '-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
    parser.add_argument(
        '-l', '--list', action='store_true', help='List available templates.')
    args, rest = parser.parse_known_args(argv)

    projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
    if not projects_home.exists():
        sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)

    project = (projects_home / args.envname).absolute()
    if project.exists():
        sys.exit('Project %s already exists.' % args.envname)

    mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
                 args.requirements, rest)

    project.mkdir()

    for template_name in args.templates:
        template = workon_home / ("template_" + template_name)
        # a template is an executable run as: <template> <envname> <project>
        inve(args.envname, str(template), args.envname, str(project))
    if args.activate:
        shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
    """Create a temporary virtualenv.

    The env gets a random hex name and is removed when the subshell exits.
    """
    parser = mkvirtualenv_argparser()
    env = '.'
    while (workon_home / env).exists():
        # '%x' formats without hex()'s '0x' prefix (and without the 'L'
        # suffix Python 2 longs get). The old ``hex(...)[2:-1]`` slice was
        # meant to strip that 'L', but on Python 3 it chopped off the last
        # random digit instead.
        env = '%x' % random.getrandbits(64)

    args, rest = parser.parse_known_args(argv)

    mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
                 rest=rest)
    print('This is a temporary environment. It will be deleted when you exit')
    try:
        if args.activate:
            # only used for testing on windows
            shell(env)
    finally:
        # NOTE: returning from ``finally`` swallows any in-flight exception;
        # kept as-is so cleanup always runs and the return value is preserved
        return rmvirtualenvs([env])
def wipeenv_cmd(argv):
    """Remove all installed packages from the current (or supplied) env."""
    env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')

    if not env:
        sys.exit('ERROR: no virtualenv active')
    elif not (workon_home / env).exists():
        sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
    else:
        env_pip = str(workon_home / env / env_bin_dir / 'pip')
        all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
        # only plain 'name==version' freeze lines are uninstalled; anything
        # else (editable installs, comments, ...) is reported and skipped
        pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
        ignored = sorted(all_pkgs - pkgs)
        pkgs = set(p.split("==")[0] for p in pkgs)
        to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
        if to_remove:
            print("Ignoring:\n %s" % "\n ".join(ignored))
            print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
            return inve(env, 'pip', 'uninstall', '-y', *to_remove)
        else:
            print("Nothing to remove")


def in_cmd(argv):
    """Run a command in the given virtualenv."""
    # with only an env name given, behave like `pew workon`
    if len(argv) == 1:
        return workon_cmd(argv)

    parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))

    return inve(*argv)
def restore_cmd(argv):
    """Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
    if len(argv) < 1:
        sys.exit('You must provide a valid virtualenv to target')
    env = argv[0]
    path = workon_home / env
    py = path / env_bin_dir / ('python.exe' if windows else 'python')
    # resolve the symlink to find the exact interpreter the env was built with
    exact_py = py.resolve().name
    return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
    """Print the path for the virtualenv directory"""
    missing = lambda : sys.exit('You must provide a valid virtualenv to target')
    print(workon_home / parse_envname(argv, missing))
def install_cmd(argv):
    '''Use Pythonz to download and build the specified Python version'''
    installer = InstallCommand()
    options, versions = installer.parser.parse_args(argv)
    if len(versions) != 1:
        # exactly one version must be given
        installer.parser.print_help()
        sys.exit(1)
    else:
        try:
            actual_installer = PythonInstaller.get_installer(versions[0], options)
            return actual_installer.install()
        except AlreadyInstalledError as e:
            # not an error for our purposes: just report and carry on
            print(e)


def uninstall_cmd(argv):
    '''Use Pythonz to uninstall the specified Python version'''
    return UninstallCommand().run(argv)


def list_pythons_cmd(argv):
    '''List the pythons installed by Pythonz (or all the installable ones)'''
    return ListPythons().run(argv)


def locate_python_cmd(argv):
    '''Locate the path for the python version installed by Pythonz'''
    return LocatePython().run(argv)
def version_cmd(argv):
    """Prints current pew version"""
    import pkg_resources

    try:
        version = pkg_resources.get_distribution('pew').version
    except pkg_resources.DistributionNotFound:
        version = 'unknown'
        print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)

    print(version)
def prevent_path_errors():
    """Exit with a diagnostic when the active env fails pew's PATH sanity
    check (see ``check_path``); no-op when no env is active."""
    if 'VIRTUAL_ENV' in os.environ and not check_path():
        sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
    """One-time interactive setup: offer to source pew's shell helpers."""
    shell = supported_shell()
    if shell:
        if shell == 'fish':
            source_cmd = 'source (pew shell_config)'
        else:
            source_cmd = 'source "$(pew shell_config)"'
        rcpath = expandpath({'bash': '~/.bashrc'
                           , 'zsh': '~/.zshrc'
                           , 'fish': '~/.config/fish/config.fish'}[shell])
        if rcpath.exists():
            update_config_file(rcpath, source_cmd)
        else:
            # NOTE(review): "competions" typo below is in a user-facing string
            print("It seems that you're running pew for the first time\n"
                  "If you want source shell competions and update your prompt, "
                  "Add the following line to your shell config file:\n %s" % source_cmd)
    print('\nWill now continue with the command:', *sys.argv[1:])
    input('[enter]')


def update_config_file(rcpath, source_cmd):
    """Append *source_cmd* to the rc file at *rcpath* after asking the user.

    Does nothing when the line is already present.
    """
    with rcpath.open('r+') as rcfile:
        if source_cmd not in (line.strip() for line in rcfile.readlines()):
            choice = 'X'
            while choice not in ('y', '', 'n'):  # bare Enter ('') means No, per [y/N]
                choice = input("It seems that you're running pew for the first time\n"
                               "do you want to modify %s to source completions and"
                               " update your prompt? [y/N]\n> " % rcpath).lower()
            if choice == 'y':
                rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
                print('Done')
            else:
                print('\nOk, if you want to do it manually, just add\n %s\nat'
                      ' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
    """Print each command name with the first line of its docstring, aligned."""
    pad = max(map(len, cmds)) + 3
    width, _ = get_terminal_size()
    print('Available commands:\n')
    for name in sorted(cmds):
        doc = cmds[name].__doc__
        if doc:
            print(textwrap.fill(
                doc.splitlines()[0],
                width or 1000,
                initial_indent=(' {0}: '.format(name)).ljust(pad),
                subsequent_indent=pad * ' '))
        else:
            print(' ' + name)
def pew():
    """Entry point: dispatch sys.argv[1] to the matching *_cmd function."""
    first_run = makedirs_and_symlink_if_needed(workon_home)
    if first_run and sys.stdin.isatty():
        first_run_setup()

    # every module-level function named <cmd>_cmd becomes a subcommand
    cmds = dict((cmd[:-4], fun)
                for cmd, fun in globals().items() if cmd.endswith('_cmd'))
    if sys.argv[1:]:
        if sys.argv[1] in cmds:
            command = cmds[sys.argv[1]]
            try:
                return command(sys.argv[2:])
            except CalledProcessError as e:
                # propagate the subcommand's exit status
                return e.returncode
            except KeyboardInterrupt:
                pass
        else:
            err("ERROR: command", sys.argv[1], "does not exist.")
            print_commands(cmds)
            sys.exit(1)
    else:
        print_commands(cmds)
|
berdario/pew | pew/pew.py | in_cmd | python | def in_cmd(argv):
if len(argv) == 1:
return workon_cmd(argv)
parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
return inve(*argv) | Run a command in the given virtualenv. | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L663-L671 | [
"def inve(env, command, *args, **kwargs):\n \"\"\"Run a command in the given virtual environment.\n\n Pass additional keyword arguments to ``subprocess.check_call()``.\"\"\"\n # we don't strictly need to restore the environment, since pew runs in\n # its own process, but it feels like the right thing to do\n with temp_environ():\n os.environ['VIRTUAL_ENV'] = str(workon_home / env)\n os.environ['PATH'] = compute_path(env)\n\n unsetenv('PYTHONHOME')\n unsetenv('__PYVENV_LAUNCHER__')\n\n try:\n return check_call([command] + list(args), shell=windows, **kwargs)\n # need to have shell=True on windows, otherwise the PYTHONPATH\n # won't inherit the PATH\n except OSError as e:\n if e.errno == 2:\n err('Unable to find', command)\n return 2\n else:\n raise\n",
"def parse_envname(argv, no_arg_callback):\n if len(argv) < 1 or argv[0] is None:\n no_arg_callback()\n\n env = argv[0]\n if env.startswith('/'):\n sys.exit(\"ERROR: Invalid environment name '{0}'.\".format(env))\n if not (workon_home / env).exists():\n sys.exit(\"ERROR: Environment '{0}' does not exist. Create it with \\\n'pew new {0}'.\".format(env))\n else:\n return env\n",
"def workon_cmd(argv):\n \"\"\"List or change working virtual environments.\"\"\"\n parser = argparse.ArgumentParser(prog='pew workon')\n parser.add_argument('envname', nargs='?')\n parser.add_argument(\n '-n', '--no-cd', action='store_true',\n help=('Do not change working directory to project directory after '\n 'activating virtualenv.')\n )\n args = parser.parse_args(argv)\n\n def list_and_exit():\n lsvirtualenv(False)\n sys.exit(0)\n\n env = parse_envname([args.envname], list_and_exit)\n\n # Check if the virtualenv has an associated project directory and in\n # this case, use it as the current working directory.\n project_dir = get_project_dir(env)\n if project_dir is None or args.no_cd:\n project_dir = os.getcwd()\n\n return shell(env, cwd=project_dir)\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
if not workon_home.exists() and own(workon_home):
workon_home.mkdir(parents=True)
link = expandpath('~/.virtualenvs')
if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
'XDG_DATA_HOME' not in os.environ and not link.exists():
link.symlink_to(str(workon_home))
return True
else:
return False
pew_site = Path(__file__).parent
def supported_shell():
shell = Path(os.environ.get('SHELL', '')).stem
if shell in ('bash', 'zsh', 'fish'):
return shell
def shell_config_cmd(argv):
"Prints the path for the current $SHELL helper file"
shell = supported_shell()
if shell:
print(pew_site / 'shell_config' / ('init.' + shell))
else:
err('Completions and prompts are unavailable for %s' %
repr(os.environ.get('SHELL', '')))
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
project_file = workon_home / env / '.project'
if project_file.exists():
with project_file.open() as f:
project_dir = f.readline().strip()
if os.path.exists(project_dir):
return project_dir
else:
err('Corrupted or outdated:', project_file, '\nDirectory',
project_dir, "doesn't exist.")
def unsetenv(key):
if key in os.environ:
del os.environ[key]
def compute_path(env):
envdir = workon_home / env
return os.pathsep.join([
str(envdir / env_bin_dir),
os.environ['PATH'],
])
def inve(env, command, *args, **kwargs):
"""Run a command in the given virtual environment.
Pass additional keyword arguments to ``subprocess.check_call()``."""
# we don't strictly need to restore the environment, since pew runs in
# its own process, but it feels like the right thing to do
with temp_environ():
os.environ['VIRTUAL_ENV'] = str(workon_home / env)
os.environ['PATH'] = compute_path(env)
unsetenv('PYTHONHOME')
unsetenv('__PYVENV_LAUNCHER__')
try:
return check_call([command] + list(args), shell=windows, **kwargs)
# need to have shell=True on windows, otherwise the PYTHONPATH
# won't inherit the PATH
except OSError as e:
if e.errno == 2:
err('Unable to find', command)
return 2
else:
raise
def fork_shell(env, shellcmd, cwd):
or_ctrld = '' if windows else "or 'Ctrl+D' "
err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
"to return.", sep='')
if 'VIRTUAL_ENV' in os.environ:
err("Be aware that this environment will be nested on top "
"of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
# bash is a special little snowflake, and prevent_path_errors cannot work there
# https://github.com/berdario/pew/issues/58#issuecomment-102182346
bashrcpath = expandpath('~/.bashrc')
if bashrcpath.exists():
with NamedTemporaryFile('w+') as rcfile:
with bashrcpath.open() as bashrc:
rcfile.write(bashrc.read())
rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
rcfile.flush()
return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
else:
return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
shell_cmd = ['cmd']
cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
if expandpath(cmderrc_path).exists():
shell_cmd += ['/k', cmderrc_path]
if cwd:
os.environ['CMDER_START'] = cwd
return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
env = str(env)
shell = _detect_shell()
shell_name = Path(shell).stem
if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
# On Windows the PATH is usually set with System Utility
# so we won't worry about trying to check mistakes there
shell_check = (sys.executable + ' -c "from pew.pew import '
'prevent_path_errors; prevent_path_errors()"')
try:
inve(env, shell, '-c', shell_check)
except CalledProcessError:
return
if shell_name == 'bash':
return fork_bash(env, cwd)
elif shell_name == 'Cmder':
return fork_cmder(env, cwd)
else:
return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
requirements=None, rest=[]):
if python:
rest = ["--python=%s" % python] + rest
path = (workon_home / envname).absolute()
try:
check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
except (CalledProcessError, KeyboardInterrupt):
rmvirtualenvs([envname])
raise
else:
if project:
setvirtualenvproject(envname, project.absolute())
if requirements:
inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
if packages:
inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python')
parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
return parser
def new_cmd(argv):
"""Create a new environment, in $WORKON_HOME."""
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname)
def rmvirtualenvs(envs):
error_happened = False
for env in envs:
env = workon_home / env
if os.environ.get('VIRTUAL_ENV') == str(env):
err("ERROR: You cannot remove the active environment (%s)." % env)
error_happened = True
break
try:
shutil.rmtree(str(env))
except OSError as e:
err("Error while trying to remove the {0} env: \n{1}".format
(env, e.strerror))
error_happened = True
return error_happened
def rm_cmd(argv):
"""Remove one or more environment, from $WORKON_HOME."""
if len(argv) < 1:
sys.exit("Please specify an environment")
return rmvirtualenvs(argv)
def packages(site_packages):
nodes = site_packages.iterdir()
return set([x.stem.split('-')[0] for x in nodes]) - set(['__pycache__'])
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
return sorted(set(env.parts[-3] for env in
workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def ls_cmd(argv):
"""List available environments."""
parser = argparse.ArgumentParser()
p_group = parser.add_mutually_exclusive_group()
p_group.add_argument('-b', '--brief', action='store_false')
p_group.add_argument('-l', '--long', action='store_true')
args = parser.parse_args(argv)
lsvirtualenv(args.long)
def parse_envname(argv, no_arg_callback):
if len(argv) < 1 or argv[0] is None:
no_arg_callback()
env = argv[0]
if env.startswith('/'):
sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
if not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
else:
return env
def workon_cmd(argv):
"""List or change working virtual environments."""
parser = argparse.ArgumentParser(prog='pew workon')
parser.add_argument('envname', nargs='?')
parser.add_argument(
'-n', '--no-cd', action='store_true',
help=('Do not change working directory to project directory after '
'activating virtualenv.')
)
args = parser.parse_args(argv)
def list_and_exit():
lsvirtualenv(False)
sys.exit(0)
env = parse_envname([args.envname], list_and_exit)
# Check if the virtualenv has an associated project directory and in
# this case, use it as the current working directory.
project_dir = get_project_dir(env)
if project_dir is None or args.no_cd:
project_dir = os.getcwd()
return shell(env, cwd=project_dir)
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
if not env:
sys.exit('ERROR: no virtualenv active')
else:
env_python = workon_home / env / env_bin_dir / 'python'
return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
"""Add the specified directories to the Python path for the currently active virtualenv.
This will be done by placing the directory names in a path file named
"virtualenv_path_extensions.pth" inside the virtualenv's site-packages
directory; if this file does not exists, it will be created first.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='remove', action='store_true')
parser.add_argument('dirs', nargs='+')
args = parser.parse_args(argv)
extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
if not extra_paths.exists():
with extra_paths.open('w') as extra:
extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
def rewrite(f):
with extra_paths.open('r+') as extra:
to_write = f(extra.readlines())
extra.seek(0)
extra.truncate()
extra.writelines(to_write)
if args.remove:
rewrite(lambda ls: [line for line in ls if line not in new_paths])
else:
rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
print(sitepackages_dir())
def lssitepackages_cmd(argv):
"""Show the content of the site-packages directory of the current virtualenv."""
site = sitepackages_dir()
print(*sorted(site.iterdir()), sep=os.linesep)
extra_paths = site / '_virtualenv_path_extensions.pth'
if extra_paths.exists():
print('from _virtualenv_path_extensions.pth:')
with extra_paths.open() as extra:
print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
"""Toggle the current virtualenv between having and not having access to the global site-packages."""
quiet = argv == ['-q']
site = sitepackages_dir()
ngsp_file = site.parent / 'no-global-site-packages.txt'
if ngsp_file.exists():
ngsp_file.unlink()
if not quiet:
print('Enabled global site-packages')
else:
with ngsp_file.open('w'):
if not quiet:
print('Disabled global site-packages')
def cp_cmd(argv):
"""Duplicate the named virtualenv to make a new one."""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target', nargs='?')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
args = parser.parse_args(argv)
target_name = copy_virtualenv_project(args.source, args.target)
if args.activate:
shell(target_name)
def copy_virtualenv_project(source, target):
source = expandpath(source)
if not source.exists():
source = workon_home / source
if not source.exists():
sys.exit('Please provide a valid virtualenv to copy')
target_name = target or source.name
target = workon_home / target_name
if target.exists():
sys.exit('%s virtualenv already exists in %s.' % (
target_name, workon_home
))
print('Copying {0} in {1}'.format(source, target_name))
clone_virtualenv(str(source), str(target))
return target_name
def rename_cmd(argv):
"""Rename a virtualenv"""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target')
pargs = parser.parse_args(argv)
copy_virtualenv_project(pargs.source, pargs.target)
return rmvirtualenvs([pargs.source])
def setvirtualenvproject(env, project):
print('Setting project for {0} to {1}'.format(env, project))
with (workon_home / env / '.project').open('wb') as prj:
prj.write(str(project).encode())
def setproject_cmd(argv):
"""Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
args = dict(enumerate(argv))
project = os.path.abspath(args.get(1, '.'))
env = args.get(0, os.environ.get('VIRTUAL_ENV'))
if not env:
sys.exit('pew setproject [virtualenv] [project_path]')
if not (workon_home / env).exists():
sys.exit("Environment '%s' doesn't exist." % env)
if not os.path.isdir(project):
sys.exit('pew setproject: %s does not exist' % project)
setvirtualenvproject(env, project)
def getproject_cmd(argv):
"""Print a virtualenv's project directory, if set.
If called without providing a virtualenv name as argument, print the
current virtualenv's project directory.
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Print an environment's project directory.",
)
parser.add_argument(
'envname',
nargs='?',
default=os.environ.get('VIRTUAL_ENV'),
help=(
'The name of the environment to return the project directory '
'for. If omitted, will use the currently active environment.'
),
)
args = parser.parse_args(argv)
# Now, do the actual work
if not args.envname:
sys.exit('ERROR: no virtualenv active')
if not (workon_home / args.envname).exists():
sys.exit("ERROR: Environment '{0}' does not exist."
.format(args.envname))
project_dir = get_project_dir(args.envname)
if project_dir is None:
sys.exit("ERROR: no project directory set for Environment '{0}'"
.format(args.envname))
print(project_dir)
def mkproject_cmd(argv):
"""Create a new project directory and its associated virtualenv."""
if '-l' in argv or '--list' in argv:
templates = [t.name[9:] for t in workon_home.glob("template_*")]
print("Available project templates:", *templates, sep='\n')
return
parser = mkvirtualenv_argparser()
parser.add_argument('envname')
parser.add_argument(
'-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
parser.add_argument(
'-l', '--list', action='store_true', help='List available templates.')
args, rest = parser.parse_known_args(argv)
projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
if not projects_home.exists():
sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
project = (projects_home / args.envname).absolute()
if project.exists():
sys.exit('Project %s already exists.' % args.envname)
mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
args.requirements, rest)
project.mkdir()
for template_name in args.templates:
template = workon_home / ("template_" + template_name)
inve(args.envname, str(template), args.envname, str(project))
if args.activate:
shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
"""Create a temporary virtualenv."""
parser = mkvirtualenv_argparser()
env = '.'
while (workon_home / env).exists():
env = hex(random.getrandbits(64))[2:-1]
args, rest = parser.parse_known_args(argv)
mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
rest=rest)
print('This is a temporary environment. It will be deleted when you exit')
try:
if args.activate:
# only used for testing on windows
shell(env)
finally:
return rmvirtualenvs([env])
def wipeenv_cmd(argv):
"""Remove all installed packages from the current (or supplied) env."""
env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
if not env:
sys.exit('ERROR: no virtualenv active')
elif not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
else:
env_pip = str(workon_home / env / env_bin_dir / 'pip')
all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
ignored = sorted(all_pkgs - pkgs)
pkgs = set(p.split("==")[0] for p in pkgs)
to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
if to_remove:
print("Ignoring:\n %s" % "\n ".join(ignored))
print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
return inve(env, 'pip', 'uninstall', '-y', *to_remove)
else:
print("Nothing to remove")
def inall_cmd(argv):
"""Run a command in each virtualenv."""
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors)
def restore_cmd(argv):
    """Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
    if len(argv) < 1:
        sys.exit('You must provide a valid virtualenv to target')
    env = argv[0]
    path = workon_home / env
    py = path / env_bin_dir / ('python.exe' if windows else 'python')
    # resolve() follows the env's python symlink so we rebuild with the
    # exact interpreter name (e.g. python3.6) the env was created from
    exact_py = py.resolve().name
    # re-running virtualenv over an existing env repairs its scripts/symlinks
    return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
"""Print the path for the virtualenv directory"""
env = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
print(workon_home / env)
def install_cmd(argv):
'''Use Pythonz to download and build the specified Python version'''
installer = InstallCommand()
options, versions = installer.parser.parse_args(argv)
if len(versions) != 1:
installer.parser.print_help()
sys.exit(1)
else:
try:
actual_installer = PythonInstaller.get_installer(versions[0], options)
return actual_installer.install()
except AlreadyInstalledError as e:
print(e)
def uninstall_cmd(argv):
'''Use Pythonz to uninstall the specified Python version'''
return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
    '''List the pythons installed by Pythonz (or all the installable ones)'''
    # ListPythons is a pythonz command object (or a stub when pythonz is absent)
    command = ListPythons()
    return command.run(argv)
def locate_python_cmd(argv):
    '''Locate the path for the python version installed by Pythonz'''
    # delegate straight to the pythonz command (stubbed out when unavailable)
    command = LocatePython()
    return command.run(argv)
def version_cmd(argv):
    """Prints current pew version"""
    # imported lazily: pkg_resources is slow to import and only needed here
    import pkg_resources

    try:
        __version__ = pkg_resources.get_distribution('pew').version
    except pkg_resources.DistributionNotFound:
        # pew itself is not visible as an installed distribution
        __version__ = 'unknown'
        print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)

    print(__version__)
def prevent_path_errors():
    """Exit with a diagnostic when a virtualenv claims to be active but is broken.

    Triggered when $VIRTUAL_ENV is set yet check_path() reports that the
    env's bin directory is not actually first on $PATH.
    """
    if 'VIRTUAL_ENV' in os.environ and not check_path():
        sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
    """Offer to hook pew's shell config into the user's rc file on first run.

    Only bash, zsh and fish are supported (see supported_shell()). When the
    shell's rc file exists we offer to edit it; otherwise we print the line
    the user should add manually. Always pauses for confirmation before
    continuing with the originally requested command.
    """
    shell = supported_shell()
    if shell:
        # fish has its own `source` syntax without command substitution quoting
        if shell == 'fish':
            source_cmd = 'source (pew shell_config)'
        else:
            source_cmd = 'source "$(pew shell_config)"'
        rcpath = expandpath({'bash': '~/.bashrc'
                            , 'zsh': '~/.zshrc'
                            , 'fish': '~/.config/fish/config.fish'}[shell])
        if rcpath.exists():
            update_config_file(rcpath, source_cmd)
        else:
            # fixed message: was "want source shell competions ... Add the"
            print("It seems that you're running pew for the first time\n"
                  "If you want to source shell completions and update your prompt, "
                  "add the following line to your shell config file:\n %s" % source_cmd)
    print('\nWill now continue with the command:', *sys.argv[1:])
    input('[enter]')
def update_config_file(rcpath, source_cmd):
    """Ask the user whether to append *source_cmd* to the rc file at *rcpath*.

    Does nothing when the line is already present. Accepts 'y', 'n' or an
    empty answer (empty means no).
    """
    with rcpath.open('r+') as rcfile:
        # compare against stripped lines so indentation/trailing spaces don't matter
        if source_cmd not in (line.strip() for line in rcfile.readlines()):
            choice = 'X'
            while choice not in ('y', '', 'n'):
                choice = input("It seems that you're running pew for the first time\n"
                               "do you want to modify %s to source completions and"
                               " update your prompt? [y/N]\n> " % rcpath).lower()
            if choice == 'y':
                # readlines() above left the file position at EOF, so this appends
                rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
                print('Done')
            else:
                print('\nOk, if you want to do it manually, just add\n %s\nat'
                      ' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
    """Print every command name with the first line of its docstring,
    wrapped and aligned to the terminal width."""
    pad = max(map(len, cmds)) + 3
    columns, _ = get_terminal_size()
    print('Available commands:\n')
    for name, fun in sorted(cmds.items()):
        doc = fun.__doc__
        if doc:
            summary = doc.splitlines()[0]
            print(textwrap.fill(
                summary,
                columns or 1000,
                initial_indent=(' {0}: '.format(name)).ljust(pad),
                subsequent_indent=pad * ' '))
        else:
            print(' ' + name)
def pew():
    """Entry point: dispatch sys.argv[1] to the matching *_cmd function."""
    first_run = makedirs_and_symlink_if_needed(workon_home)
    # only offer interactive first-run setup when attached to a terminal
    if first_run and sys.stdin.isatty():
        first_run_setup()

    # every module-level function named <name>_cmd becomes command `pew <name>`
    cmds = dict((cmd[:-4], fun)
                for cmd, fun in globals().items() if cmd.endswith('_cmd'))
    if sys.argv[1:]:
        if sys.argv[1] in cmds:
            command = cmds[sys.argv[1]]
            try:
                # the command's return value becomes the process exit status
                return command(sys.argv[2:])
            except CalledProcessError as e:
                return e.returncode
            except KeyboardInterrupt:
                pass
        else:
            err("ERROR: command", sys.argv[1], "does not exist.")
            print_commands(cmds)
            sys.exit(1)
    else:
        print_commands(cmds)
|
berdario/pew | pew/pew.py | restore_cmd | python | def restore_cmd(argv):
if len(argv) < 1:
sys.exit('You must provide a valid virtualenv to target')
env = argv[0]
path = workon_home / env
py = path / env_bin_dir / ('python.exe' if windows else 'python')
exact_py = py.resolve().name
return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py]) | Try to restore a broken virtualenv by reinstalling the same python version on top of it | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L674-L685 | null | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
    """Create the workon home directory if missing.

    Returns True when the directory was created (i.e. this looks like the
    first run), False when it already existed or we don't own it.
    """
    if not workon_home.exists() and own(workon_home):
        workon_home.mkdir(parents=True)
        # keep the traditional ~/.virtualenvs location working as a symlink,
        # but only when the user didn't choose a custom location themselves
        link = expandpath('~/.virtualenvs')
        if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
           'XDG_DATA_HOME' not in os.environ and not link.exists():
            link.symlink_to(str(workon_home))
        return True
    else:
        return False
pew_site = Path(__file__).parent
def supported_shell():
    """Return the basename of $SHELL when it is bash, zsh or fish, else None."""
    current = Path(os.environ.get('SHELL', '')).stem
    return current if current in ('bash', 'zsh', 'fish') else None
def shell_config_cmd(argv):
    "Prints the path for the current $SHELL helper file"
    # only bash/zsh/fish ship an init file under pew/shell_config
    shell = supported_shell()
    if shell:
        print(pew_site / 'shell_config' / ('init.' + shell))
    else:
        err('Completions and prompts are unavailable for %s' %
            repr(os.environ.get('SHELL', '')))
def deploy_completions():
    """Copy pew's completion files to the system-wide locations.

    Needs write access to /etc and /usr/local (typically run as root).
    """
    completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
                   'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
                   'complete.fish': Path('/etc/fish/completions/pew.fish')}
    for comp, dest in completions.items():
        if not dest.parent.exists():
            dest.parent.mkdir(parents=True)
        shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
    """Return *env*'s associated project directory, or None.

    The association is stored as a plain path in the env's ``.project``
    file; a stale entry (directory no longer exists) is reported on stderr
    and treated as missing.
    """
    project_file = workon_home / env / '.project'
    if project_file.exists():
        with project_file.open() as f:
            project_dir = f.readline().strip()
            if os.path.exists(project_dir):
                return project_dir
            else:
                err('Corrupted or outdated:', project_file, '\nDirectory',
                    project_dir, "doesn't exist.")
def unsetenv(key):
    """Remove *key* from the process environment; a no-op when unset."""
    os.environ.pop(key, None)
def compute_path(env):
    """Return $PATH with *env*'s bin (or Scripts) directory prepended."""
    env_bin = workon_home / env / env_bin_dir
    return str(env_bin) + os.pathsep + os.environ['PATH']
def inve(env, command, *args, **kwargs):
    """Run a command in the given virtual environment.
    Pass additional keyword arguments to ``subprocess.check_call()``."""
    # we don't strictly need to restore the environment, since pew runs in
    # its own process, but it feels like the right thing to do
    with temp_environ():
        os.environ['VIRTUAL_ENV'] = str(workon_home / env)
        # prepend the env's bin directory so its python/pip win the lookup
        os.environ['PATH'] = compute_path(env)
        # either variable would make the child python ignore the virtualenv
        unsetenv('PYTHONHOME')
        unsetenv('__PYVENV_LAUNCHER__')
        try:
            return check_call([command] + list(args), shell=windows, **kwargs)
        # need to have shell=True on windows, otherwise the PYTHONPATH
        # won't inherit the PATH
        except OSError as e:
            if e.errno == 2:
                # errno 2 == ENOENT: the executable was not found on PATH
                err('Unable to find', command)
                return 2
            else:
                raise
def fork_shell(env, shellcmd, cwd):
    """Print the subshell banner, then spawn *shellcmd* inside *env*."""
    if windows:
        or_ctrld = ''
    else:
        or_ctrld = "or 'Ctrl+D' "
    err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
        "to return.", sep='')
    active = os.environ.get('VIRTUAL_ENV')
    if active is not None:
        # warn about nesting: the outer env stays on PATH behind the new one
        err("Be aware that this environment will be nested on top "
            "of '%s'" % Path(active).name)
    return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
    """Spawn bash in *env*, re-sourcing a patched copy of ~/.bashrc.

    ~/.bashrc may reset PATH, which would undo the activation; so we copy
    it to a temp file and append an export that puts the env PATH back last.
    """
    # bash is a special little snowflake, and prevent_path_errors cannot work there
    # https://github.com/berdario/pew/issues/58#issuecomment-102182346
    bashrcpath = expandpath('~/.bashrc')
    if bashrcpath.exists():
        with NamedTemporaryFile('w+') as rcfile:
            with bashrcpath.open() as bashrc:
                rcfile.write(bashrc.read())
            rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
            rcfile.flush()
            return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
    else:
        return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
    """Spawn a Cmder-flavoured cmd.exe inside *env*."""
    cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
    # run Cmder's init script when it is present, a plain cmd otherwise
    if expandpath(cmderrc_path).exists():
        shell_cmd = ['cmd', '/k', cmderrc_path]
    else:
        shell_cmd = ['cmd']
    if cwd:
        os.environ['CMDER_START'] = cwd
    return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
    """Spawn an interactive subshell with *env* activated.

    For shells we can sanity-check, first run prevent_path_errors inside
    the env and abort when activation would be broken; bash and Cmder get
    dedicated forking helpers, every other shell is launched directly.
    """
    env = str(env)
    shell = _detect_shell()
    shell_name = Path(shell).stem
    if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
        # On Windows the PATH is usually set with System Utility
        # so we won't worry about trying to check mistakes there
        shell_check = (sys.executable + ' -c "from pew.pew import '
                       'prevent_path_errors; prevent_path_errors()"')
        try:
            inve(env, shell, '-c', shell_check)
        except CalledProcessError:
            # the check already printed a diagnostic; don't start the shell
            return
    if shell_name == 'bash':
        return fork_bash(env, cwd)
    elif shell_name == 'Cmder':
        return fork_cmder(env, cwd)
    else:
        return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=None, project=None,
                 requirements=None, rest=None):
    """Create the virtualenv *envname* and run the optional post-create steps.

    Args:
        envname: name of the environment, created under workon_home.
        python: interpreter to build the env with (forwarded as --python=).
        packages: packages to pip-install into the new env.
        project: project path to associate with the env.
        requirements: pip requirements file to install from.
        rest: extra arguments passed through to virtualenv verbatim.

    A failed or interrupted creation removes the half-built env, then
    re-raises.
    """
    # the original used mutable defaults (packages=[], rest=[]), which are
    # shared between calls; None sentinels are the safe equivalent
    packages = [] if packages is None else packages
    rest = [] if rest is None else rest
    if python:
        rest = ["--python=%s" % python] + rest
    path = (workon_home / envname).absolute()
    try:
        check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
    except (CalledProcessError, KeyboardInterrupt):
        # don't leave a broken, half-created environment behind
        rmvirtualenvs([envname])
        raise
    else:
        if project:
            setvirtualenvproject(envname, project.absolute())
        if requirements:
            inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
        if packages:
            inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
    """Return the ArgumentParser shared by new/mkproject/mktmpenv."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--python')
    parser.add_argument(
        '-i', action='append', dest='packages',
        help='Install a package after the environment is created. '
             'This option may be repeated.')
    parser.add_argument(
        '-r', dest='requirements',
        help='Provide a pip requirements file to install a base set of '
             'packages into the new environment.')
    parser.add_argument(
        '-d', '--dont-activate', action='store_false', default=True,
        dest='activate',
        help="After creation, continue with the existing shell (don't "
             "activate the new environment).")
    return parser
def new_cmd(argv):
    """Create a new environment, in $WORKON_HOME."""
    parser = mkvirtualenv_argparser()
    parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
    parser.add_argument('envname')
    # unknown options in `rest` are forwarded verbatim to virtualenv
    args, rest = parser.parse_known_args(argv)
    project = expandpath(args.project) if args.project else None
    mkvirtualenv(args.envname, args.python, args.packages, project,
                 args.requirements, rest)
    if args.activate:
        shell(args.envname)
def rmvirtualenvs(envs):
    """Delete each named env from workon_home.

    Returns True when at least one removal failed. Hitting the currently
    active env aborts the loop, so later entries in *envs* are kept.
    """
    error_happened = False
    for env in envs:
        env = workon_home / env
        if os.environ.get('VIRTUAL_ENV') == str(env):
            err("ERROR: You cannot remove the active environment (%s)." % env)
            error_happened = True
            break
        try:
            shutil.rmtree(str(env))
        except OSError as e:
            # NOTE(review): e.strerror can be None for some OSErrors — confirm
            err("Error while trying to remove the {0} env: \n{1}".format
                (env, e.strerror))
            error_happened = True
    return error_happened
def rm_cmd(argv):
    """Remove one or more environment, from $WORKON_HOME."""
    # at least one env name is required
    if not argv:
        sys.exit("Please specify an environment")
    return rmvirtualenvs(argv)
def packages(site_packages):
    """Return the distribution names found in a site-packages directory."""
    # "name-version.suffix" entries collapse to their bare distribution name
    names = {entry.stem.split('-')[0] for entry in site_packages.iterdir()}
    return names - {'__pycache__'}
def showvirtualenv(env):
    """Print *env*'s python version and installed packages, wrapped to fit
    the terminal width."""
    columns, _ = get_terminal_size()
    pkgs = sorted(packages(sitepackages_dir(env)))
    env_python = workon_home / env / env_bin_dir / 'python'
    # width of the "<env>: " prefix; used to indent the package list
    l = len(env) + 2
    version = invoke(str(env_python), '-V')
    # `python -V` output may land on stdout or stderr depending on the
    # interpreter, so both streams are combined
    version = ' - '.join((version.out + version.err).splitlines())
    print(env, ': ', version, sep='')
    print(textwrap.fill(' '.join(pkgs),
                        width=columns-l,
                        initial_indent=(l * ' '),
                        subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
    # show the named env, or the active one when no name was given
    try:
        target = argv[0]
    except IndexError:
        if 'VIRTUAL_ENV' in os.environ:
            target = Path(os.environ['VIRTUAL_ENV']).name
        else:
            sys.exit('pew show [env]')
    showvirtualenv(target)
def lsenvs():
    """Return the sorted names of envs that contain a python executable."""
    pattern = os.path.join('*', env_bin_dir, 'python*')
    # parts[-3] is the env directory name: <env>/<bin>/<python*>
    names = {hit.parts[-3] for hit in workon_home.glob(pattern)}
    return sorted(names)
def lsvirtualenv(verbose):
    """List all envs: a detailed entry per env when *verbose*, columns otherwise."""
    envs = lsenvs()
    if verbose:
        for env in envs:
            showvirtualenv(env)
    else:
        print_virtualenvs(*envs)
def ls_cmd(argv):
    """List available environments."""
    parser = argparse.ArgumentParser()
    # -b and -l are mutually exclusive; brief is the default
    exclusive = parser.add_mutually_exclusive_group()
    exclusive.add_argument('-b', '--brief', action='store_false')
    exclusive.add_argument('-l', '--long', action='store_true')
    options = parser.parse_args(argv)
    lsvirtualenv(options.long)
def parse_envname(argv, no_arg_callback):
    """Validate and return the env name given in argv[0].

    Calls *no_arg_callback* (expected to exit) when argv is empty, and
    exits with an error for absolute paths or non-existent environments.
    """
    if len(argv) < 1 or argv[0] is None:
        no_arg_callback()

    env = argv[0]
    if env.startswith('/'):
        sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
    if not (workon_home / env).exists():
        sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
    else:
        return env
def workon_cmd(argv):
    """List or change working virtual environments."""
    parser = argparse.ArgumentParser(prog='pew workon')
    parser.add_argument('envname', nargs='?')
    parser.add_argument(
        '-n', '--no-cd', action='store_true',
        help=('Do not change working directory to project directory after '
              'activating virtualenv.')
    )
    args = parser.parse_args(argv)

    # with no envname, just list the available envs and stop
    def list_and_exit():
        lsvirtualenv(False)
        sys.exit(0)

    env = parse_envname([args.envname], list_and_exit)

    # Check if the virtualenv has an associated project directory and in
    # this case, use it as the current working directory.
    project_dir = get_project_dir(env)
    if project_dir is None or args.no_cd:
        project_dir = os.getcwd()
    return shell(env, cwd=project_dir)
def sitepackages_dir(env=None):
    """Return the site-packages directory of *env* as a Path.

    Defaults to the currently active virtualenv. The default is now
    resolved at call time: the previous signature evaluated
    ``os.environ.get('VIRTUAL_ENV')`` once at import time, so activations
    happening after this module was imported were silently ignored.
    """
    if env is None:
        env = os.environ.get('VIRTUAL_ENV')
    if not env:
        sys.exit('ERROR: no virtualenv active')
    # ask the env's own interpreter where its site-packages lives
    env_python = workon_home / env / env_bin_dir / 'python'
    return Path(invoke(str(env_python), '-c',
                       'import distutils; print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
    """Add the specified directories to the Python path for the currently active virtualenv.

    This will be done by placing the directory names in a path file named
    "_virtualenv_path_extensions.pth" inside the virtualenv's site-packages
    directory; if this file does not exist, it will be created first.
    With -d, the directories are removed from that file instead.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', dest='remove', action='store_true')
    parser.add_argument('dirs', nargs='+')
    args = parser.parse_args(argv)

    extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
    new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
    if not extra_paths.exists():
        # bootstrap header/footer: the first line records how long sys.path
        # was before our entries, the last line moves them before any
        # setuptools-inserted eggs (virtualenvwrapper uses the same trick)
        with extra_paths.open('w') as extra:
            extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')

    def rewrite(f):
        # read-modify-write the .pth file through transformation *f*
        with extra_paths.open('r+') as extra:
            to_write = f(extra.readlines())
            extra.seek(0)
            extra.truncate()
            extra.writelines(to_write)

    if args.remove:
        rewrite(lambda ls: [line for line in ls if line not in new_paths])
    else:
        # insert right after the header line so the footer stays last
        rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
    # print the site-packages directory of the active virtualenv
    # (no docstring on purpose: it would change the `pew` help output)
    print(sitepackages_dir())
def lssitepackages_cmd(argv):
    """Show the content of the site-packages directory of the current virtualenv."""
    site = sitepackages_dir()
    print(*sorted(site.iterdir()), sep=os.linesep)
    # also show the extra-path entries added via `pew add`, if any
    extra_paths = site / '_virtualenv_path_extensions.pth'
    if not extra_paths.exists():
        return
    print('from _virtualenv_path_extensions.pth:')
    with extra_paths.open() as extra:
        print(extra.read())
def toggleglobalsitepackages_cmd(argv):
    """Toggle the current virtualenv between having and not having access to the global site-packages."""
    # -q suppresses the status message
    quiet = argv == ['-q']
    site = sitepackages_dir()
    # virtualenv marks isolation with this sentinel file next to site-packages:
    # present -> global site-packages hidden, absent -> visible
    ngsp_file = site.parent / 'no-global-site-packages.txt'
    if ngsp_file.exists():
        ngsp_file.unlink()
        if not quiet:
            print('Enabled global site-packages')
    else:
        with ngsp_file.open('w'):
            if not quiet:
                print('Disabled global site-packages')
def cp_cmd(argv):
    """Duplicate the named virtualenv to make a new one."""
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('target', nargs='?')
    parser.add_argument(
        '-d', '--dont-activate', action='store_false', default=True,
        dest='activate',
        help="After creation, continue with the existing shell (don't "
             "activate the new environment).")
    options = parser.parse_args(argv)
    new_name = copy_virtualenv_project(options.source, options.target)
    if options.activate:
        shell(new_name)
def copy_virtualenv_project(source, target):
    """Clone env *source* (a path or an env name) as *target* in workon_home.

    Returns the new environment's name; exits when the source is missing
    or the target already exists.
    """
    source = expandpath(source)
    if not source.exists():
        # not a filesystem path: fall back to treating it as an env name
        source = workon_home / source
        if not source.exists():
            sys.exit('Please provide a valid virtualenv to copy')

    # default target name: the source directory's basename
    target_name = target or source.name

    target = workon_home / target_name

    if target.exists():
        sys.exit('%s virtualenv already exists in %s.' % (
            target_name, workon_home
        ))

    print('Copying {0} in {1}'.format(source, target_name))
    clone_virtualenv(str(source), str(target))
    return target_name
def rename_cmd(argv):
    """Rename a virtualenv"""
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('target')
    options = parser.parse_args(argv)
    # a rename is a copy followed by removal of the original env
    copy_virtualenv_project(options.source, options.target)
    return rmvirtualenvs([options.source])
def setvirtualenvproject(env, project):
    """Record *project* as the directory associated with *env*."""
    print('Setting project for {0} to {1}'.format(env, project))
    project_file = workon_home / env / '.project'
    with project_file.open('wb') as prj:
        prj.write(str(project).encode())
def setproject_cmd(argv):
    """Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
    # argv[0] -> env (defaults to the active env),
    # argv[1] -> project path (defaults to the current directory)
    args = dict(enumerate(argv))
    project = os.path.abspath(args.get(1, '.'))
    env = args.get(0, os.environ.get('VIRTUAL_ENV'))
    if not env:
        sys.exit('pew setproject [virtualenv] [project_path]')
    if not (workon_home / env).exists():
        sys.exit("Environment '%s' doesn't exist." % env)
    if not os.path.isdir(project):
        sys.exit('pew setproject: %s does not exist' % project)
    setvirtualenvproject(env, project)
def getproject_cmd(argv):
    """Print a virtualenv's project directory, if set.

    If called without providing a virtualenv name as argument, print the
    current virtualenv's project directory.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description="Print an environment's project directory.",
    )
    parser.add_argument(
        'envname',
        nargs='?',
        default=os.environ.get('VIRTUAL_ENV'),
        help=(
            'The name of the environment to return the project directory '
            'for. If omitted, will use the currently active environment.'
        ),
    )
    args = parser.parse_args(argv)

    # Now, do the actual work
    if not args.envname:
        sys.exit('ERROR: no virtualenv active')
    if not (workon_home / args.envname).exists():
        sys.exit("ERROR: Environment '{0}' does not exist."
                 .format(args.envname))
    # get_project_dir returns None for envs without a .project file
    project_dir = get_project_dir(args.envname)
    if project_dir is None:
        sys.exit("ERROR: no project directory set for Environment '{0}'"
                 .format(args.envname))
    print(project_dir)
def mkproject_cmd(argv):
    """Create a new project directory and its associated virtualenv."""
    # handle -l/--list before real parsing: just show template_* dirs and stop
    if '-l' in argv or '--list' in argv:
        templates = [t.name[9:] for t in workon_home.glob("template_*")]
        print("Available project templates:", *templates, sep='\n')
        return

    parser = mkvirtualenv_argparser()
    parser.add_argument('envname')
    parser.add_argument(
        '-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
    parser.add_argument(
        '-l', '--list', action='store_true', help='List available templates.')
    args, rest = parser.parse_known_args(argv)

    projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
    if not projects_home.exists():
        sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)

    project = (projects_home / args.envname).absolute()
    if project.exists():
        sys.exit('Project %s already exists.' % args.envname)

    mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
                 args.requirements, rest)

    project.mkdir()

    # templates are executables invoked as: template <envname> <project dir>
    for template_name in args.templates:
        template = workon_home / ("template_" + template_name)
        inve(args.envname, str(template), args.envname, str(project))
    if args.activate:
        shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
    """Create a temporary virtualenv.

    The env gets a random name and is removed again when the spawned
    shell exits.
    """
    parser = mkvirtualenv_argparser()
    env = '.'
    while (workon_home / env).exists():
        # random 64-bit hex name; the old `hex(...)[2:-1]` slice was a
        # Python 2 leftover (stripping the trailing 'L') that silently
        # dropped the last hex digit on Python 3
        env = format(random.getrandbits(64), 'x')

    args, rest = parser.parse_known_args(argv)
    mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
                 rest=rest)
    print('This is a temporary environment. It will be deleted when you exit')
    try:
        if args.activate:
            # only used for testing on windows
            shell(env)
    finally:
        # clean up unconditionally, but don't `return` inside the finally
        # block: that would silently swallow any exception raised by shell()
        result = rmvirtualenvs([env])
    return result
def wipeenv_cmd(argv):
"""Remove all installed packages from the current (or supplied) env."""
env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
if not env:
sys.exit('ERROR: no virtualenv active')
elif not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
else:
env_pip = str(workon_home / env / env_bin_dir / 'pip')
all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
ignored = sorted(all_pkgs - pkgs)
pkgs = set(p.split("==")[0] for p in pkgs)
to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
if to_remove:
print("Ignoring:\n %s" % "\n ".join(ignored))
print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
return inve(env, 'pip', 'uninstall', '-y', *to_remove)
else:
print("Nothing to remove")
def inall_cmd(argv):
"""Run a command in each virtualenv."""
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors)
def in_cmd(argv):
"""Run a command in the given virtualenv."""
if len(argv) == 1:
return workon_cmd(argv)
parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
return inve(*argv)
def dir_cmd(argv):
    """Print the path for the virtualenv directory"""
    env = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
    print(workon_home / env)
def install_cmd(argv):
'''Use Pythonz to download and build the specified Python version'''
installer = InstallCommand()
options, versions = installer.parser.parse_args(argv)
if len(versions) != 1:
installer.parser.print_help()
sys.exit(1)
else:
try:
actual_installer = PythonInstaller.get_installer(versions[0], options)
return actual_installer.install()
except AlreadyInstalledError as e:
print(e)
def uninstall_cmd(argv):
'''Use Pythonz to uninstall the specified Python version'''
return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
'''List the pythons installed by Pythonz (or all the installable ones)'''
return ListPythons().run(argv)
def locate_python_cmd(argv):
'''Locate the path for the python version installed by Pythonz'''
return LocatePython().run(argv)
def version_cmd(argv):
"""Prints current pew version"""
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('pew').version
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
print(__version__)
def prevent_path_errors():
if 'VIRTUAL_ENV' in os.environ and not check_path():
sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
shell = supported_shell()
if shell:
if shell == 'fish':
source_cmd = 'source (pew shell_config)'
else:
source_cmd = 'source "$(pew shell_config)"'
rcpath = expandpath({'bash': '~/.bashrc'
, 'zsh': '~/.zshrc'
, 'fish': '~/.config/fish/config.fish'}[shell])
if rcpath.exists():
update_config_file(rcpath, source_cmd)
else:
print("It seems that you're running pew for the first time\n"
"If you want source shell competions and update your prompt, "
"Add the following line to your shell config file:\n %s" % source_cmd)
print('\nWill now continue with the command:', *sys.argv[1:])
input('[enter]')
def update_config_file(rcpath, source_cmd):
with rcpath.open('r+') as rcfile:
if source_cmd not in (line.strip() for line in rcfile.readlines()):
choice = 'X'
while choice not in ('y', '', 'n'):
choice = input("It seems that you're running pew for the first time\n"
"do you want to modify %s to source completions and"
" update your prompt? [y/N]\n> " % rcpath).lower()
if choice == 'y':
rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
print('Done')
else:
print('\nOk, if you want to do it manually, just add\n %s\nat'
' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
longest = max(map(len, cmds)) + 3
columns, _ = get_terminal_size()
print('Available commands:\n')
for cmd, fun in sorted(cmds.items()):
if fun.__doc__:
print(textwrap.fill(
fun.__doc__.splitlines()[0],
columns or 1000,
initial_indent=(' {0}: '.format(cmd)).ljust(longest),
subsequent_indent=longest * ' '))
else:
print(' ' + cmd)
def pew():
first_run = makedirs_and_symlink_if_needed(workon_home)
if first_run and sys.stdin.isatty():
first_run_setup()
cmds = dict((cmd[:-4], fun)
for cmd, fun in globals().items() if cmd.endswith('_cmd'))
if sys.argv[1:]:
if sys.argv[1] in cmds:
command = cmds[sys.argv[1]]
try:
return command(sys.argv[2:])
except CalledProcessError as e:
return e.returncode
except KeyboardInterrupt:
pass
else:
err("ERROR: command", sys.argv[1], "does not exist.")
print_commands(cmds)
sys.exit(1)
else:
print_commands(cmds)
|
berdario/pew | pew/pew.py | dir_cmd | python | def dir_cmd(argv):
env = parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
print(workon_home / env) | Print the path for the virtualenv directory | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L688-L691 | [
"def parse_envname(argv, no_arg_callback):\n if len(argv) < 1 or argv[0] is None:\n no_arg_callback()\n\n env = argv[0]\n if env.startswith('/'):\n sys.exit(\"ERROR: Invalid environment name '{0}'.\".format(env))\n if not (workon_home / env).exists():\n sys.exit(\"ERROR: Environment '{0}' does not exist. Create it with \\\n'pew new {0}'.\".format(env))\n else:\n return env\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
    """Create WORKON_HOME on first run; return True if it was created.

    On POSIX, when no explicit location was configured, also symlink
    ~/.virtualenvs to it (virtualenvwrapper compatibility).
    """
    if not workon_home.exists() and own(workon_home):
        workon_home.mkdir(parents=True)
        link = expandpath('~/.virtualenvs')
        if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
           'XDG_DATA_HOME' not in os.environ and not link.exists():
            link.symlink_to(str(workon_home))
        return True
    else:
        return False
pew_site = Path(__file__).parent
def supported_shell():
    """Return $SHELL's basename if pew ships helpers for it, else None."""
    shell = Path(os.environ.get('SHELL', '')).stem
    if shell in ('bash', 'zsh', 'fish'):
        return shell
def shell_config_cmd(argv):
    "Prints the path for the current $SHELL helper file"
    shell = supported_shell()
    if shell:
        print(pew_site / 'shell_config' / ('init.' + shell))
    else:
        err('Completions and prompts are unavailable for %s' %
            repr(os.environ.get('SHELL', '')))
def deploy_completions():
    """Install pew's shell completion files system-wide (needs root/write access)."""
    completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
                   'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
                   'complete.fish': Path('/etc/fish/completions/pew.fish')}
    for comp, dest in completions.items():
        if not dest.parent.exists():
            dest.parent.mkdir(parents=True)
        shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
    """Return the project directory associated with `env`, or None.

    The association is the first line of WORKON_HOME/<env>/.project; a
    recorded directory that no longer exists is reported and ignored.
    """
    project_file = workon_home / env / '.project'
    if project_file.exists():
        with project_file.open() as f:
            project_dir = f.readline().strip()
        if os.path.exists(project_dir):
            return project_dir
        else:
            err('Corrupted or outdated:', project_file, '\nDirectory',
                project_dir, "doesn't exist.")
def unsetenv(key):
    """Remove `key` from os.environ if present (no-op otherwise)."""
    if key in os.environ:
        del os.environ[key]
def compute_path(env):
    """Return a PATH string with the env's bin/Scripts directory prepended."""
    envdir = workon_home / env
    return os.pathsep.join([
        str(envdir / env_bin_dir),
        os.environ['PATH'],
    ])
def inve(env, command, *args, **kwargs):
    """Run a command in the given virtual environment.

    Pass additional keyword arguments to ``subprocess.check_call()``."""
    # we don't strictly need to restore the environment, since pew runs in
    # its own process, but it feels like the right thing to do
    with temp_environ():
        os.environ['VIRTUAL_ENV'] = str(workon_home / env)
        os.environ['PATH'] = compute_path(env)

        unsetenv('PYTHONHOME')
        unsetenv('__PYVENV_LAUNCHER__')

        try:
            return check_call([command] + list(args), shell=windows, **kwargs)
            # need to have shell=True on windows, otherwise the PYTHONPATH
            # won't inherit the PATH
        except OSError as e:
            if e.errno == 2:
                # ENOENT: the executable itself could not be found.
                err('Unable to find', command)
                return 2
            else:
                raise
def fork_shell(env, shellcmd, cwd):
    """Spawn an interactive subshell inside `env`, warning about nesting."""
    or_ctrld = '' if windows else "or 'Ctrl+D' "
    err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
        "to return.", sep='')
    if 'VIRTUAL_ENV' in os.environ:
        err("Be aware that this environment will be nested on top "
            "of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
    return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
    """Spawn a bash subshell with the env's PATH forced via a temp rcfile."""
    # bash is a special little snowflake, and prevent_path_errors cannot work there
    # https://github.com/berdario/pew/issues/58#issuecomment-102182346
    bashrcpath = expandpath('~/.bashrc')
    if bashrcpath.exists():
        with NamedTemporaryFile('w+') as rcfile:
            # Replay the user's bashrc, then append our PATH export last so
            # the user's own PATH manipulation cannot hide the env.
            with bashrcpath.open() as bashrc:
                rcfile.write(bashrc.read())
            rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
            rcfile.flush()
            return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
    else:
        return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
    """Spawn a Cmder (cmd.exe) subshell inside `env` on Windows."""
    shell_cmd = ['cmd']
    cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
    if expandpath(cmderrc_path).exists():
        shell_cmd += ['/k', cmderrc_path]
    if cwd:
        os.environ['CMDER_START'] = cwd
    return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
    """Best-effort detection of the user's shell executable."""
    shell = os.environ.get('SHELL', None)
    if not shell:
        if 'CMDER_ROOT' in os.environ:
            shell = 'Cmder'
        elif windows:
            try:
                _, shell = shellingham.detect_shell()
            except shellingham.ShellDetectionFailure:
                # Fall back to the standard Windows command processor.
                shell = os.environ.get('COMSPEC', 'cmd.exe')
        else:
            shell = 'sh'
    return shell
def shell(env, cwd=None):
    """Start an interactive shell inside `env`, dispatching per shell type."""
    env = str(env)
    shell = _detect_shell()
    shell_name = Path(shell).stem
    if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
        # On Windows the PATH is usually set with System Utility
        # so we won't worry about trying to check mistakes there
        shell_check = (sys.executable + ' -c "from pew.pew import '
                       'prevent_path_errors; prevent_path_errors()"')
        try:
            inve(env, shell, '-c', shell_check)
        except CalledProcessError:
            return
    if shell_name == 'bash':
        return fork_bash(env, cwd)
    elif shell_name == 'Cmder':
        return fork_cmder(env, cwd)
    else:
        return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
                 requirements=None, rest=[]):
    """Create a virtualenv, then optionally bind a project and install packages.

    NOTE(review): the mutable default arguments are kept for interface
    compatibility; they are never mutated here.
    """
    if python:
        rest = ["--python=%s" % python] + rest
    path = (workon_home / envname).absolute()
    try:
        check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
    except (CalledProcessError, KeyboardInterrupt):
        # Don't leave a half-created environment behind.
        rmvirtualenvs([envname])
        raise
    else:
        if project:
            setvirtualenvproject(envname, project.absolute())
        if requirements:
            inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
        if packages:
            inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
    """Build the argparse parser shared by `pew new` and related commands."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--python')
    parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
    parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
    parser.add_argument('-d', '--dont-activate', action='store_false',
                        default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
    return parser
def new_cmd(argv):
    """Create a new environment, in $WORKON_HOME."""
    parser = mkvirtualenv_argparser()
    parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
    parser.add_argument('envname')
    args, rest = parser.parse_known_args(argv)
    project = expandpath(args.project) if args.project else None
    mkvirtualenv(args.envname, args.python, args.packages, project,
                 args.requirements, rest)
    if args.activate:
        shell(args.envname)
def rmvirtualenvs(envs):
    """Delete the given env directories; return True if any removal failed."""
    error_happened = False
    for env in envs:
        env = workon_home / env
        if os.environ.get('VIRTUAL_ENV') == str(env):
            # Refuse to delete the env we are currently running inside.
            err("ERROR: You cannot remove the active environment (%s)." % env)
            error_happened = True
            break
        try:
            shutil.rmtree(str(env))
        except OSError as e:
            err("Error while trying to remove the {0} env: \n{1}".format
                (env, e.strerror))
            error_happened = True
    return error_happened
def rm_cmd(argv):
    """Remove one or more environment, from $WORKON_HOME."""
    if len(argv) < 1:
        sys.exit("Please specify an environment")
    return rmvirtualenvs(argv)
def packages(site_packages):
    """Return the distinct top-level package names found in a site-packages dir."""
    nodes = site_packages.iterdir()
    # e.g. 'name-1.0.dist-info' -> stem 'name-1.0' -> 'name'
    return set([x.stem.split('-')[0] for x in nodes]) - set(['__pycache__'])
def showvirtualenv(env):
    """Print one env's name, Python version and installed packages."""
    columns, _ = get_terminal_size()
    pkgs = sorted(packages(sitepackages_dir(env)))
    env_python = workon_home / env / env_bin_dir / 'python'
    l = len(env) + 2
    version = invoke(str(env_python), '-V')
    # Python 2 prints its version on stderr, Python 3 on stdout: join both.
    version = ' - '.join((version.out + version.err).splitlines())
    print(env, ': ', version, sep='')
    print(textwrap.fill(' '.join(pkgs),
                        width=columns-l,
                        initial_indent=(l * ' '),
                        subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
    """Show details for the named (or currently active) environment."""
    try:
        showvirtualenv(argv[0])
    except IndexError:
        # No name given: fall back to the active env, if any.
        if 'VIRTUAL_ENV' in os.environ:
            showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
        else:
            sys.exit('pew show [env]')
def lsenvs():
    """Return the sorted names of all envs in WORKON_HOME with a python binary."""
    return sorted(set(env.parts[-3] for env in
                      workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
    """List envs: compact column output, or one detailed entry each if verbose."""
    envs = lsenvs()
    if not verbose:
        print_virtualenvs(*envs)
    else:
        for env in envs:
            showvirtualenv(env)
def ls_cmd(argv):
    """List available environments."""
    parser = argparse.ArgumentParser()
    p_group = parser.add_mutually_exclusive_group()
    p_group.add_argument('-b', '--brief', action='store_false')
    p_group.add_argument('-l', '--long', action='store_true')
    args = parser.parse_args(argv)
    lsvirtualenv(args.long)
def parse_envname(argv, no_arg_callback):
    """Validate and return the env name in argv[0]; exit on invalid input.

    `no_arg_callback` is invoked when no name was supplied at all.
    """
    if len(argv) < 1 or argv[0] is None:
        no_arg_callback()

    env = argv[0]
    if env.startswith('/'):
        sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
    if not (workon_home / env).exists():
        sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
    else:
        return env
def workon_cmd(argv):
    """List or change working virtual environments."""
    parser = argparse.ArgumentParser(prog='pew workon')
    parser.add_argument('envname', nargs='?')
    parser.add_argument(
        '-n', '--no-cd', action='store_true',
        help=('Do not change working directory to project directory after '
              'activating virtualenv.')
    )
    args = parser.parse_args(argv)

    def list_and_exit():
        lsvirtualenv(False)
        sys.exit(0)

    env = parse_envname([args.envname], list_and_exit)

    # Check if the virtualenv has an associated project directory and in
    # this case, use it as the current working directory.
    project_dir = get_project_dir(env)
    if project_dir is None or args.no_cd:
        project_dir = os.getcwd()
    return shell(env, cwd=project_dir)
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
    """Return the site-packages Path of `env` (default: the active env).

    NOTE(review): the default is evaluated once at import time, so later
    changes to $VIRTUAL_ENV are not picked up — kept for compatibility.
    """
    if not env:
        sys.exit('ERROR: no virtualenv active')
    else:
        env_python = workon_home / env / env_bin_dir / 'python'
        return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
    """Add the specified directories to the Python path for the currently active virtualenv.

    This will be done by placing the directory names in a path file named
    "virtualenv_path_extensions.pth" inside the virtualenv's site-packages
    directory; if this file does not exists, it will be created first.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', dest='remove', action='store_true')
    parser.add_argument('dirs', nargs='+')
    args = parser.parse_args(argv)

    extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
    new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
    if not extra_paths.exists():
        with extra_paths.open('w') as extra:
            # Bookkeeping lines that move the added paths in front of the
            # system ones (the standard virtualenvwrapper .pth trick).
            extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')

    def rewrite(f):
        # Apply `f` to the file's lines and write the result back in place.
        with extra_paths.open('r+') as extra:
            to_write = f(extra.readlines())
            extra.seek(0)
            extra.truncate()
            extra.writelines(to_write)

    if args.remove:
        rewrite(lambda ls: [line for line in ls if line not in new_paths])
    else:
        # Keep the first bookkeeping line first, insert new paths after it.
        rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
    """Print the site-packages directory of the active virtualenv."""
    print(sitepackages_dir())
def lssitepackages_cmd(argv):
    """Show the content of the site-packages directory of the current virtualenv."""
    site = sitepackages_dir()
    print(*sorted(site.iterdir()), sep=os.linesep)
    extra_paths = site / '_virtualenv_path_extensions.pth'
    if extra_paths.exists():
        print('from _virtualenv_path_extensions.pth:')
        with extra_paths.open() as extra:
            print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
    """Toggle the current virtualenv between having and not having access to the global site-packages."""
    quiet = argv == ['-q']
    site = sitepackages_dir()
    # virtualenv checks for this marker file next to site-packages.
    ngsp_file = site.parent / 'no-global-site-packages.txt'
    if ngsp_file.exists():
        ngsp_file.unlink()
        if not quiet:
            print('Enabled global site-packages')
    else:
        with ngsp_file.open('w'):
            if not quiet:
                print('Disabled global site-packages')
def cp_cmd(argv):
    """Duplicate the named virtualenv to make a new one."""
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('target', nargs='?')
    parser.add_argument('-d', '--dont-activate', action='store_false',
                        default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
    args = parser.parse_args(argv)
    target_name = copy_virtualenv_project(args.source, args.target)
    if args.activate:
        shell(target_name)
def copy_virtualenv_project(source, target):
    """Clone `source` env into WORKON_HOME as `target`; return the new name."""
    source = expandpath(source)
    if not source.exists():
        # Not a filesystem path: try interpreting it as an env name.
        source = workon_home / source
        if not source.exists():
            sys.exit('Please provide a valid virtualenv to copy')

    target_name = target or source.name
    target = workon_home / target_name

    if target.exists():
        sys.exit('%s virtualenv already exists in %s.' % (
            target_name, workon_home
        ))

    print('Copying {0} in {1}'.format(source, target_name))
    clone_virtualenv(str(source), str(target))
    return target_name
def rename_cmd(argv):
    """Rename a virtualenv"""
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('target')
    pargs = parser.parse_args(argv)
    # Rename = clone to the new name, then delete the original.
    copy_virtualenv_project(pargs.source, pargs.target)
    return rmvirtualenvs([pargs.source])
def setvirtualenvproject(env, project):
    """Record `project` as the project dir for `env` in its .project file."""
    print('Setting project for {0} to {1}'.format(env, project))
    with (workon_home / env / '.project').open('wb') as prj:
        prj.write(str(project).encode())
def setproject_cmd(argv):
    """Given a virtualenv directory and a project directory, set the
    virtualenv up to be associated with the project.
    """
    args = dict(enumerate(argv))
    project = os.path.abspath(args.get(1, '.'))
    env = args.get(0, os.environ.get('VIRTUAL_ENV'))
    if not env:
        sys.exit('pew setproject [virtualenv] [project_path]')
    if not (workon_home / env).exists():
        sys.exit("Environment '%s' doesn't exist." % env)
    if not os.path.isdir(project):
        sys.exit('pew setproject: %s does not exist' % project)
    setvirtualenvproject(env, project)
def getproject_cmd(argv):
    """Print a virtualenv's project directory, if set.

    If called without providing a virtualenv name as argument, print the
    current virtualenv's project directory.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description="Print an environment's project directory.",
    )
    parser.add_argument(
        'envname',
        nargs='?',
        default=os.environ.get('VIRTUAL_ENV'),
        help=(
            'The name of the environment to return the project directory '
            'for. If omitted, will use the currently active environment.'
        ),
    )
    args = parser.parse_args(argv)

    # Now, do the actual work
    if not args.envname:
        sys.exit('ERROR: no virtualenv active')
    if not (workon_home / args.envname).exists():
        sys.exit("ERROR: Environment '{0}' does not exist."
                 .format(args.envname))
    project_dir = get_project_dir(args.envname)
    if project_dir is None:
        sys.exit("ERROR: no project directory set for Environment '{0}'"
                 .format(args.envname))
    print(project_dir)
def mkproject_cmd(argv):
    """Create a new project directory and its associated virtualenv."""
    if '-l' in argv or '--list' in argv:
        # Fast path: just list the available project templates and bail out.
        templates = [t.name[9:] for t in workon_home.glob("template_*")]
        print("Available project templates:", *templates, sep='\n')
        return

    parser = mkvirtualenv_argparser()
    parser.add_argument('envname')
    parser.add_argument(
        '-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
    parser.add_argument(
        '-l', '--list', action='store_true', help='List available templates.')
    args, rest = parser.parse_known_args(argv)

    projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
    if not projects_home.exists():
        sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)

    project = (projects_home / args.envname).absolute()
    if project.exists():
        sys.exit('Project %s already exists.' % args.envname)

    mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
                 args.requirements, rest)
    project.mkdir()
    # Each template is an executable named template_<name> inside WORKON_HOME,
    # invoked with the env name and the project path as arguments.
    for template_name in args.templates:
        template = workon_home / ("template_" + template_name)
        inve(args.envname, str(template), args.envname, str(project))
    if args.activate:
        shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
    """Create a temporary virtualenv that is deleted when the shell exits."""
    parser = mkvirtualenv_argparser()
    env = '.'
    while (workon_home / env).exists():
        # BUGFIX: the original used hex(...)[2:-1]; on Python 3 that drops
        # the last hex digit (the trailing char was Python 2's long 'L').
        env = format(random.getrandbits(64), 'x')
    args, rest = parser.parse_known_args(argv)
    mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
                 rest=rest)
    print('This is a temporary environment. It will be deleted when you exit')
    try:
        if args.activate:
            # only used for testing on windows
            shell(env)
    finally:
        # BUGFIX: don't `return` from the finally clause — that silently
        # swallows any in-flight exception (including KeyboardInterrupt).
        removal_result = rmvirtualenvs([env])
    return removal_result
def wipeenv_cmd(argv):
    """Remove all installed packages from the current (or supplied) env."""
    env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')

    if not env:
        sys.exit('ERROR: no virtualenv active')
    elif not (workon_home / env).exists():
        sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
    else:
        env_pip = str(workon_home / env / env_bin_dir / 'pip')
        all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
        # Only "name==version" lines are uninstallable specs; everything
        # else (editable installs, comments) is reported and skipped.
        pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
        ignored = sorted(all_pkgs - pkgs)
        pkgs = set(p.split("==")[0] for p in pkgs)
        to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
        if to_remove:
            print("Ignoring:\n %s" % "\n ".join(ignored))
            print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
            return inve(env, 'pip', 'uninstall', '-y', *to_remove)
        else:
            print("Nothing to remove")
def inall_cmd(argv):
    """Run a command in each virtualenv."""
    envs = lsenvs()
    errors = False
    for env in envs:
        print("\n%s:" % env)
        try:
            inve(env, *argv)
        except CalledProcessError as e:
            # Keep going through the remaining envs, but remember the
            # failure so the process exits non-zero at the end.
            errors = True
            err(e)
    sys.exit(errors)
def in_cmd(argv):
    """Run a command in the given virtualenv."""
    if len(argv) == 1:
        # Only an env name was given: behave like `pew workon`.
        return workon_cmd(argv)

    parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))

    return inve(*argv)
def restore_cmd(argv):
    """Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
    if len(argv) < 1:
        sys.exit('You must provide a valid virtualenv to target')

    env = argv[0]
    path = workon_home / env
    py = path / env_bin_dir / ('python.exe' if windows else 'python')
    # Resolve the symlink to find the exact interpreter the env was built with.
    exact_py = py.resolve().name

    return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def install_cmd(argv):
    '''Use Pythonz to download and build the specified Python version'''
    installer = InstallCommand()
    options, versions = installer.parser.parse_args(argv)
    if len(versions) != 1:
        # Exactly one version must be requested.
        installer.parser.print_help()
        sys.exit(1)
    else:
        try:
            actual_installer = PythonInstaller.get_installer(versions[0], options)
            return actual_installer.install()
        except AlreadyInstalledError as e:
            # Not a failure worth aborting for: just report it.
            print(e)
def uninstall_cmd(argv):
    '''Use Pythonz to uninstall the specified Python version'''
    return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
    '''List the pythons installed by Pythonz (or all the installable ones)'''
    return ListPythons().run(argv)
def locate_python_cmd(argv):
    '''Locate the path for the python version installed by Pythonz'''
    return LocatePython().run(argv)
def version_cmd(argv):
    """Prints current pew version"""
    import pkg_resources

    try:
        __version__ = pkg_resources.get_distribution('pew').version
    except pkg_resources.DistributionNotFound:
        __version__ = 'unknown'
        print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)

    print(__version__)
def prevent_path_errors():
    """Abort if an active virtualenv's bin dir is missing from $PATH."""
    if 'VIRTUAL_ENV' in os.environ and not check_path():
        sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
    """One-time interactive setup: offer to source pew's shell helpers."""
    shell = supported_shell()
    if shell:
        if shell == 'fish':
            source_cmd = 'source (pew shell_config)'
        else:
            source_cmd = 'source "$(pew shell_config)"'
        rcpath = expandpath({'bash': '~/.bashrc'
                           , 'zsh': '~/.zshrc'
                           , 'fish': '~/.config/fish/config.fish'}[shell])
        if rcpath.exists():
            update_config_file(rcpath, source_cmd)
        else:
            # BUGFIX: message typo in the original ("want source shell competions").
            print("It seems that you're running pew for the first time\n"
                  "If you want to source shell completions and update your prompt, "
                  "Add the following line to your shell config file:\n %s" % source_cmd)
    print('\nWill now continue with the command:', *sys.argv[1:])
    input('[enter]')
def update_config_file(rcpath, source_cmd):
    """Offer to append `source_cmd` to the shell rc file at `rcpath`.

    Does nothing if the line is already present; otherwise asks the user
    interactively before modifying the file.
    """
    with rcpath.open('r+') as rcfile:
        if source_cmd not in (line.strip() for line in rcfile.readlines()):
            choice = 'X'
            while choice not in ('y', '', 'n'):
                choice = input("It seems that you're running pew for the first time\n"
                               "do you want to modify %s to source completions and"
                               " update your prompt? [y/N]\n> " % rcpath).lower()
            if choice == 'y':
                # readlines() above left the file pointer at EOF, so this appends.
                rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
                print('Done')
            else:
                print('\nOk, if you want to do it manually, just add\n %s\nat'
                      ' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
    """Print the table of available subcommands with one-line summaries."""
    longest = max(map(len, cmds)) + 3
    columns, _ = get_terminal_size()
    print('Available commands:\n')
    for cmd, fun in sorted(cmds.items()):
        if fun.__doc__:
            # Wrap the first docstring line to the terminal width, aligned
            # after the command-name column.
            print(textwrap.fill(
                fun.__doc__.splitlines()[0],
                columns or 1000,
                initial_indent=(' {0}: '.format(cmd)).ljust(longest),
                subsequent_indent=longest * ' '))
        else:
            print(' ' + cmd)
def pew():
    """Entry point: dispatch sys.argv[1] to the matching *_cmd function."""
    first_run = makedirs_and_symlink_if_needed(workon_home)
    if first_run and sys.stdin.isatty():
        first_run_setup()

    # Subcommands are discovered by naming convention: every module-level
    # callable ending in `_cmd` becomes `pew <name>`.
    cmds = dict((cmd[:-4], fun)
                for cmd, fun in globals().items() if cmd.endswith('_cmd'))
    if sys.argv[1:]:
        if sys.argv[1] in cmds:
            command = cmds[sys.argv[1]]
            try:
                return command(sys.argv[2:])
            except CalledProcessError as e:
                return e.returncode
            except KeyboardInterrupt:
                pass
        else:
            err("ERROR: command", sys.argv[1], "does not exist.")
            print_commands(cmds)
            sys.exit(1)
    else:
        print_commands(cmds)
|
berdario/pew | pew/pew.py | install_cmd | python | def install_cmd(argv):
'''Use Pythonz to download and build the specified Python version'''
installer = InstallCommand()
options, versions = installer.parser.parse_args(argv)
if len(versions) != 1:
installer.parser.print_help()
sys.exit(1)
else:
try:
actual_installer = PythonInstaller.get_installer(versions[0], options)
return actual_installer.install()
except AlreadyInstalledError as e:
print(e) | Use Pythonz to download and build the specified Python version | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L694-L706 | [
"lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')\n"
] | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
if not workon_home.exists() and own(workon_home):
workon_home.mkdir(parents=True)
link = expandpath('~/.virtualenvs')
if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
'XDG_DATA_HOME' not in os.environ and not link.exists():
link.symlink_to(str(workon_home))
return True
else:
return False
pew_site = Path(__file__).parent
def supported_shell():
    """Return the basename of $SHELL if pew ships helpers for it, else None."""
    name = Path(os.environ.get('SHELL', '')).stem
    return name if name in {'bash', 'zsh', 'fish'} else None
def shell_config_cmd(argv):
"Prints the path for the current $SHELL helper file"
shell = supported_shell()
if shell:
print(pew_site / 'shell_config' / ('init.' + shell))
else:
err('Completions and prompts are unavailable for %s' %
repr(os.environ.get('SHELL', '')))
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
    """Return the project directory recorded for *env*, or None.

    Reads the first line of ``$WORKON_HOME/<env>/.project``; warns and
    returns None when the recorded directory no longer exists.
    """
    project_file = workon_home / env / '.project'
    if not project_file.exists():
        return None
    with project_file.open() as f:
        recorded = f.readline().strip()
    if os.path.exists(recorded):
        return recorded
    err('Corrupted or outdated:', project_file, '\nDirectory',
        recorded, "doesn't exist.")
    return None
def unsetenv(key):
    """Remove *key* from os.environ if present; silently no-op otherwise."""
    os.environ.pop(key, None)
def compute_path(env):
    """Return $PATH with the env's bin/Scripts directory prepended."""
    bin_dir = workon_home / env / env_bin_dir
    return str(bin_dir) + os.pathsep + os.environ['PATH']
def inve(env, command, *args, **kwargs):
    """Run a command in the given virtual environment.
    Pass additional keyword arguments to ``subprocess.check_call()``."""
    # we don't strictly need to restore the environment, since pew runs in
    # its own process, but it feels like the right thing to do
    with temp_environ():
        os.environ['VIRTUAL_ENV'] = str(workon_home / env)
        os.environ['PATH'] = compute_path(env)
        # Make sure the target env's own interpreter configuration wins
        # over anything inherited from the calling environment.
        unsetenv('PYTHONHOME')
        unsetenv('__PYVENV_LAUNCHER__')
        try:
            return check_call([command] + list(args), shell=windows, **kwargs)
            # need to have shell=True on windows, otherwise the PYTHONPATH
            # won't inherit the PATH
        except OSError as e:
            if e.errno == 2:  # ENOENT: the command binary itself is missing
                err('Unable to find', command)
                return 2
            else:
                raise
def fork_shell(env, shellcmd, cwd):
or_ctrld = '' if windows else "or 'Ctrl+D' "
err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
"to return.", sep='')
if 'VIRTUAL_ENV' in os.environ:
err("Be aware that this environment will be nested on top "
"of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
    """Spawn bash with a temporary rcfile that keeps *env* activated."""
    # bash is a special little snowflake, and prevent_path_errors cannot work there
    # https://github.com/berdario/pew/issues/58#issuecomment-102182346
    bashrcpath = expandpath('~/.bashrc')
    if bashrcpath.exists():
        with NamedTemporaryFile('w+') as rcfile:
            # Replay the user's ~/.bashrc first, then force the env's PATH
            # on top, so user configuration cannot undo the activation.
            with bashrcpath.open() as bashrc:
                rcfile.write(bashrc.read())
            rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
            rcfile.flush()
            return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
    else:
        return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
shell_cmd = ['cmd']
cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
if expandpath(cmderrc_path).exists():
shell_cmd += ['/k', cmderrc_path]
if cwd:
os.environ['CMDER_START'] = cwd
return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
    """Launch an interactive subshell inside *env* (internal helper)."""
    env = str(env)
    shell = _detect_shell()
    shell_name = Path(shell).stem
    # For unknown shells, run a sanity check first: spawn the shell inside
    # the env and verify the PATH actually activated it.
    if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
        # On Windows the PATH is usually set with System Utility
        # so we won't worry about trying to check mistakes there
        shell_check = (sys.executable + ' -c "from pew.pew import '
                       'prevent_path_errors; prevent_path_errors()"')
        try:
            inve(env, shell, '-c', shell_check)
        except CalledProcessError:
            # prevent_path_errors already printed a diagnostic; abort quietly.
            return
    if shell_name == 'bash':
        return fork_bash(env, cwd)
    elif shell_name == 'Cmder':
        return fork_cmder(env, cwd)
    else:
        return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=None, project=None,
                 requirements=None, rest=None):
    """Create the virtualenv *envname* under $WORKON_HOME.

    Args:
        envname: name of the environment to create.
        python: optional interpreter, forwarded as ``--python=...``.
        packages: optional list of packages to pip-install afterwards.
        project: optional project directory (a Path) to associate with
            the new environment.
        requirements: optional pip requirements file to install from.
        rest: extra command-line arguments forwarded to virtualenv.

    Raises:
        CalledProcessError/KeyboardInterrupt: re-raised after removing
            the half-created environment.
    """
    # Fix: the defaults used to be mutable lists ([]), which are shared
    # across calls; normalize None to fresh lists instead.
    packages = packages or []
    rest = list(rest) if rest else []
    if python:
        rest = ["--python=%s" % python] + rest
    path = (workon_home / envname).absolute()
    try:
        check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
    except (CalledProcessError, KeyboardInterrupt):
        # Don't leave a broken half-created env behind.
        rmvirtualenvs([envname])
        raise
    else:
        if project:
            setvirtualenvproject(envname, project.absolute())
        if requirements:
            inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
        if packages:
            inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
    """Return an ArgumentParser with the options shared by new/mkproject/mktmpenv."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--python')
    parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
    parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
    parser.add_argument('-d', '--dont-activate', action='store_false',
                        default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
    return parser
def new_cmd(argv):
"""Create a new environment, in $WORKON_HOME."""
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname)
def rmvirtualenvs(envs):
error_happened = False
for env in envs:
env = workon_home / env
if os.environ.get('VIRTUAL_ENV') == str(env):
err("ERROR: You cannot remove the active environment (%s)." % env)
error_happened = True
break
try:
shutil.rmtree(str(env))
except OSError as e:
err("Error while trying to remove the {0} env: \n{1}".format
(env, e.strerror))
error_happened = True
return error_happened
def rm_cmd(argv):
    """Remove one or more environment, from $WORKON_HOME."""
    # Guard clause: at least one environment name is required.
    if not argv:
        sys.exit("Please specify an environment")
    return rmvirtualenvs(argv)
def packages(site_packages):
    """Return the package names found in *site_packages*, minus __pycache__."""
    names = {entry.stem.split('-')[0] for entry in site_packages.iterdir()}
    names.discard('__pycache__')
    return names
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
    """Return the sorted names of all environments under $WORKON_HOME."""
    # An env exists iff it has a python executable in its bin/Scripts dir;
    # parts[-3] recovers the env directory name from the matched path.
    pattern = os.path.join('*', env_bin_dir, 'python*')
    names = {interpreter.parts[-3] for interpreter in workon_home.glob(pattern)}
    return sorted(names)
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def ls_cmd(argv):
"""List available environments."""
parser = argparse.ArgumentParser()
p_group = parser.add_mutually_exclusive_group()
p_group.add_argument('-b', '--brief', action='store_false')
p_group.add_argument('-l', '--long', action='store_true')
args = parser.parse_args(argv)
lsvirtualenv(args.long)
def parse_envname(argv, no_arg_callback):
    """Validate and return the environment name in ``argv[0]``.

    Calls *no_arg_callback* when no name was given; exits with an error
    for absolute paths or for environments that do not exist.
    """
    if len(argv) < 1 or argv[0] is None:
        no_arg_callback()
    env = argv[0]
    # Reject absolute paths: env names are directory names under WORKON_HOME.
    if env.startswith('/'):
        sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
    if not (workon_home / env).exists():
        sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
    else:
        return env
def workon_cmd(argv):
"""List or change working virtual environments."""
parser = argparse.ArgumentParser(prog='pew workon')
parser.add_argument('envname', nargs='?')
parser.add_argument(
'-n', '--no-cd', action='store_true',
help=('Do not change working directory to project directory after '
'activating virtualenv.')
)
args = parser.parse_args(argv)
def list_and_exit():
lsvirtualenv(False)
sys.exit(0)
env = parse_envname([args.envname], list_and_exit)
# Check if the virtualenv has an associated project directory and in
# this case, use it as the current working directory.
project_dir = get_project_dir(env)
if project_dir is None or args.no_cd:
project_dir = os.getcwd()
return shell(env, cwd=project_dir)
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
if not env:
sys.exit('ERROR: no virtualenv active')
else:
env_python = workon_home / env / env_bin_dir / 'python'
return Path(invoke(str(env_python), '-c', 'import distutils; \
print(distutils.sysconfig.get_python_lib())').out)
def add_cmd(argv):
    """Add the specified directories to the Python path for the currently active virtualenv.
    This will be done by placing the directory names in a path file named
    "virtualenv_path_extensions.pth" inside the virtualenv's site-packages
    directory; if this file does not exists, it will be created first.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', dest='remove', action='store_true')
    parser.add_argument('dirs', nargs='+')
    args = parser.parse_args(argv)
    extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
    new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
    if not extra_paths.exists():
        # Seed the .pth file with a sys.path bookkeeping preamble that keeps
        # the added directories ahead of the stock entries at import time.
        with extra_paths.open('w') as extra:
            extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')
    def rewrite(f):
        # Transform the file's lines in place with *f*.
        with extra_paths.open('r+') as extra:
            to_write = f(extra.readlines())
            extra.seek(0)
            extra.truncate()
            extra.writelines(to_write)
    if args.remove:
        rewrite(lambda ls: [line for line in ls if line not in new_paths])
    else:
        # Insert right after the preamble's first line so bookkeeping stays on top.
        rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
print(sitepackages_dir())
def lssitepackages_cmd(argv):
"""Show the content of the site-packages directory of the current virtualenv."""
site = sitepackages_dir()
print(*sorted(site.iterdir()), sep=os.linesep)
extra_paths = site / '_virtualenv_path_extensions.pth'
if extra_paths.exists():
print('from _virtualenv_path_extensions.pth:')
with extra_paths.open() as extra:
print(''.join(extra.readlines()))
def toggleglobalsitepackages_cmd(argv):
"""Toggle the current virtualenv between having and not having access to the global site-packages."""
quiet = argv == ['-q']
site = sitepackages_dir()
ngsp_file = site.parent / 'no-global-site-packages.txt'
if ngsp_file.exists():
ngsp_file.unlink()
if not quiet:
print('Enabled global site-packages')
else:
with ngsp_file.open('w'):
if not quiet:
print('Disabled global site-packages')
def cp_cmd(argv):
"""Duplicate the named virtualenv to make a new one."""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target', nargs='?')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
args = parser.parse_args(argv)
target_name = copy_virtualenv_project(args.source, args.target)
if args.activate:
shell(target_name)
def copy_virtualenv_project(source, target):
    """Clone virtualenv *source* to *target*; return the new env's name.

    *source* may be a filesystem path or a name under $WORKON_HOME;
    exits when the source is missing or the target already exists.
    """
    source = expandpath(source)
    if not source.exists():
        # Not a path on disk: fall back to a name inside $WORKON_HOME.
        source = workon_home / source
        if not source.exists():
            sys.exit('Please provide a valid virtualenv to copy')
    target_name = target or source.name
    target = workon_home / target_name
    if target.exists():
        sys.exit('%s virtualenv already exists in %s.' % (
            target_name, workon_home
        ))
    print('Copying {0} in {1}'.format(source, target_name))
    clone_virtualenv(str(source), str(target))
    return target_name
def rename_cmd(argv):
"""Rename a virtualenv"""
parser = argparse.ArgumentParser()
parser.add_argument('source')
parser.add_argument('target')
pargs = parser.parse_args(argv)
copy_virtualenv_project(pargs.source, pargs.target)
return rmvirtualenvs([pargs.source])
def setvirtualenvproject(env, project):
print('Setting project for {0} to {1}'.format(env, project))
with (workon_home / env / '.project').open('wb') as prj:
prj.write(str(project).encode())
def setproject_cmd(argv):
"""Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
args = dict(enumerate(argv))
project = os.path.abspath(args.get(1, '.'))
env = args.get(0, os.environ.get('VIRTUAL_ENV'))
if not env:
sys.exit('pew setproject [virtualenv] [project_path]')
if not (workon_home / env).exists():
sys.exit("Environment '%s' doesn't exist." % env)
if not os.path.isdir(project):
sys.exit('pew setproject: %s does not exist' % project)
setvirtualenvproject(env, project)
def getproject_cmd(argv):
"""Print a virtualenv's project directory, if set.
If called without providing a virtualenv name as argument, print the
current virtualenv's project directory.
"""
# Parse command line arguments
parser = argparse.ArgumentParser(
description="Print an environment's project directory.",
)
parser.add_argument(
'envname',
nargs='?',
default=os.environ.get('VIRTUAL_ENV'),
help=(
'The name of the environment to return the project directory '
'for. If omitted, will use the currently active environment.'
),
)
args = parser.parse_args(argv)
# Now, do the actual work
if not args.envname:
sys.exit('ERROR: no virtualenv active')
if not (workon_home / args.envname).exists():
sys.exit("ERROR: Environment '{0}' does not exist."
.format(args.envname))
project_dir = get_project_dir(args.envname)
if project_dir is None:
sys.exit("ERROR: no project directory set for Environment '{0}'"
.format(args.envname))
print(project_dir)
def mkproject_cmd(argv):
"""Create a new project directory and its associated virtualenv."""
if '-l' in argv or '--list' in argv:
templates = [t.name[9:] for t in workon_home.glob("template_*")]
print("Available project templates:", *templates, sep='\n')
return
parser = mkvirtualenv_argparser()
parser.add_argument('envname')
parser.add_argument(
'-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
parser.add_argument(
'-l', '--list', action='store_true', help='List available templates.')
args, rest = parser.parse_known_args(argv)
projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
if not projects_home.exists():
sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)
project = (projects_home / args.envname).absolute()
if project.exists():
sys.exit('Project %s already exists.' % args.envname)
mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
args.requirements, rest)
project.mkdir()
for template_name in args.templates:
template = workon_home / ("template_" + template_name)
inve(args.envname, str(template), args.envname, str(project))
if args.activate:
shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
    """Create a temporary virtualenv."""
    parser = mkvirtualenv_argparser()
    env = '.'
    while (workon_home / env).exists():
        # Random 64-bit hex name. Fix: the old hex(...)[2:-1] slicing was a
        # Python 2 leftover (stripping the trailing 'L'); on Python 3 it
        # silently dropped a significant hex digit.
        env = '{0:x}'.format(random.getrandbits(64))
    args, rest = parser.parse_known_args(argv)
    mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
                 rest=rest)
    print('This is a temporary environment. It will be deleted when you exit')
    try:
        if args.activate:
            # only used for testing on windows
            shell(env)
    finally:
        # NOTE(review): the return-in-finally swallows any exception raised
        # by shell(); kept as-is because the env must always be cleaned up.
        return rmvirtualenvs([env])
def wipeenv_cmd(argv):
    """Remove all installed packages from the current (or supplied) env."""
    env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
    if not env:
        sys.exit('ERROR: no virtualenv active')
    elif not (workon_home / env).exists():
        sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
    else:
        env_pip = str(workon_home / env / env_bin_dir / 'pip')
        all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
        # Only exact 'name==version' lines are uninstalled; editable and VCS
        # entries from `pip freeze` are reported but left alone.
        pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
        ignored = sorted(all_pkgs - pkgs)
        pkgs = set(p.split("==")[0] for p in pkgs)
        # Never uninstall the packaging machinery itself.
        to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
        if to_remove:
            print("Ignoring:\n %s" % "\n ".join(ignored))
            print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
            return inve(env, 'pip', 'uninstall', '-y', *to_remove)
        else:
            print("Nothing to remove")
def inall_cmd(argv):
"""Run a command in each virtualenv."""
envs = lsenvs()
errors = False
for env in envs:
print("\n%s:" % env)
try:
inve(env, *argv)
except CalledProcessError as e:
errors = True
err(e)
sys.exit(errors)
def in_cmd(argv):
"""Run a command in the given virtualenv."""
if len(argv) == 1:
return workon_cmd(argv)
parse_envname(argv, lambda : sys.exit('You must provide a valid virtualenv to target'))
return inve(*argv)
def restore_cmd(argv):
"""Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
if len(argv) < 1:
sys.exit('You must provide a valid virtualenv to target')
env = argv[0]
path = workon_home / env
py = path / env_bin_dir / ('python.exe' if windows else 'python')
exact_py = py.resolve().name
return check_call([sys.executable, "-m", "virtualenv", str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
    """Print the path for the virtualenv directory"""
    def _require_env():
        sys.exit('You must provide a valid virtualenv to target')
    print(workon_home / parse_envname(argv, _require_env))
def uninstall_cmd(argv):
'''Use Pythonz to uninstall the specified Python version'''
return UninstallCommand().run(argv)
def list_pythons_cmd(argv):
'''List the pythons installed by Pythonz (or all the installable ones)'''
return ListPythons().run(argv)
def locate_python_cmd(argv):
'''Locate the path for the python version installed by Pythonz'''
return LocatePython().run(argv)
def version_cmd(argv):
    """Prints current pew version"""
    # Imported lazily: only this command needs pkg_resources.
    import pkg_resources
    try:
        __version__ = pkg_resources.get_distribution('pew').version
    except pkg_resources.DistributionNotFound:
        # Running from a source checkout or a broken install.
        __version__ = 'unknown'
        print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
    print(__version__)
def prevent_path_errors():
if 'VIRTUAL_ENV' in os.environ and not check_path():
sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
shell = supported_shell()
if shell:
if shell == 'fish':
source_cmd = 'source (pew shell_config)'
else:
source_cmd = 'source "$(pew shell_config)"'
rcpath = expandpath({'bash': '~/.bashrc'
, 'zsh': '~/.zshrc'
, 'fish': '~/.config/fish/config.fish'}[shell])
if rcpath.exists():
update_config_file(rcpath, source_cmd)
else:
print("It seems that you're running pew for the first time\n"
"If you want source shell competions and update your prompt, "
"Add the following line to your shell config file:\n %s" % source_cmd)
print('\nWill now continue with the command:', *sys.argv[1:])
input('[enter]')
def update_config_file(rcpath, source_cmd):
    """Interactively offer to append *source_cmd* to the shell rc file *rcpath*.

    Only appends when the line is not already present; otherwise prints
    manual instructions when the user declines.
    """
    with rcpath.open('r+') as rcfile:
        if source_cmd not in (line.strip() for line in rcfile.readlines()):
            choice = 'X'
            while choice not in ('y', '', 'n'):
                choice = input("It seems that you're running pew for the first time\n"
                               "do you want to modify %s to source completions and"
                               " update your prompt? [y/N]\n> " % rcpath).lower()
            if choice == 'y':
                # File pointer sits at EOF after readlines(), so this appends.
                rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
                print('Done')
            else:
                print('\nOk, if you want to do it manually, just add\n %s\nat'
                      ' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
longest = max(map(len, cmds)) + 3
columns, _ = get_terminal_size()
print('Available commands:\n')
for cmd, fun in sorted(cmds.items()):
if fun.__doc__:
print(textwrap.fill(
fun.__doc__.splitlines()[0],
columns or 1000,
initial_indent=(' {0}: '.format(cmd)).ljust(longest),
subsequent_indent=longest * ' '))
else:
print(' ' + cmd)
def pew():
    """Command-line entry point: dispatch sys.argv[1] to a *_cmd function."""
    first_run = makedirs_and_symlink_if_needed(workon_home)
    if first_run and sys.stdin.isatty():
        first_run_setup()
    # Every module-level function named '<name>_cmd' becomes subcommand <name>.
    cmds = dict((cmd[:-4], fun)
                for cmd, fun in globals().items() if cmd.endswith('_cmd'))
    if sys.argv[1:]:
        if sys.argv[1] in cmds:
            command = cmds[sys.argv[1]]
            try:
                return command(sys.argv[2:])
            except CalledProcessError as e:
                # Propagate the child process's exit status as our own.
                return e.returncode
            except KeyboardInterrupt:
                pass
        else:
            err("ERROR: command", sys.argv[1], "does not exist.")
            print_commands(cmds)
            sys.exit(1)
    else:
        print_commands(cmds)
|
berdario/pew | pew/pew.py | version_cmd | python | def version_cmd(argv):
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('pew').version
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
print('Setuptools has some issues here, failed to get our own package.', file=sys.stderr)
print(__version__) | Prints current pew version | train | https://github.com/berdario/pew/blob/37d9ff79342336b8ef6437d9a551008be07afe9b/pew/pew.py#L724-L734 | null | from __future__ import print_function, absolute_import, unicode_literals
import os
import sys
import argparse
import shutil
import random
import textwrap
from functools import partial
from subprocess import CalledProcessError
from pathlib import Path
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size
windows = sys.platform == 'win32'
from clonevirtualenv import clone_virtualenv
if not windows:
try:
# Try importing these packages if avaiable
from pythonz.commands.install import InstallCommand
from pythonz.commands.uninstall import UninstallCommand
from pythonz.installer.pythoninstaller import PythonInstaller, AlreadyInstalledError
from pythonz.commands.list import ListCommand
from pythonz.define import PATH_PYTHONS
from pythonz.commands.locate import LocateCommand as LocatePython
def ListPythons():
try:
Path(PATH_PYTHONS).mkdir(parents=True)
except OSError:
pass
return ListCommand()
except:
# create mock commands
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('You need to install the pythonz extra. pip install pew[pythonz]')
else:
# Pythonz does not support windows
InstallCommand = ListPythons = LocatePython = UninstallCommand = \
lambda : sys.exit('Command not supported on this platform')
import shellingham
from pew._utils import (check_call, invoke, expandpath, own, env_bin_dir,
check_path, temp_environ, NamedTemporaryFile, to_unicode)
from pew._print_utils import print_virtualenvs
if sys.version_info[0] == 2:
input = raw_input
err = partial(print, file=sys.stderr)
if windows:
default_home = '~/.virtualenvs'
else:
default_home = os.path.join(
os.environ.get('XDG_DATA_HOME', '~/.local/share'), 'virtualenvs')
workon_home = expandpath(
os.environ.get('WORKON_HOME', default_home))
def makedirs_and_symlink_if_needed(workon_home):
if not workon_home.exists() and own(workon_home):
workon_home.mkdir(parents=True)
link = expandpath('~/.virtualenvs')
if os.name == 'posix' and 'WORKON_HOME' not in os.environ and \
'XDG_DATA_HOME' not in os.environ and not link.exists():
link.symlink_to(str(workon_home))
return True
else:
return False
pew_site = Path(__file__).parent
def supported_shell():
shell = Path(os.environ.get('SHELL', '')).stem
if shell in ('bash', 'zsh', 'fish'):
return shell
def shell_config_cmd(argv):
"Prints the path for the current $SHELL helper file"
shell = supported_shell()
if shell:
print(pew_site / 'shell_config' / ('init.' + shell))
else:
err('Completions and prompts are unavailable for %s' %
repr(os.environ.get('SHELL', '')))
def deploy_completions():
completions = {'complete.bash': Path('/etc/bash_completion.d/pew'),
'complete.zsh': Path('/usr/local/share/zsh/site-functions/_pew'),
'complete.fish': Path('/etc/fish/completions/pew.fish')}
for comp, dest in completions.items():
if not dest.parent.exists():
dest.parent.mkdir(parents=True)
shutil.copy(str(pew_site / 'shell_config' / comp), str(dest))
def get_project_dir(env):
project_file = workon_home / env / '.project'
if project_file.exists():
with project_file.open() as f:
project_dir = f.readline().strip()
if os.path.exists(project_dir):
return project_dir
else:
err('Corrupted or outdated:', project_file, '\nDirectory',
project_dir, "doesn't exist.")
def unsetenv(key):
if key in os.environ:
del os.environ[key]
def compute_path(env):
envdir = workon_home / env
return os.pathsep.join([
str(envdir / env_bin_dir),
os.environ['PATH'],
])
def inve(env, command, *args, **kwargs):
"""Run a command in the given virtual environment.
Pass additional keyword arguments to ``subprocess.check_call()``."""
# we don't strictly need to restore the environment, since pew runs in
# its own process, but it feels like the right thing to do
with temp_environ():
os.environ['VIRTUAL_ENV'] = str(workon_home / env)
os.environ['PATH'] = compute_path(env)
unsetenv('PYTHONHOME')
unsetenv('__PYVENV_LAUNCHER__')
try:
return check_call([command] + list(args), shell=windows, **kwargs)
# need to have shell=True on windows, otherwise the PYTHONPATH
# won't inherit the PATH
except OSError as e:
if e.errno == 2:
err('Unable to find', command)
return 2
else:
raise
def fork_shell(env, shellcmd, cwd):
or_ctrld = '' if windows else "or 'Ctrl+D' "
err("Launching subshell in virtual environment. Type 'exit' ", or_ctrld,
"to return.", sep='')
if 'VIRTUAL_ENV' in os.environ:
err("Be aware that this environment will be nested on top "
"of '%s'" % Path(os.environ['VIRTUAL_ENV']).name)
return inve(env, *shellcmd, cwd=cwd)
def fork_bash(env, cwd):
# bash is a special little snowflake, and prevent_path_errors cannot work there
# https://github.com/berdario/pew/issues/58#issuecomment-102182346
bashrcpath = expandpath('~/.bashrc')
if bashrcpath.exists():
with NamedTemporaryFile('w+') as rcfile:
with bashrcpath.open() as bashrc:
rcfile.write(bashrc.read())
rcfile.write('\nexport PATH="' + to_unicode(compute_path(env)) + '"')
rcfile.flush()
return fork_shell(env, ['bash', '--rcfile', rcfile.name], cwd)
else:
return fork_shell(env, ['bash'], cwd)
def fork_cmder(env, cwd):
shell_cmd = ['cmd']
cmderrc_path = r'%CMDER_ROOT%\vendor\init.bat'
if expandpath(cmderrc_path).exists():
shell_cmd += ['/k', cmderrc_path]
if cwd:
os.environ['CMDER_START'] = cwd
return fork_shell(env, shell_cmd, cwd)
def _detect_shell():
shell = os.environ.get('SHELL', None)
if not shell:
if 'CMDER_ROOT' in os.environ:
shell = 'Cmder'
elif windows:
try:
_, shell = shellingham.detect_shell()
except shellingham.ShellDetectionFailure:
shell = os.environ.get('COMSPEC', 'cmd.exe')
else:
shell = 'sh'
return shell
def shell(env, cwd=None):
env = str(env)
shell = _detect_shell()
shell_name = Path(shell).stem
if shell_name not in ('Cmder', 'bash', 'elvish', 'powershell', 'klingon', 'cmd'):
# On Windows the PATH is usually set with System Utility
# so we won't worry about trying to check mistakes there
shell_check = (sys.executable + ' -c "from pew.pew import '
'prevent_path_errors; prevent_path_errors()"')
try:
inve(env, shell, '-c', shell_check)
except CalledProcessError:
return
if shell_name == 'bash':
return fork_bash(env, cwd)
elif shell_name == 'Cmder':
return fork_cmder(env, cwd)
else:
return fork_shell(env, [shell], cwd)
def mkvirtualenv(envname, python=None, packages=[], project=None,
requirements=None, rest=[]):
if python:
rest = ["--python=%s" % python] + rest
path = (workon_home / envname).absolute()
try:
check_call([sys.executable, "-m", "virtualenv", str(path)] + rest)
except (CalledProcessError, KeyboardInterrupt):
rmvirtualenvs([envname])
raise
else:
if project:
setvirtualenvproject(envname, project.absolute())
if requirements:
inve(envname, 'pip', 'install', '-r', str(expandpath(requirements)))
if packages:
inve(envname, 'pip', 'install', *packages)
def mkvirtualenv_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--python')
parser.add_argument('-i', action='append', dest='packages', help='Install \
a package after the environment is created. This option may be repeated.')
parser.add_argument('-r', dest='requirements', help='Provide a pip \
requirements file to install a base set of packages into the new environment.')
parser.add_argument('-d', '--dont-activate', action='store_false',
default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
return parser
def new_cmd(argv):
"""Create a new environment, in $WORKON_HOME."""
parser = mkvirtualenv_argparser()
parser.add_argument('-a', dest='project', help='Provide a full path to a \
project directory to associate with the new environment.')
parser.add_argument('envname')
args, rest = parser.parse_known_args(argv)
project = expandpath(args.project) if args.project else None
mkvirtualenv(args.envname, args.python, args.packages, project,
args.requirements, rest)
if args.activate:
shell(args.envname)
def rmvirtualenvs(envs):
error_happened = False
for env in envs:
env = workon_home / env
if os.environ.get('VIRTUAL_ENV') == str(env):
err("ERROR: You cannot remove the active environment (%s)." % env)
error_happened = True
break
try:
shutil.rmtree(str(env))
except OSError as e:
err("Error while trying to remove the {0} env: \n{1}".format
(env, e.strerror))
error_happened = True
return error_happened
def rm_cmd(argv):
"""Remove one or more environment, from $WORKON_HOME."""
if len(argv) < 1:
sys.exit("Please specify an environment")
return rmvirtualenvs(argv)
def packages(site_packages):
nodes = site_packages.iterdir()
return set([x.stem.split('-')[0] for x in nodes]) - set(['__pycache__'])
def showvirtualenv(env):
columns, _ = get_terminal_size()
pkgs = sorted(packages(sitepackages_dir(env)))
env_python = workon_home / env / env_bin_dir / 'python'
l = len(env) + 2
version = invoke(str(env_python), '-V')
version = ' - '.join((version.out + version.err).splitlines())
print(env, ': ', version, sep='')
print(textwrap.fill(' '.join(pkgs),
width=columns-l,
initial_indent=(l * ' '),
subsequent_indent=(l * ' ')), '\n')
def show_cmd(argv):
try:
showvirtualenv(argv[0])
except IndexError:
if 'VIRTUAL_ENV' in os.environ:
showvirtualenv(Path(os.environ['VIRTUAL_ENV']).name)
else:
sys.exit('pew show [env]')
def lsenvs():
return sorted(set(env.parts[-3] for env in
workon_home.glob(os.path.join('*', env_bin_dir, 'python*'))))
def lsvirtualenv(verbose):
envs = lsenvs()
if not verbose:
print_virtualenvs(*envs)
else:
for env in envs:
showvirtualenv(env)
def ls_cmd(argv):
"""List available environments."""
parser = argparse.ArgumentParser()
p_group = parser.add_mutually_exclusive_group()
p_group.add_argument('-b', '--brief', action='store_false')
p_group.add_argument('-l', '--long', action='store_true')
args = parser.parse_args(argv)
lsvirtualenv(args.long)
def parse_envname(argv, no_arg_callback):
if len(argv) < 1 or argv[0] is None:
no_arg_callback()
env = argv[0]
if env.startswith('/'):
sys.exit("ERROR: Invalid environment name '{0}'.".format(env))
if not (workon_home / env).exists():
sys.exit("ERROR: Environment '{0}' does not exist. Create it with \
'pew new {0}'.".format(env))
else:
return env
def workon_cmd(argv):
"""List or change working virtual environments."""
parser = argparse.ArgumentParser(prog='pew workon')
parser.add_argument('envname', nargs='?')
parser.add_argument(
'-n', '--no-cd', action='store_true',
help=('Do not change working directory to project directory after '
'activating virtualenv.')
)
args = parser.parse_args(argv)
def list_and_exit():
lsvirtualenv(False)
sys.exit(0)
env = parse_envname([args.envname], list_and_exit)
# Check if the virtualenv has an associated project directory and in
# this case, use it as the current working directory.
project_dir = get_project_dir(env)
if project_dir is None or args.no_cd:
project_dir = os.getcwd()
return shell(env, cwd=project_dir)
def sitepackages_dir(env=os.environ.get('VIRTUAL_ENV')):
    """Return the site-packages Path of *env* (default: active virtualenv).

    NOTE(review): the default is captured from VIRTUAL_ENV at import time,
    not at call time — presumably fine for a CLI process, but confirm.
    """
    if not env:
        sys.exit('ERROR: no virtualenv active')
    # Ask the env's own interpreter where its site-packages lives.
    env_python = workon_home / env / env_bin_dir / 'python'
    probe = 'import distutils; print(distutils.sysconfig.get_python_lib())'
    return Path(invoke(str(env_python), '-c', probe).out)
def add_cmd(argv):
    """Add the specified directories to the Python path for the currently active virtualenv.

    This will be done by placing the directory names in a path file named
    "virtualenv_path_extensions.pth" inside the virtualenv's site-packages
    directory; if this file does not exists, it will be created first.
    """
    parser = argparse.ArgumentParser()
    # -d inverts the operation: remove the given dirs instead of adding them.
    parser.add_argument('-d', dest='remove', action='store_true')
    parser.add_argument('dirs', nargs='+')
    args = parser.parse_args(argv)

    extra_paths = sitepackages_dir() / '_virtualenv_path_extensions.pth'
    # Entries are stored one absolute path per line, newline included, so
    # membership tests below compare whole lines.
    new_paths = [os.path.abspath(d) + "\n" for d in args.dirs]
    if not extra_paths.exists():
        # Seed the .pth file with the bookkeeping preamble (first line) and
        # trailer that splice the added dirs into sys.path on interpreter start.
        with extra_paths.open('w') as extra:
            extra.write('''import sys; sys.__plen = len(sys.path)
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
''')

    def rewrite(f):
        # Read the whole file, transform its lines with f, write back in place.
        with extra_paths.open('r+') as extra:
            to_write = f(extra.readlines())
            extra.seek(0)
            extra.truncate()
            extra.writelines(to_write)

    if args.remove:
        rewrite(lambda ls: [line for line in ls if line not in new_paths])
    else:
        # Insert new paths right after the preamble line.
        rewrite(lambda lines: lines[0:1] + new_paths + lines[1:])
def sitepackages_dir_cmd(argv):
    """Print the site-packages directory of the active virtualenv."""
    site = sitepackages_dir()
    print(site)
def lssitepackages_cmd(argv):
    """Show the content of the site-packages directory of the current virtualenv."""
    site = sitepackages_dir()
    print(*sorted(site.iterdir()), sep=os.linesep)
    # Also dump any extra sys.path entries registered by `pew add`.
    extra_paths = site / '_virtualenv_path_extensions.pth'
    if extra_paths.exists():
        print('from _virtualenv_path_extensions.pth:')
        with extra_paths.open() as extra:
            print(extra.read())
def toggleglobalsitepackages_cmd(argv):
    """Toggle the current virtualenv between having and not having access to the global site-packages."""
    quiet = argv == ['-q']
    # virtualenv's convention: the presence of this marker file disables
    # access to the global site-packages.
    marker = sitepackages_dir().parent / 'no-global-site-packages.txt'
    if marker.exists():
        marker.unlink()
        if not quiet:
            print('Enabled global site-packages')
    else:
        with marker.open('w'):
            pass
        if not quiet:
            print('Disabled global site-packages')
def cp_cmd(argv):
    """Duplicate the named virtualenv to make a new one."""
    parser = argparse.ArgumentParser()
    # Source may be an env name or a filesystem path (resolved downstream).
    parser.add_argument('source')
    parser.add_argument('target', nargs='?')
    parser.add_argument('-d', '--dont-activate', action='store_false',
                        default=True, dest='activate', help="After \
creation, continue with the existing shell (don't \
activate the new environment).")
    args = parser.parse_args(argv)
    target_name = copy_virtualenv_project(args.source, args.target)
    if args.activate:
        # Drop the user into a shell with the freshly copied env active.
        shell(target_name)
def copy_virtualenv_project(source, target):
    """Clone *source* (a path, or an env name in workon_home) as *target*.

    Returns the name of the newly created env.
    """
    src = expandpath(source)
    if not src.exists():
        # Not a filesystem path; try resolving it as a name in workon_home.
        src = workon_home / source
        if not src.exists():
            sys.exit('Please provide a valid virtualenv to copy')

    target_name = target or src.name
    dest = workon_home / target_name
    if dest.exists():
        sys.exit('%s virtualenv already exists in %s.' % (
            target_name, workon_home
        ))

    print('Copying {0} in {1}'.format(src, target_name))
    clone_virtualenv(str(src), str(dest))
    return target_name
def rename_cmd(argv):
    """Rename a virtualenv"""
    parser = argparse.ArgumentParser()
    parser.add_argument('source')
    parser.add_argument('target')
    args = parser.parse_args(argv)
    # A rename is implemented as copy-to-new-name followed by removal
    # of the original env.
    copy_virtualenv_project(args.source, args.target)
    return rmvirtualenvs([args.source])
def setvirtualenvproject(env, project):
    """Record *project* as the project directory of *env* (.project file)."""
    print('Setting project for {0} to {1}'.format(env, project))
    project_file = workon_home / env / '.project'
    with project_file.open('wb') as fp:
        fp.write(str(project).encode())
def setproject_cmd(argv):
    """Given a virtualenv directory and a project directory, set the \
virtualenv up to be associated with the project."""
    # argv layout: [envname] [project_path]; both optional.
    project = os.path.abspath(argv[1] if len(argv) > 1 else '.')
    env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
    if not env:
        sys.exit('pew setproject [virtualenv] [project_path]')
    if not (workon_home / env).exists():
        sys.exit("Environment '%s' doesn't exist." % env)
    if not os.path.isdir(project):
        sys.exit('pew setproject: %s does not exist' % project)
    setvirtualenvproject(env, project)
def getproject_cmd(argv):
    """Print a virtualenv's project directory, if set.

    If called without providing a virtualenv name as argument, print the
    current virtualenv's project directory.
    """
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description="Print an environment's project directory.",
    )
    parser.add_argument(
        'envname',
        nargs='?',
        # Fall back to the active env when no name is given.
        default=os.environ.get('VIRTUAL_ENV'),
        help=(
            'The name of the environment to return the project directory '
            'for. If omitted, will use the currently active environment.'
        ),
    )
    args = parser.parse_args(argv)
    # Now, do the actual work
    if not args.envname:
        # No argument and no VIRTUAL_ENV in the environment.
        sys.exit('ERROR: no virtualenv active')
    if not (workon_home / args.envname).exists():
        sys.exit("ERROR: Environment '{0}' does not exist."
                 .format(args.envname))
    project_dir = get_project_dir(args.envname)
    if project_dir is None:
        sys.exit("ERROR: no project directory set for Environment '{0}'"
                 .format(args.envname))
    print(project_dir)
def mkproject_cmd(argv):
    """Create a new project directory and its associated virtualenv."""
    if '-l' in argv or '--list' in argv:
        # [9:] strips the "template_" prefix from the directory name.
        templates = [t.name[9:] for t in workon_home.glob("template_*")]
        print("Available project templates:", *templates, sep='\n')
        return

    parser = mkvirtualenv_argparser()
    parser.add_argument('envname')
    parser.add_argument(
        '-t', action='append', default=[], dest='templates', help='Multiple \
templates may be selected. They are applied in the order specified on the \
command line.')
    parser.add_argument(
        '-l', '--list', action='store_true', help='List available templates.')
    args, rest = parser.parse_known_args(argv)

    projects_home = Path(os.environ.get('PROJECT_HOME', '.'))
    if not projects_home.exists():
        sys.exit('ERROR: Projects directory %s does not exist. \
Create it or set PROJECT_HOME to an existing directory.' % projects_home)

    project = (projects_home / args.envname).absolute()
    if project.exists():
        sys.exit('Project %s already exists.' % args.envname)

    # Create the env first, then the project directory, then run templates.
    mkvirtualenv(args.envname, args.python, args.packages, project.absolute(),
                 args.requirements, rest)
    project.mkdir()
    for template_name in args.templates:
        # Each template is an executable run inside the new env with the
        # env name and project path as arguments.
        template = workon_home / ("template_" + template_name)
        inve(args.envname, str(template), args.envname, str(project))
    if args.activate:
        shell(args.envname, cwd=str(project))
def mktmpenv_cmd(argv):
    """Create a temporary virtualenv.

    A random, unused name is generated for the env; it is removed again when
    the spawned shell exits (or creation fails).
    """
    parser = mkvirtualenv_argparser()
    # Find a name not already present under workon_home. '.' always exists,
    # so the loop body runs at least once.
    env = '.'
    while (workon_home / env).exists():
        # BUG FIX: the old code used hex(random.getrandbits(64))[2:-1], a
        # Python 2 idiom that stripped the trailing 'L' of a long literal;
        # on Python 3 the [:-1] silently dropped a real hex digit instead.
        # Format to an explicit 16-digit hex string.
        env = '{0:016x}'.format(random.getrandbits(64))
    args, rest = parser.parse_known_args(argv)
    mkvirtualenv(env, args.python, args.packages, requirements=args.requirements,
                 rest=rest)
    print('This is a temporary environment. It will be deleted when you exit')
    try:
        if args.activate:
            # only used for testing on windows
            shell(env)
    finally:
        # NOTE(review): `return` inside `finally` swallows any in-flight
        # exception; kept as-is for backward compatibility so the temporary
        # env is always removed.
        return rmvirtualenvs([env])
def wipeenv_cmd(argv):
    """Remove all installed packages from the current (or supplied) env."""
    env = argv[0] if argv else os.environ.get('VIRTUAL_ENV')
    if not env:
        sys.exit('ERROR: no virtualenv active')
    elif not (workon_home / env).exists():
        sys.exit("ERROR: Environment '{0}' does not exist.".format(env))
    else:
        env_pip = str(workon_home / env / env_bin_dir / 'pip')
        # `pip freeze` output, one requirement per line.
        all_pkgs = set(invoke(env_pip, 'freeze').out.splitlines())
        # Keep only plain name==version pins; editable/VCS lines are ignored.
        pkgs = set(p for p in all_pkgs if len(p.split("==")) == 2)
        ignored = sorted(all_pkgs - pkgs)
        # Reduce pins to bare package names for uninstalling.
        pkgs = set(p.split("==")[0] for p in pkgs)
        # Never uninstall the env's own bootstrap packages.
        to_remove = sorted(pkgs - set(['distribute', 'wsgiref']))
        if to_remove:
            print("Ignoring:\n %s" % "\n ".join(ignored))
            print("Uninstalling packages:\n %s" % "\n ".join(to_remove))
            return inve(env, 'pip', 'uninstall', '-y', *to_remove)
        else:
            print("Nothing to remove")
def inall_cmd(argv):
    """Run a command in each virtualenv."""
    errors = False
    for env in lsenvs():
        print("\n%s:" % env)
        try:
            inve(env, *argv)
        except CalledProcessError as e:
            # Keep going through the remaining envs, but remember the failure.
            errors = True
            err(e)
    # Non-zero (truthy) exit status when any env's command failed.
    sys.exit(errors)
def in_cmd(argv):
    """Run a command in the given virtualenv."""
    if len(argv) == 1:
        # Only an env name was supplied: behave exactly like `pew workon`.
        return workon_cmd(argv)
    parse_envname(argv, lambda: sys.exit('You must provide a valid virtualenv to target'))
    return inve(*argv)
def restore_cmd(argv):
    """Try to restore a broken virtualenv by reinstalling the same python version on top of it"""
    if not argv:
        sys.exit('You must provide a valid virtualenv to target')
    path = workon_home / argv[0]
    interpreter = path / env_bin_dir / ('python.exe' if windows else 'python')
    # Resolve the symlink to find the concrete interpreter name (e.g. python3.9).
    exact_py = interpreter.resolve().name
    return check_call([sys.executable, "-m", "virtualenv",
                       str(path.absolute()), "--python=%s" % exact_py])
def dir_cmd(argv):
    """Print the path for the virtualenv directory"""
    env = parse_envname(argv, lambda: sys.exit('You must provide a valid virtualenv to target'))
    # parse_envname already guaranteed that this directory exists.
    print(workon_home / env)
def install_cmd(argv):
    '''Use Pythonz to download and build the specified Python version'''
    installer = InstallCommand()
    # Reuse pythonz's own option parser for flags like --type/--verbose.
    options, versions = installer.parser.parse_args(argv)
    if len(versions) != 1:
        # Exactly one version must be requested.
        installer.parser.print_help()
        sys.exit(1)
    else:
        try:
            actual_installer = PythonInstaller.get_installer(versions[0], options)
            return actual_installer.install()
        except AlreadyInstalledError as e:
            # Not an error for our purposes: report and return normally.
            print(e)
def uninstall_cmd(argv):
    '''Use Pythonz to uninstall the specified Python version'''
    command = UninstallCommand()
    return command.run(argv)
def list_pythons_cmd(argv):
    '''List the pythons installed by Pythonz (or all the installable ones)'''
    command = ListPythons()
    return command.run(argv)
def locate_python_cmd(argv):
    '''Locate the path for the python version installed by Pythonz'''
    command = LocatePython()
    return command.run(argv)
def prevent_path_errors():
    # Abort with a diagnostic when VIRTUAL_ENV is set but the env's bin dir
    # is not actually first on PATH (check_path), which indicates a broken
    # or stale activation.
    if 'VIRTUAL_ENV' in os.environ and not check_path():
        sys.exit('''ERROR: The virtualenv hasn't been activated correctly.
Either the env is corrupted (try running `pew restore env`),
Or an upgrade of your Python version broke your env,
Or check the contents of your $PATH. You might be adding new directories to it
from inside your shell's configuration file.
In this case, for further details please see: https://github.com/berdario/pew#the-environment-doesnt-seem-to-be-activated''')
def first_run_setup():
    """Interactive first-run hook: offer to wire pew into the user's shell.

    For a supported shell, either updates its rc file (when it exists) or
    prints the line the user should add manually; then pauses before
    continuing with the originally requested command.
    """
    shell = supported_shell()
    if shell:
        # fish has its own `source` syntax without command substitution quoting.
        if shell == 'fish':
            source_cmd = 'source (pew shell_config)'
        else:
            source_cmd = 'source "$(pew shell_config)"'
        rcpath = expandpath({'bash': '~/.bashrc'
                            , 'zsh': '~/.zshrc'
                            , 'fish': '~/.config/fish/config.fish'}[shell])
        if rcpath.exists():
            update_config_file(rcpath, source_cmd)
        else:
            # BUG FIX: corrected the user-facing typo "competions".
            print("It seems that you're running pew for the first time\n"
                  "If you want source shell completions and update your prompt, "
                  "Add the following line to your shell config file:\n %s" % source_cmd)
    print('\nWill now continue with the command:', *sys.argv[1:])
    input('[enter]')
def update_config_file(rcpath, source_cmd):
    # Append source_cmd to the shell rc file at rcpath, after asking the
    # user for confirmation. Does nothing if the line is already present.
    with rcpath.open('r+') as rcfile:
        if source_cmd not in (line.strip() for line in rcfile.readlines()):
            # Loop until the user answers y, n, or just presses enter
            # (enter means the default: No).
            choice = 'X'
            while choice not in ('y', '', 'n'):
                choice = input("It seems that you're running pew for the first time\n"
                               "do you want to modify %s to source completions and"
                               " update your prompt? [y/N]\n> " % rcpath).lower()
            if choice == 'y':
                # readlines() above consumed the file, so this write appends.
                rcfile.write('\n# added by Pew\n%s\n' % source_cmd)
                print('Done')
            else:
                print('\nOk, if you want to do it manually, just add\n %s\nat'
                      ' the end of %s' % (source_cmd, rcpath))
def print_commands(cmds):
    """Pretty-print the available commands with their one-line summaries."""
    # Width of the "  name: " column, padded so summaries line up.
    longest = max(map(len, cmds)) + 3
    columns, _ = get_terminal_size()
    print('Available commands:\n')
    for name in sorted(cmds):
        handler = cmds[name]
        doc = handler.__doc__
        if doc:
            summary = doc.splitlines()[0]
            print(textwrap.fill(
                summary,
                columns or 1000,
                initial_indent=(' {0}: '.format(name)).ljust(longest),
                subsequent_indent=longest * ' '))
        else:
            print(' ' + name)
def pew():
    # Main entry point: dispatch sys.argv[1] to the matching *_cmd function.
    first_run = makedirs_and_symlink_if_needed(workon_home)
    if first_run and sys.stdin.isatty():
        # Only offer interactive setup when attached to a terminal.
        first_run_setup()

    # Build the command table from every module-level name ending in "_cmd";
    # the [:-4] strips that suffix to get the user-facing command name.
    cmds = dict((cmd[:-4], fun)
                for cmd, fun in globals().items() if cmd.endswith('_cmd'))
    if sys.argv[1:]:
        if sys.argv[1] in cmds:
            command = cmds[sys.argv[1]]
            try:
                return command(sys.argv[2:])
            except CalledProcessError as e:
                # Propagate a subprocess failure as our own exit status.
                return e.returncode
            except KeyboardInterrupt:
                pass
        else:
            err("ERROR: command", sys.argv[1], "does not exist.")
            print_commands(cmds)
            sys.exit(1)
    else:
        # No command given: show the help listing.
        print_commands(cmds)
|
admiralobvious/vyper | vyper/vyper.py | Vyper._get_env | python | def _get_env(self, key):
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key) | Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L109-L116 | null | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
    """Return the value for *key* coerced to bool.

    A string "false" (any case) — as produced by env vars or config
    files — is treated as False rather than a truthy non-empty string.
    """
    value = self.get(key)
    if isinstance(value, str) and value.lower() == 'false':
        return False
    return bool(value)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
    """Return the value for *key* as bytes (UTF-8 encoding of its str form).

    BUG FIX: the old body was ``b"{0}".format(self.get(key))``, which raises
    AttributeError on Python 3 because bytes objects have no .format method.
    """
    return self.get_string(key).encode()
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
    """Return the first existing config file under *path*, or '' if none.

    Extensions are tried in the order of SUPPORTED_EXTENSIONS.
    """
    log.debug("Searching for config in: {0}".format(path))
    for ext in constants.SUPPORTED_EXTENSIONS:
        candidate = "{0}/{1}.{2}".format(path, self._config_name, ext)
        log.debug("Checking for {0}".format(candidate))
        if not util.exists(candidate):
            continue
        log.debug("Found: {0}".format(candidate))
        return candidate
    return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.add_config_path | python | def add_config_path(self, path):
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath) | Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L122-L129 | [
"def abs_pathify(in_path):\n log.info(\"Trying to resolve absolute path to {0}\".format(in_path))\n\n try:\n return pathlib.Path(in_path).resolve()\n except FileNotFoundError as e:\n log.error(\"Couldn\\\"t discover absolute path: {0}\".format(e))\n return \"\"\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.add_remote_provider | python | def add_remote_provider(self, provider, client, path):
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp) | Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp" | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L131-L161 | [
"def _provider_path_exists(self, rp):\n for p in self._remote_providers:\n if p.path == rp.path:\n return True\n return False\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.get | python | def get(self, key):
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val | Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L180-L200 | [
"def _search_dict(self, d, keys):\n if not keys:\n return d\n for key in keys:\n if key in d and not isinstance(d[key], dict):\n return d[key]\n elif key in d:\n return self._search_dict(d[key], keys[1::])\n else:\n return None\n",
"def _find(self, key):\n \"\"\"Given a key, find the value\n Vyper will check in the following order:\n override, arg, env, config file, key/value store, default\n Vyper will check to see if an alias exists first.\n \"\"\"\n key = self._real_key(key)\n\n # OVERRIDES\n val = self._override.get(key)\n if val is not None:\n log.debug(\"{0} found in override: {1}\".format(key, val))\n return val\n\n # ARGS\n val = self._args.get(key)\n if val is not None:\n log.debug(\"{0} found in args: {1}\".format(key, val))\n return val\n\n # ENVIRONMENT VARIABLES\n if self._automatic_env_applied:\n # even if it hasn't been registered, if `automatic_env` is used,\n # check any `get` request\n val = self._get_env(self._merge_with_env_prefix(key))\n if val is not None:\n log.debug(\"{0} found in environment: {1}\".format(key, val))\n return val\n\n env_key = self._find_insensitive(key, self._env)\n log.debug(\"Looking for {0} in env\".format(key))\n if isinstance(env_key, list):\n parent = self._find_insensitive(key, self._config)\n found_in_env = False\n log.debug(\"Found env key parent {0}: {1}\".format(key, parent))\n\n for item in env_key:\n log.debug(\"{0} registered as env var parent {1}:\".format(\n key, item[\"env_key\"]))\n val = self._get_env(item[\"env_key\"])\n\n if val is not None:\n log.debug(\"{0} found in environment: {1}\".format(\n item[\"env_key\"], val))\n temp = parent\n for path in item[\"path\"]:\n real_key = self._find_real_key(path, temp)\n temp = temp[real_key]\n\n self._set_insensitive(item[\"final_key\"], val, temp)\n found_in_env = True\n else:\n log.debug(\"{0} env value unset\".format(item[\"env_key\"]))\n\n if found_in_env:\n return parent\n\n elif env_key is not None:\n log.debug(\"{0} registered as env var: {1}\".format(key, env_key))\n val = self._get_env(env_key)\n if val is not None:\n log.debug(\"{0} found in environment: {1}\".format(env_key, val))\n return val\n else:\n log.debug(\"{0} env value unset\".format(env_key))\n\n # CONFIG FILE\n val 
= self._find_insensitive(key, self._config)\n if val is not None:\n log.debug(\"{0} found in config: {1}\".format(key, val))\n return val\n\n # Test for nested config parameter\n if self._key_delimiter in key:\n path = key.split(self._key_delimiter)\n\n source = self._find(path[0])\n if source is not None and isinstance(source, dict):\n val = self._search_dict(source, path[1::])\n if val is not None:\n log.debug(\"{0} found in nested config: {1}\".format(\n key, val))\n return val\n\n # KEY/VALUE STORE\n val = self._kvstore.get(key)\n if val is not None:\n log.debug(\"{0} found in key/value store: {1}\".format(key, val))\n return val\n\n # DEFAULTS\n val = self._defaults.get(key)\n if val is not None:\n log.debug(\"{0} found in defaults: {1}\".format(key, val))\n return val\n\n return None\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
    def bind_env(self, *input_):
        """Binds a Vyper key to an ENV variable.

        ENV variables are case sensitive.
        If only a key is provided, it will use the env key matching the key,
        uppercased. `env_prefix` will be used when set when env name is not
        provided.

        Args (via *input_):
            input_[0]: the Vyper key to bind (required).
            input_[1]: the environment variable name (optional; defaults
                to the upper-cased key merged with `env_prefix`).

        Returns:
            An error string when no key was given, otherwise None.
        """
        if len(input_) == 0:
            return "bind_env missing key to bind to"
        key = input_[0].lower()
        if len(input_) == 1:
            env_key = self._merge_with_env_prefix(key)
        else:
            env_key = input_[1]
        self._env[key] = env_key
        # For delimited keys (e.g. "db.host") additionally register the
        # binding under the top-level segment ("db") as a list of
        # descriptors, so nested lookups can resolve the env value back
        # into the parent dict (see how `_find` consumes these entries).
        if self._key_delimiter in key:
            parts = input_[0].split(self._key_delimiter)
            env_info = {
                "path": parts[1:-1],      # intermediate segments between root and leaf
                "final_key": parts[-1],   # leaf key to set inside the parent
                "env_key": env_key
            }
            if self._env.get(parts[0]) is None:
                self._env[parts[0]] = [env_info]
            else:
                self._env[parts[0]].append(env_info)
        return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
    def register_alias(self, alias, key):
        """Aliases provide another accessor for the same key.

        This enables one to change a name without breaking the application.

        Args:
            alias: the alternative name to register (lower-cased here).
            key: the real key the alias should resolve to.
        """
        alias = alias.lower()
        key = key.lower()
        # Refuse self-referencing aliases (direct, or via the existing
        # alias chain); they would make `_real_key` chase its own tail.
        if alias != key and alias != self._real_key(key):
            exists = self._aliases.get(alias)
            if exists is None:
                # if we alias something that exists in one of the dicts to
                # another name, we'll never be able to get that value using the
                # original name, so move the config value to the new _real_key.
                # NOTE(review): the `if val:` truthiness test skips migration
                # of falsy stored values (0, "", False) -- presumably
                # intentional upstream; confirm before changing.
                val = self._config.get(alias)
                if val:
                    self._config.pop(alias)
                    self._config[key] = val
                val = self._kvstore.get(alias)
                if val:
                    self._kvstore.pop(alias)
                    self._kvstore[key] = val
                val = self._defaults.get(alias)
                if val:
                    self._defaults.pop(alias)
                    self._defaults[key] = val
                val = self._override.get(alias)
                if val:
                    self._override.pop(alias)
                    self._override[key] = val
            # Register (or re-register) the alias mapping itself.
            self._aliases[alias] = key
        else:
            log.warning("Creating circular reference alias {0} {1} {2}".format(
                alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
    def _get_key_value_config(self):
        """Retrieves the first found remote configuration.

        Only the first registered provider is consulted: the loop body
        returns unconditionally after one iteration. The `raise` below
        is reached only when no remote providers are registered at all.
        """
        for rp in self._remote_providers:
            val = self._get_remote_config(rp)
            self._kvstore = val
            return None
        raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.sub | python | def sub(self, key):
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None | Returns new Vyper instance representing a sub tree of this instance. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L224-L233 | [
"def get(self, key):\n \"\"\"Vyper is essentially repository for configurations.\n `get` can retrieve any value given the key to use.\n `get` has the behavior of returning the value associated with the first\n place from where it is set. Viper will check in the following order:\n override, arg, env, config file, key/value store, default.\n \"\"\"\n path = key.split(self._key_delimiter)\n\n lowercase_key = key.lower()\n val = self._find(lowercase_key)\n\n if val is None:\n source = self._find(path[0].lower())\n if source is not None and isinstance(source, dict):\n val = self._search_dict(source, path[1::])\n\n if val is None:\n return None\n\n return val\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.unmarshall_key | python | def unmarshall_key(self, key, cls):
return setattr(cls, key, self.get(key)) | Takes a single key and unmarshalls it into a class. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L235-L237 | [
"def get(self, key):\n \"\"\"Vyper is essentially repository for configurations.\n `get` can retrieve any value given the key to use.\n `get` has the behavior of returning the value associated with the first\n place from where it is set. Viper will check in the following order:\n override, arg, env, config file, key/value store, default.\n \"\"\"\n path = key.split(self._key_delimiter)\n\n lowercase_key = key.lower()\n val = self._find(lowercase_key)\n\n if val is None:\n source = self._find(path[0].lower())\n if source is not None and isinstance(source, dict):\n val = self._search_dict(source, path[1::])\n\n if val is None:\n return None\n\n return val\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.unmarshall | python | def unmarshall(self, cls):
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls | Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L239-L246 | [
"def all_settings(self, uppercase_keys=False):\n \"\"\"Return all settings as a `dict`.\"\"\"\n d = {}\n\n for k in self.all_keys(uppercase_keys):\n d[k] = self.get(k)\n\n return d\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.bind_env | python | def bind_env(self, *input_):
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None | Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L289-L321 | [
"def _merge_with_env_prefix(self, key):\n if self._env_prefix != \"\":\n return (\"{0}_{1}\".format(self._env_prefix, key)).upper()\n return key.upper()\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
    """Check whether the given key (or an alias of it) is present in
    the config file registry.
    """
    # Resolve any alias to the canonical key before the lookup.
    return self._config.get(self._real_key(key))
def set_default(self, key, value):
    """Set the default value for this key.

    Defaults are only used when no value is provided by the user via
    arg, config or env.
    """
    self._defaults[self._real_key(key.lower())] = value
def set(self, key, value):
    """Set the value for the key in the override register.

    Overrides win over values obtained via args, config file, env,
    defaults or the key/value store.
    """
    self._override[self._real_key(key.lower())] = value
def read_in_config(self):
    """Discover and load the configuration file from disk and
    key/value stores, searching in one of the defined paths.
    """
    log.info("Attempting to read in config file")
    config_type = self._get_config_type()
    if config_type not in constants.SUPPORTED_EXTENSIONS:
        raise errors.UnsupportedConfigError(config_type)
    with open(self._get_config_file()) as fp:
        contents = fp.read()
    # Replace (not merge) the existing config registry.
    self._config = {}
    return self._unmarshall_reader(contents, self._config)
def merge_in_config(self):
    """Load the configuration file from disk and merge it into the
    existing config registry.
    """
    log.info("Attempting to merge in config file")
    config_type = self._get_config_type()
    if config_type not in constants.SUPPORTED_EXTENSIONS:
        raise errors.UnsupportedConfigError(config_type)
    with open(self._get_config_file()) as fp:
        return self.merge_config(fp.read())
def read_config(self, file_):
    """Read a configuration file, setting existing keys to `None`
    if the key does not exist in the file.
    """
    # Parse directly into the config registry.
    self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
    """Parse *file_* and merge the resulting dict into the config
    registry.
    """
    if self._config is None:
        self._config = {}
    parsed = self._unmarshall_reader(file_, {})
    self._merge_dicts(parsed, self._config)
def _merge_dicts(self, src, target):
    """Recursively merge *src* into *target* in place.

    Nested dicts are merged key by key; any other value in *src*
    overwrites (or creates) the corresponding entry in *target*.
    """
    for k, v in src.items():
        # Only recurse when both sides hold a dict; the original code
        # indexed target[k] unconditionally and raised KeyError when
        # the target was missing the key (or TypeError on a non-dict).
        if isinstance(v, dict) and isinstance(target.get(k), dict):
            self._merge_dicts(v, target[k])
        else:
            target[k] = v
def read_remote_config(self):
    """Attempt to fetch configuration from a remote source and read
    it into the remote configuration registry.
    """
    return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
    """Unmarshall *file_* into the dict *d* using the configured format."""
    return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
    """Retrieve the first found remote configuration."""
    for provider in self._remote_providers:
        # Only the first provider is ever consulted.
        self._kvstore = self._get_remote_config(provider)
        return None
    raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
    # Fetch the raw payload and parse it into the key/value registry.
    payload = provider.get()
    self._unmarshall_reader(payload, self._kvstore)
    return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
    """Register *func* (with the given args) to run whenever the
    remote configuration changes.
    """
    self._on_remote_config_change = lambda x: func(*args, **kwargs)
    for provider in self._remote_providers:
        provider.add_listener(self._on_remote_config_change)
    return None
def watch_remote_config(self):
    """Start listening for changes on the first remote provider.

    NOTE(review): the source's indentation is ambiguous here; this
    mirrors `_get_key_value_config` (return after the first provider,
    raise when none are registered) — confirm against upstream if
    every provider should be watched.
    """
    for provider in self._remote_providers:
        provider.add_listener()
        return None
    raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
    """Return all keys regardless of where they are set."""
    registries = (self._override, self._args, self._env, self._config,
                  self._kvstore, self._defaults, self._aliases)
    merged = {}
    for registry in registries:
        for key in registry:
            merged[key.upper() if uppercase_keys else key.lower()] = {}
    return merged.keys()
def all_settings(self, uppercase_keys=False):
    """Return all settings as a `dict`."""
    return {k: self.get(k) for k in self.all_keys(uppercase_keys)}
def set_config_name(self, name):
    """Set the name (without extension) of the config file to look for."""
    self._config_name = name
    # Invalidate any previously resolved config file path.
    self._config_file = ""
def set_config_type(self, type_):
    """Set the type of the configuration returned by the remote
    source, e.g. "json".
    """
    self._config_type = type_
def _get_config_type(self):
    """Return the configuration format, e.g. "json".

    An explicitly set type wins; otherwise the format is derived from
    the config file's extension (without the leading dot), or "" when
    there is no extension.
    """
    if self._config_type != "":
        return self._config_type
    # os.path.splitext always yields a (root, ext) pair, so test the
    # extension string itself; the original `len(ext) > 1` check on
    # the tuple was always true.
    _, ext = os.path.splitext(self._get_config_file())
    return ext[1:] if ext else ""
def _get_config_file(self):
    # Return the cached path when it has already been resolved.
    if self._config_file != "":
        return self._config_file
    try:
        self._config_file = self._find_config_file()
    except errors.ConfigFileNotFoundError:
        return ""
    return self._config_file
def _search_in_path(self, path):
    """Return the first existing config file under *path*, or ""."""
    log.debug("Searching for config in: {0}".format(path))
    for ext in constants.SUPPORTED_EXTENSIONS:
        candidate = "{0}/{1}.{2}".format(path, self._config_name, ext)
        log.debug("Checking for {0}".format(candidate))
        if util.exists(candidate):
            log.debug("Found: {0}".format(candidate))
            return candidate
    return ""
def _find_config_file(self):
    """Search all `config_paths` for any config file.

    Returns the first path that exists (and is a config file);
    raises ConfigFileNotFoundError when none is found.
    """
    log.info("Searching for config in: {0}".format(
        ", ".join(str(p) for p in self._config_paths)))
    for search_path in self._config_paths:
        found = self._search_in_path(search_path)
        if found:
            return found
    raise errors.ConfigFileNotFoundError(
        self._config_name, self._config_paths)
def debug(self):  # pragma: no cover
    """Print all configuration registries for debugging purposes."""
    registries = (
        ("Aliases:", self._aliases),
        ("Override:", self._override),
        ("Args:", self._args),
        ("Env:", self._env),
        ("Config:", self._config),
        ("Key/Value Store:", self._kvstore),
        ("Defaults:", self._defaults),
    )
    for label, registry in registries:
        print(label)
        pprint.pprint(registry)
|
admiralobvious/vyper | vyper/vyper.py | Vyper._find | python | def _find(self, key):
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None | Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L340-L436 | [
"def _merge_with_env_prefix(self, key):\n if self._env_prefix != \"\":\n return (\"{0}_{1}\".format(self._env_prefix, key)).upper()\n return key.upper()\n",
"def _get_env(self, key):\n \"\"\"Wrapper around os.getenv() which replaces characters\n in the original key. This allows env vars which have different keys\n than the config object keys.\n \"\"\"\n if self._env_key_replacer is not None:\n key = key.replace(*self._env_key_replacer)\n return os.getenv(key)\n",
"def _search_dict(self, d, keys):\n if not keys:\n return d\n for key in keys:\n if key in d and not isinstance(d[key], dict):\n return d[key]\n elif key in d:\n return self._search_dict(d[key], keys[1::])\n else:\n return None\n",
"def _find_real_key(self, key, source):\n return next(\n (real for real in source.keys() if real.lower() == key.lower()),\n None)\n",
"def _find_insensitive(self, key, source):\n real_key = self._find_real_key(key, source)\n return source.get(real_key)\n",
"def _set_insensitive(self, key, val, source):\n real_key = self._find_real_key(key, source)\n if real_key is None:\n msg = \"No case insensitive variant of {0} found.\".format(key)\n raise KeyError(msg)\n\n source[real_key] = val\n",
"def _find(self, key):\n \"\"\"Given a key, find the value\n Vyper will check in the following order:\n override, arg, env, config file, key/value store, default\n Vyper will check to see if an alias exists first.\n \"\"\"\n key = self._real_key(key)\n\n # OVERRIDES\n val = self._override.get(key)\n if val is not None:\n log.debug(\"{0} found in override: {1}\".format(key, val))\n return val\n\n # ARGS\n val = self._args.get(key)\n if val is not None:\n log.debug(\"{0} found in args: {1}\".format(key, val))\n return val\n\n # ENVIRONMENT VARIABLES\n if self._automatic_env_applied:\n # even if it hasn't been registered, if `automatic_env` is used,\n # check any `get` request\n val = self._get_env(self._merge_with_env_prefix(key))\n if val is not None:\n log.debug(\"{0} found in environment: {1}\".format(key, val))\n return val\n\n env_key = self._find_insensitive(key, self._env)\n log.debug(\"Looking for {0} in env\".format(key))\n if isinstance(env_key, list):\n parent = self._find_insensitive(key, self._config)\n found_in_env = False\n log.debug(\"Found env key parent {0}: {1}\".format(key, parent))\n\n for item in env_key:\n log.debug(\"{0} registered as env var parent {1}:\".format(\n key, item[\"env_key\"]))\n val = self._get_env(item[\"env_key\"])\n\n if val is not None:\n log.debug(\"{0} found in environment: {1}\".format(\n item[\"env_key\"], val))\n temp = parent\n for path in item[\"path\"]:\n real_key = self._find_real_key(path, temp)\n temp = temp[real_key]\n\n self._set_insensitive(item[\"final_key\"], val, temp)\n found_in_env = True\n else:\n log.debug(\"{0} env value unset\".format(item[\"env_key\"]))\n\n if found_in_env:\n return parent\n\n elif env_key is not None:\n log.debug(\"{0} registered as env var: {1}\".format(key, env_key))\n val = self._get_env(env_key)\n if val is not None:\n log.debug(\"{0} found in environment: {1}\".format(env_key, val))\n return val\n else:\n log.debug(\"{0} env value unset\".format(env_key))\n\n # CONFIG FILE\n val 
= self._find_insensitive(key, self._config)\n if val is not None:\n log.debug(\"{0} found in config: {1}\".format(key, val))\n return val\n\n # Test for nested config parameter\n if self._key_delimiter in key:\n path = key.split(self._key_delimiter)\n\n source = self._find(path[0])\n if source is not None and isinstance(source, dict):\n val = self._search_dict(source, path[1::])\n if val is not None:\n log.debug(\"{0} found in nested config: {1}\".format(\n key, val))\n return val\n\n # KEY/VALUE STORE\n val = self._kvstore.get(key)\n if val is not None:\n log.debug(\"{0} found in key/value store: {1}\".format(key, val))\n return val\n\n # DEFAULTS\n val = self._defaults.get(key)\n if val is not None:\n log.debug(\"{0} found in defaults: {1}\".format(key, val))\n return val\n\n return None\n",
"def _real_key(self, key):\n new_key = self._aliases.get(key)\n if new_key is not None:\n return self._real_key(new_key)\n else:\n return key\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.is_set | python | def is_set(self, key):
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None | Check to see if the key has been set in any of the data locations. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L438-L451 | [
"def _search_dict(self, d, keys):\n if not keys:\n return d\n for key in keys:\n if key in d and not isinstance(d[key], dict):\n return d[key]\n elif key in d:\n return self._search_dict(d[key], keys[1::])\n else:\n return None\n",
"def _find(self, key):\n \"\"\"Given a key, find the value\n Vyper will check in the following order:\n override, arg, env, config file, key/value store, default\n Vyper will check to see if an alias exists first.\n \"\"\"\n key = self._real_key(key)\n\n # OVERRIDES\n val = self._override.get(key)\n if val is not None:\n log.debug(\"{0} found in override: {1}\".format(key, val))\n return val\n\n # ARGS\n val = self._args.get(key)\n if val is not None:\n log.debug(\"{0} found in args: {1}\".format(key, val))\n return val\n\n # ENVIRONMENT VARIABLES\n if self._automatic_env_applied:\n # even if it hasn't been registered, if `automatic_env` is used,\n # check any `get` request\n val = self._get_env(self._merge_with_env_prefix(key))\n if val is not None:\n log.debug(\"{0} found in environment: {1}\".format(key, val))\n return val\n\n env_key = self._find_insensitive(key, self._env)\n log.debug(\"Looking for {0} in env\".format(key))\n if isinstance(env_key, list):\n parent = self._find_insensitive(key, self._config)\n found_in_env = False\n log.debug(\"Found env key parent {0}: {1}\".format(key, parent))\n\n for item in env_key:\n log.debug(\"{0} registered as env var parent {1}:\".format(\n key, item[\"env_key\"]))\n val = self._get_env(item[\"env_key\"])\n\n if val is not None:\n log.debug(\"{0} found in environment: {1}\".format(\n item[\"env_key\"], val))\n temp = parent\n for path in item[\"path\"]:\n real_key = self._find_real_key(path, temp)\n temp = temp[real_key]\n\n self._set_insensitive(item[\"final_key\"], val, temp)\n found_in_env = True\n else:\n log.debug(\"{0} env value unset\".format(item[\"env_key\"]))\n\n if found_in_env:\n return parent\n\n elif env_key is not None:\n log.debug(\"{0} registered as env var: {1}\".format(key, env_key))\n val = self._get_env(env_key)\n if val is not None:\n log.debug(\"{0} found in environment: {1}\".format(env_key, val))\n return val\n else:\n log.debug(\"{0} env value unset\".format(env_key))\n\n # CONFIG FILE\n val 
= self._find_insensitive(key, self._config)\n if val is not None:\n log.debug(\"{0} found in config: {1}\".format(key, val))\n return val\n\n # Test for nested config parameter\n if self._key_delimiter in key:\n path = key.split(self._key_delimiter)\n\n source = self._find(path[0])\n if source is not None and isinstance(source, dict):\n val = self._search_dict(source, path[1::])\n if val is not None:\n log.debug(\"{0} found in nested config: {1}\".format(\n key, val))\n return val\n\n # KEY/VALUE STORE\n val = self._kvstore.get(key)\n if val is not None:\n log.debug(\"{0} found in key/value store: {1}\".format(key, val))\n return val\n\n # DEFAULTS\n val = self._defaults.get(key)\n if val is not None:\n log.debug(\"{0} found in defaults: {1}\".format(key, val))\n return val\n\n return None\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
    def bind_env(self, *input_):
        """Binds a Vyper key to a ENV variable.
        ENV variables are case sensitive.
        If only a key is provided, it will use the env key matching the key,
        uppercased.
        `env_prefix` will be used when set when env name is not provided.
        """
        if len(input_) == 0:
            return "bind_env missing key to bind to"
        key = input_[0].lower()
        if len(input_) == 1:
            # No explicit env var name supplied: derive one from the key
            # (uppercased, optional env prefix prepended).
            env_key = self._merge_with_env_prefix(key)
        else:
            env_key = input_[1]
        self._env[key] = env_key
        if self._key_delimiter in key:
            # Nested key ("a.b.c"): additionally register the binding under
            # the top-level segment so _find() can resolve children through
            # their parent dict.
            parts = input_[0].split(self._key_delimiter)
            env_info = {
                "path": parts[1:-1],
                "final_key": parts[-1],
                "env_key": env_key
            }
            if self._env.get(parts[0]) is None:
                self._env[parts[0]] = [env_info]
            else:
                self._env[parts[0]].append(env_info)
        return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
    def _find(self, key):
        """Given a key, find the value
        Vyper will check in the following order:
        override, arg, env, config file, key/value store, default
        Vyper will check to see if an alias exists first.
        """
        # Resolve aliases before consulting any registry.
        key = self._real_key(key)
        # OVERRIDES
        val = self._override.get(key)
        if val is not None:
            log.debug("{0} found in override: {1}".format(key, val))
            return val
        # ARGS
        val = self._args.get(key)
        if val is not None:
            log.debug("{0} found in args: {1}".format(key, val))
            return val
        # ENVIRONMENT VARIABLES
        if self._automatic_env_applied:
            # even if it hasn't been registered, if `automatic_env` is used,
            # check any `get` request
            val = self._get_env(self._merge_with_env_prefix(key))
            if val is not None:
                log.debug("{0} found in environment: {1}".format(key, val))
                return val
        env_key = self._find_insensitive(key, self._env)
        log.debug("Looking for {0} in env".format(key))
        if isinstance(env_key, list):
            # A list means *key* is the parent of nested bindings created by
            # bind_env(); resolve each child env var and splice it into the
            # parent dict taken from the config registry.
            parent = self._find_insensitive(key, self._config)
            found_in_env = False
            log.debug("Found env key parent {0}: {1}".format(key, parent))
            for item in env_key:
                log.debug("{0} registered as env var parent {1}:".format(
                    key, item["env_key"]))
                val = self._get_env(item["env_key"])
                if val is not None:
                    log.debug("{0} found in environment: {1}".format(
                        item["env_key"], val))
                    # Walk the intermediate path segments down to the dict
                    # owning the leaf, then set the leaf in place.
                    temp = parent
                    for path in item["path"]:
                        real_key = self._find_real_key(path, temp)
                        temp = temp[real_key]
                    self._set_insensitive(item["final_key"], val, temp)
                    found_in_env = True
                else:
                    log.debug("{0} env value unset".format(item["env_key"]))
            if found_in_env:
                return parent
        elif env_key is not None:
            log.debug("{0} registered as env var: {1}".format(key, env_key))
            val = self._get_env(env_key)
            if val is not None:
                log.debug("{0} found in environment: {1}".format(env_key, val))
                return val
            else:
                log.debug("{0} env value unset".format(env_key))
        # CONFIG FILE
        val = self._find_insensitive(key, self._config)
        if val is not None:
            log.debug("{0} found in config: {1}".format(key, val))
            return val
        # Test for nested config parameter
        if self._key_delimiter in key:
            path = key.split(self._key_delimiter)
            source = self._find(path[0])
            if source is not None and isinstance(source, dict):
                val = self._search_dict(source, path[1::])
                if val is not None:
                    log.debug("{0} found in nested config: {1}".format(
                        key, val))
                    return val
        # KEY/VALUE STORE
        val = self._kvstore.get(key)
        if val is not None:
            log.debug("{0} found in key/value store: {1}".format(key, val))
            return val
        # DEFAULTS
        val = self._defaults.get(key)
        if val is not None:
            log.debug("{0} found in defaults: {1}".format(key, val))
            return val
        return None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
    def watch_remote_config(self):
        # Attach a change listener to the remote configuration providers.
        # NOTE(review): the ``return None`` inside the loop means only the
        # FIRST registered provider gets a listener, and the ``raise`` below
        # fires only when no providers are registered at all — confirm this
        # is intentional (it mirrors _get_key_value_config's shape).
        for rp in self._remote_providers:
            rp.add_listener()
            return None
        raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.register_alias | python | def register_alias(self, alias, key):
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key))) | Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L466-L499 | [
"def _real_key(self, key):\n new_key = self._aliases.get(key)\n if new_key is not None:\n return self._real_key(new_key)\n else:\n return key\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.in_config | python | def in_config(self, key):
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists | Check to see if the given key (or an alias) is in the config file. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L508-L515 | [
"def _real_key(self, key):\n new_key = self._aliases.get(key)\n if new_key is not None:\n return self._real_key(new_key)\n else:\n return key\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.set_default | python | def set_default(self, key, value):
k = self._real_key(key.lower())
self._defaults[k] = value | Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L517-L523 | [
"def _real_key(self, key):\n new_key = self._aliases.get(key)\n if new_key is not None:\n return self._real_key(new_key)\n else:\n return key\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
    """Given a key, find the value
    Vyper will check in the following order:
    override, arg, env, config file, key/value store, default
    Vyper will check to see if an alias exists first.
    """
    key = self._real_key(key)
    # OVERRIDES
    val = self._override.get(key)
    if val is not None:
        log.debug("{0} found in override: {1}".format(key, val))
        return val
    # ARGS
    val = self._args.get(key)
    if val is not None:
        log.debug("{0} found in args: {1}".format(key, val))
        return val
    # ENVIRONMENT VARIABLES
    if self._automatic_env_applied:
        # even if it hasn't been registered, if `automatic_env` is used,
        # check any `get` request
        val = self._get_env(self._merge_with_env_prefix(key))
        if val is not None:
            log.debug("{0} found in environment: {1}".format(key, val))
            return val
    env_key = self._find_insensitive(key, self._env)
    log.debug("Looking for {0} in env".format(key))
    # a list value means `key` is the *parent* of one or more nested
    # bindings registered by bind_env() (its env_info records)
    if isinstance(env_key, list):
        parent = self._find_insensitive(key, self._config)
        found_in_env = False
        log.debug("Found env key parent {0}: {1}".format(key, parent))
        for item in env_key:
            log.debug("{0} registered as env var parent {1}:".format(
                key, item["env_key"]))
            val = self._get_env(item["env_key"])
            if val is not None:
                log.debug("{0} found in environment: {1}".format(
                    item["env_key"], val))
                # graft the env value into the config sub-dict at the
                # nested position recorded in item["path"]; note this
                # mutates self._config's nested structure in place
                temp = parent
                for path in item["path"]:
                    real_key = self._find_real_key(path, temp)
                    temp = temp[real_key]
                self._set_insensitive(item["final_key"], val, temp)
                found_in_env = True
            else:
                log.debug("{0} env value unset".format(item["env_key"]))
        if found_in_env:
            return parent
    elif env_key is not None:
        log.debug("{0} registered as env var: {1}".format(key, env_key))
        val = self._get_env(env_key)
        if val is not None:
            log.debug("{0} found in environment: {1}".format(env_key, val))
            return val
        else:
            log.debug("{0} env value unset".format(env_key))
    # CONFIG FILE
    val = self._find_insensitive(key, self._config)
    if val is not None:
        log.debug("{0} found in config: {1}".format(key, val))
        return val
    # Test for nested config parameter
    if self._key_delimiter in key:
        path = key.split(self._key_delimiter)
        source = self._find(path[0])
        if source is not None and isinstance(source, dict):
            val = self._search_dict(source, path[1::])
            if val is not None:
                log.debug("{0} found in nested config: {1}".format(
                    key, val))
                return val
    # KEY/VALUE STORE
    val = self._kvstore.get(key)
    if val is not None:
        log.debug("{0} found in key/value store: {1}".format(key, val))
        return val
    # DEFAULTS
    val = self._defaults.get(key)
    if val is not None:
        log.debug("{0} found in defaults: {1}".format(key, val))
        return val
    return None
def is_set(self, key):
    """Return True when ``key`` resolves to a value in any data source,
    including nested keys like "a.b"."""
    found = self._find(key.lower())
    if found is None:
        segments = key.split(self._key_delimiter)
        root = self._find(segments[0].lower())
        if isinstance(root, dict):
            found = self._search_dict(root, segments[1:])
    return found is not None
def automatic_env(self):
    """After this call every lookup also probes a matching ENV variable
    for keys set in config, defaults and args."""
    self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
    """Store an (old, new) substring replacement applied to env keys so
    an environment variable can map to a differently-spelled key."""
    self._env_key_replacer = (old, new)
def register_alias(self, alias, key):
    """Aliases provide another accessor for the same key.

    This enables one to change a name without breaking the application.
    Any value already stored under ``alias`` in one of the registries is
    migrated to ``key`` so lookups through the alias keep working.
    The four copy-pasted migration blocks of the original are folded
    into a single helper applied to each registry.
    """
    alias = alias.lower()
    key = key.lower()
    if alias != key and alias != self._real_key(key):
        if self._aliases.get(alias) is None:
            # If we alias something that exists in one of the registries
            # under another name we would never reach that value through
            # the original name again, so move it to the new real key.
            def _migrate(registry):
                # move registry[alias] (if truthy) over to registry[key]
                val = registry.get(alias)
                if val:
                    registry.pop(alias)
                    registry[key] = val

            for registry in (self._config, self._kvstore,
                             self._defaults, self._override):
                _migrate(registry)
            self._aliases[alias] = key
    else:
        log.warning("Creating circular reference alias {0} {1} {2}".format(
            alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
    """Return the config-file value stored under ``key`` (resolving
    aliases first); None when absent -- truthy iff the key is present."""
    return self._config.get(self._real_key(key))
def set(self, key, value):
    """Store ``value`` in the override registry, which outranks args,
    env, the config file, the key/value store and defaults."""
    self._override[self._real_key(key.lower())] = value
def read_in_config(self):
    """Discover, read and unmarshall the config file from disk.

    Raises UnsupportedConfigError for an unknown extension and replaces
    any previously loaded config registry.
    """
    log.info("Attempting to read in config file")
    config_type = self._get_config_type()
    if config_type not in constants.SUPPORTED_EXTENSIONS:
        raise errors.UnsupportedConfigError(config_type)
    with open(self._get_config_file()) as fp:
        contents = fp.read()
    self._config = {}
    return self._unmarshall_reader(contents, self._config)
def merge_in_config(self):
    """Read the config file from disk and merge it into the existing
    config registry (unlike read_in_config, which replaces it)."""
    log.info("Attempting to merge in config file")
    config_type = self._get_config_type()
    if config_type not in constants.SUPPORTED_EXTENSIONS:
        raise errors.UnsupportedConfigError(config_type)
    with open(self._get_config_file()) as fp:
        contents = fp.read()
    return self.merge_config(contents)
def read_config(self, file_):
    """Unmarshall ``file_`` (raw config data) straight into the config
    registry; keys absent from the input end up unset."""
    self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
    """Unmarshall ``file_`` and recursively merge it into the config
    registry."""
    if self._config is None:
        self._config = {}
    parsed = self._unmarshall_reader(file_, {})
    self._merge_dicts(parsed, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
    """Fetch configuration from the remote provider(s) into the
    key/value registry."""
    return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
    """Unmarshall a file into a `dict`.

    Delegates to ``util.unmarshall_config_reader`` using the currently
    configured (or extension-inferred) config type, e.g. "json".
    """
    return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
    """Retrieves the first found remote configuration.

    NOTE(review): the unconditional ``return`` inside the loop means
    only the first registered remote provider is ever consulted; the
    trailing ``raise`` fires solely when no providers are registered.
    """
    for rp in self._remote_providers:
        val = self._get_remote_config(rp)
        self._kvstore = val
        return None
    raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
    """Register ``func(*args, **kwargs)`` to run whenever a remote
    provider reports a configuration change."""
    self._on_remote_config_change = lambda x: func(*args, **kwargs)
    for provider in self._remote_providers:
        provider.add_listener(self._on_remote_config_change)
    return None
def watch_remote_config(self):
    """Attach a change listener to the remote provider."""
    # NOTE(review): the return inside the loop means only the first
    # provider is watched, and add_listener() is called with no callback
    # (contrast on_remote_config_change); the raise below fires only
    # when no providers are registered.
    for rp in self._remote_providers:
        rp.add_listener()
        return None
    raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
    """Return all keys regardless of where they are set.

    Keys are lowercased by default; pass ``uppercase_keys=True`` for
    uppercase names.
    """
    merged = {}
    for registry in (self._override, self._args, self._env, self._config,
                     self._kvstore, self._defaults, self._aliases):
        for k in registry:
            merged[k.upper() if uppercase_keys else k.lower()] = {}
    return merged.keys()
def all_settings(self, uppercase_keys=False):
    """Return every known setting as a key -> resolved-value dict."""
    return {key: self.get(key) for key in self.all_keys(uppercase_keys)}
def set_config_name(self, name):
    """Set the config file's base name (no extension) and invalidate
    any previously resolved config file path."""
    self._config_name = name
    self._config_file = ""
def set_config_type(self, type_):
    """Force the type of configuration returned by the remote source
    (e.g. "json"), bypassing extension-based inference."""
    self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
    """Return the first existing config file under ``path``, or "".

    Tries every supported extension in order. Uses ``os.path.join``
    instead of the original hand-built "{0}/{1}" string so paths are
    portable across platforms.
    """
    log.debug("Searching for config in: {0}".format(path))
    for ext in constants.SUPPORTED_EXTENSIONS:
        full_path = os.path.join(
            path, "{0}.{1}".format(self._config_name, ext))
        log.debug("Checking for {0}".format(full_path))
        if util.exists(full_path):
            log.debug("Found: {0}".format(full_path))
            return full_path
    return ""
def _find_config_file(self):
    """Search every configured path for a config file.

    Returns the first hit; raises ConfigFileNotFoundError otherwise.
    """
    log.info("Searching for config in: {0}".format(
        ", ".join(str(p) for p in self._config_paths)))
    for candidate in self._config_paths:
        found = self._search_in_path(candidate)
        if found:
            return found
    raise errors.ConfigFileNotFoundError(
        self._config_name, self._config_paths)
def debug(self):  # pragma: no cover
    """Prints all configuration registries for debugging purposes.

    Output goes straight to stdout, one labelled section per registry.
    """
    print("Aliases:")
    pprint.pprint(self._aliases)
    print("Override:")
    pprint.pprint(self._override)
    print("Args:")
    pprint.pprint(self._args)
    print("Env:")
    pprint.pprint(self._env)
    print("Config:")
    pprint.pprint(self._config)
    print("Key/Value Store:")
    pprint.pprint(self._kvstore)
    print("Defaults:")
    pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.set | python | def set(self, key, value):
k = self._real_key(key.lower())
self._override[k] = value | Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L525-L531 | [
"def _real_key(self, key):\n new_key = self._aliases.get(key)\n if new_key is not None:\n return self._real_key(new_key)\n else:\n return key\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
    """Create an empty registry set.

    :param config_name: base name (no extension) of the config file.
    :param key_delimiter: separator used in nested keys, e.g. "a.b.c".
    """
    # Delimiter that separates a list of keys
    # used to access a nested value in one go.
    self._key_delimiter = key_delimiter
    # A set of paths to look for the config file in.
    self._config_paths = []
    # A set of remote providers to search for the configuration.
    self._remote_providers = []
    # Name of file to look for inside the path.
    self._config_name = config_name
    self._config_file = ""   # resolved path, cached by _get_config_file()
    self._config_type = ""   # explicit type; otherwise inferred from extension
    self._env_prefix = ""    # prepended (uppercased) to env var names
    self._automatic_env_applied = False
    self._env_key_replacer = None  # (old, new) pair applied to env keys
    # one registry dict per configuration source
    self._aliases = {}
    self._override = {}
    self._args = {}
    self._env = {}
    self._config = {}
    self._kvstore = {}
    self._defaults = {}
    self._on_config_change = None         # callback for local file changes
    self._on_remote_config_change = None  # callback for remote changes
    self.parse_argv_disabled = False      # when True, never read sys.argv
def on_config_change(self, func, *args, **kwargs):
    """Register ``func(*args, **kwargs)`` to run when the watched
    config file changes."""
    def _callback():
        return func(*args, **kwargs)
    self._on_config_change = _callback
def watch_config(self):
    """Start a filesystem watcher on the resolved config file."""
    watcher = watch.get_watcher(self._get_config_file(), self)
    watcher.start()
def set_config_file(self, file_):
    """Explicitly set the config file path; config-path searching is
    then skipped entirely."""
    self._config_file = file_
def set_env_prefix(self, prefix):
    """Set the prefix for environment variable lookups.

    With prefix "spf", keys are looked up as env vars named "SPF_<KEY>".
    """
    self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
    """Return the path of the file that populated the config registry."""
    return self._config_file
def add_config_path(self, path):
    """Append ``path`` (absolutized) to the config search paths.

    Duplicates are ignored; call repeatedly to add several paths.
    """
    abspath = util.abs_pathify(path)
    if abspath in self._config_paths:
        return
    log.info("Adding {0} to paths to search".format(abspath))
    self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
    """Adds a remote configuration source.
    Remote Providers are searched in the order they are added.
    provider is a string value, "etcd", "consul" and "zookeeper" are
    currently supported.
    client is a client object
    path is the path in the k/v store to retrieve configuration
    To retrieve a config file called myapp.json from /configs/myapp.json
    you should set path to /configs and set config name (set_config_name)
    to "myapp"
    """
    if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
        raise errors.UnsupportedRemoteProviderError(provider)
    # build a human-readable host string for the log line; the attribute
    # layout differs per client library
    host = ""
    if provider == "etcd":
        host = "{0}://{1}:{2}".format(client.protocol, client.host,
                                      client.port)
    elif provider == "consul":
        host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
                                      client.http.port)
    elif provider == "zookeeper":
        # NOTE(review): assumes client.hosts yields (host, port) pairs
        # -- confirm against the zookeeper client in use.
        host = ",".join(
            str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
    log.info("Adding {0}:{1} to remote provider list".format(
        provider, host))
    rp = remote.RemoteProvider(provider, client, path, self)
    # providers are identified by their path; duplicates are ignored
    if not self._provider_path_exists(rp):
        self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
    """Retrieve the value for ``key`` from the highest-priority source.

    Sources are consulted in order: override, arg, env, config file,
    key/value store, default. Nested keys use the key delimiter
    (e.g. "a.b"). Returns None when the key is found nowhere.
    """
    segments = key.split(self._key_delimiter)
    value = self._find(key.lower())
    if value is not None:
        return value
    # fall back to walking a nested structure rooted at the first segment
    root = self._find(segments[0].lower())
    if isinstance(root, dict):
        return self._search_dict(root, segments[1:])
    return None
def get_string(self, key):
    """Return the value for ``key`` coerced to ``str``."""
    value = self.get(key)
    return str(value)
def get_bool(self, key):
    """Return the value for ``key`` coerced to ``bool``.

    The string "false" (any case) is treated as False; every other
    value goes through the usual truthiness rules.
    """
    value = self.get(key)
    if isinstance(value, str) and value.lower() == "false":
        return False
    return bool(value)
def get_int(self, key):
    """Return the value for ``key`` coerced to ``int``."""
    value = self.get(key)
    return int(value)
def get_float(self, key):
    """Return the value for ``key`` coerced to ``float``."""
    value = self.get(key)
    return float(value)
def get_unicode(self, key):
    """Return the value for ``key`` coerced to unicode text."""
    # NOTE(review): ``text`` is presumably a py2/py3 compat alias
    # (unicode/str) imported at module level -- confirm.
    return text(self.get(key))
def get_bytes(self, key):
    """Return the value for ``key`` formatted as ``bytes``.

    Bug fix: the original ``b"{0}".format(...)`` raises AttributeError
    on Python 3 (``bytes`` has no ``format`` method). Format as ``str``
    first, then encode.
    """
    return "{0}".format(self.get(key)).encode()
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
    """Copy a single resolved config value onto ``cls`` as attribute
    ``key``; returns None (setattr's result)."""
    value = self.get(key)
    return setattr(cls, key, value)
def unmarshall(self, cls):
    """Copy every known setting onto ``cls`` as attributes and return
    ``cls``; attribute names must match the setting keys."""
    for name, value in self.all_settings().items():
        setattr(cls, name, value)
    return cls
def bind_args(self, parser):
    """Bind either an argparse parser (defaults plus explicit values)
    or a plain mapping of key -> value."""
    if isinstance(parser, argparse.ArgumentParser):
        return self._bind_parser_values(parser)
    return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
    """Set the default value for this key.

    Defaults are consulted only when no value is provided via args,
    config file, env or the key/value store.
    """
    self._defaults[self._real_key(key.lower())] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.read_in_config | python | def read_in_config(self):
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config) | Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L533-L546 | [
"def _get_config_type(self):\n if self._config_type != \"\":\n return self._config_type\n\n cf = self._get_config_file()\n ext = os.path.splitext(cf)\n\n if len(ext) > 1:\n return ext[1][1:]\n else:\n return \"\"\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
    """Add *path* to the list of directories searched for the config file.

    May be called multiple times to define multiple search paths.
    Duplicates (after normalisation via ``util.abs_pathify``) are ignored.
    """
    abspath = util.abs_pathify(path)
    if abspath not in self._config_paths:
        log.info("Adding {0} to paths to search".format(abspath))
        self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
    """Register a remote configuration source.

    Remote providers are searched in the order they are added.

    Args:
        provider: one of "etcd", "consul" or "zookeeper".
        client: the provider's client object.
        path: path in the k/v store holding the configuration.  E.g. to
            read /configs/myapp.json, use path="/configs" and
            set_config_name("myapp").

    Raises:
        UnsupportedRemoteProviderError: for any other provider name.
    """
    if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
        raise errors.UnsupportedRemoteProviderError(provider)
    # Build a human-readable endpoint string purely for the log line
    # below; each client type exposes its endpoint differently.
    host = ""
    if provider == "etcd":
        host = "{0}://{1}:{2}".format(client.protocol, client.host,
                                      client.port)
    elif provider == "consul":
        host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
                                      client.http.port)
    elif provider == "zookeeper":
        host = ",".join(
            str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
    log.info("Adding {0}:{1} to remote provider list".format(
        provider, host))
    rp = remote.RemoteProvider(provider, client, path, self)
    # Providers whose path is already registered are silently ignored.
    if not self._provider_path_exists(rp):
        self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
    """Whether a remote provider with the same path is already registered."""
    return any(provider.path == rp.path
               for provider in self._remote_providers)
def _search_dict(self, d, keys):
    """Walk nested dicts in *d* along *keys*.

    Returns *d* itself when *keys* is empty, the leaf value once a
    non-dict is reached, or None when the first segment is missing.
    (Only the first key segment is ever inspected at each level — the
    original loop returned on its first iteration in every branch.)
    """
    if not keys:
        return d
    head = keys[0]
    if head not in d:
        return None
    value = d[head]
    if isinstance(value, dict):
        return self._search_dict(value, keys[1:])
    return value
def get(self, key):
    """Retrieve the value registered under *key*.

    Returns the value from the first registry that defines it, checking
    in priority order: override, arg, env, config file, key/value
    store, default.  Nested values may be addressed with the key
    delimiter (e.g. "a.b").  Returns None when the key is unset
    everywhere.
    """
    val = self._find(key.lower())
    if val is None:
        # Fall back to a nested lookup rooted at the first key segment.
        parts = key.split(self._key_delimiter)
        root = self._find(parts[0].lower())
        if isinstance(root, dict):
            val = self._search_dict(root, parts[1:])
    return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
    """Return the value for *key* coerced to bool.

    The string "false" (any case) maps to False; everything else uses
    standard truthiness.
    """
    val = self.get(key)
    if isinstance(val, str) and val.lower() == "false":
        return False
    return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
    """Return the value for *key* as UTF-8 encoded ``bytes``.

    Fix: the previous ``b"{0}".format(...)`` is a Python 2 idiom —
    ``bytes`` has no ``format`` method on Python 3, so it raised
    AttributeError there.
    """
    return str(self.get(key)).encode("utf-8")
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
    """Copy every known setting onto *cls* as attributes and return it.

    Make sure the attribute names on the class line up with the
    setting keys.
    """
    settings = self.all_settings()
    for name in settings:
        setattr(cls, name, settings[name])
    return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
    """Bind a Vyper key to an ENV variable.

    ENV variable names are case sensitive.  With a single argument the
    env key is derived from the key itself (uppercased, with
    ``env_prefix`` applied); with two arguments the second one is used
    verbatim as the env variable name.

    Returns an error string when called with no arguments, else None.
    """
    if len(input_) == 0:
        return "bind_env missing key to bind to"
    key = input_[0].lower()
    if len(input_) == 1:
        env_key = self._merge_with_env_prefix(key)
    else:
        env_key = input_[1]
    self._env[key] = env_key
    # For nested keys ("a.b.c"), additionally record under the root
    # segment the path to the leaf, so _find() can splice env values
    # back into the nested config dict.
    if self._key_delimiter in key:
        parts = input_[0].split(self._key_delimiter)
        env_info = {
            "path": parts[1:-1],      # segments between root and leaf
            "final_key": parts[-1],   # leaf key receiving the env value
            "env_key": env_key
        }
        if self._env.get(parts[0]) is None:
            self._env[parts[0]] = [env_info]
        else:
            self._env[parts[0]].append(env_info)
    return None
def _find_real_key(self, key, source):
    """Return the key in *source* matching *key* case-insensitively, or None."""
    wanted = key.lower()
    for candidate in source.keys():
        if candidate.lower() == wanted:
            return candidate
    return None
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
    """Resolve *key* against every registry, in priority order.

    Order: override, arg, env, config file, key/value store, default.
    Aliases are resolved to the canonical key first.  Returns the value,
    or None when the key is unset everywhere.
    """
    key = self._real_key(key)

    # OVERRIDES
    val = self._override.get(key)
    if val is not None:
        log.debug("{0} found in override: {1}".format(key, val))
        return val

    # ARGS
    val = self._args.get(key)
    if val is not None:
        log.debug("{0} found in args: {1}".format(key, val))
        return val

    # ENVIRONMENT VARIABLES
    if self._automatic_env_applied:
        # even if it hasn't been registered, if `automatic_env` is used,
        # check any `get` request
        val = self._get_env(self._merge_with_env_prefix(key))
        if val is not None:
            log.debug("{0} found in environment: {1}".format(key, val))
            return val

    env_key = self._find_insensitive(key, self._env)
    log.debug("Looking for {0} in env".format(key))
    if isinstance(env_key, list):
        # A list entry means *key* is the root of one or more nested
        # keys registered via bind_env("a.b.c"): splice each env value
        # that is set into the nested config dict, then return the
        # whole (mutated) subtree.
        parent = self._find_insensitive(key, self._config)
        found_in_env = False
        log.debug("Found env key parent {0}: {1}".format(key, parent))
        for item in env_key:
            log.debug("{0} registered as env var parent {1}:".format(
                key, item["env_key"]))
            val = self._get_env(item["env_key"])
            if val is not None:
                log.debug("{0} found in environment: {1}".format(
                    item["env_key"], val))
                # Descend to the dict holding the leaf key.
                temp = parent
                for path in item["path"]:
                    real_key = self._find_real_key(path, temp)
                    temp = temp[real_key]
                self._set_insensitive(item["final_key"], val, temp)
                found_in_env = True
            else:
                log.debug("{0} env value unset".format(item["env_key"]))
        if found_in_env:
            return parent
    elif env_key is not None:
        log.debug("{0} registered as env var: {1}".format(key, env_key))
        val = self._get_env(env_key)
        if val is not None:
            log.debug("{0} found in environment: {1}".format(env_key, val))
            return val
        else:
            log.debug("{0} env value unset".format(env_key))

    # CONFIG FILE
    val = self._find_insensitive(key, self._config)
    if val is not None:
        log.debug("{0} found in config: {1}".format(key, val))
        return val

    # Test for nested config parameter
    if self._key_delimiter in key:
        path = key.split(self._key_delimiter)
        source = self._find(path[0])
        if source is not None and isinstance(source, dict):
            val = self._search_dict(source, path[1::])
            if val is not None:
                log.debug("{0} found in nested config: {1}".format(
                    key, val))
                return val

    # KEY/VALUE STORE
    val = self._kvstore.get(key)
    if val is not None:
        log.debug("{0} found in key/value store: {1}".format(key, val))
        return val

    # DEFAULTS
    val = self._defaults.get(key)
    if val is not None:
        log.debug("{0} found in defaults: {1}".format(key, val))
        return val

    return None
def is_set(self, key):
    """Return True when *key* resolves to a value in any registry."""
    val = self._find(key.lower())
    if val is None:
        # Try a nested lookup rooted at the first key segment.
        parts = key.split(self._key_delimiter)
        root = self._find(parts[0].lower())
        if isinstance(root, dict):
            val = self._search_dict(root, parts[1:])
    return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
    """Register *alias* as another accessor for *key*.

    Lets a key be renamed without breaking the application.  Any value
    already stored under the alias is moved to the canonical key so it
    stays reachable.  Aliasing a key to itself (directly or through an
    existing alias chain) is refused with a warning.
    """
    alias = alias.lower()
    key = key.lower()
    if alias != key and alias != self._real_key(key):
        exists = self._aliases.get(alias)
        if exists is None:
            # if we alias something that exists in one of the dicts to
            # another name, we'll never be able to get that value using the
            # original name, so move the config value to the new _real_key.
            # NOTE(review): the truthiness checks below mean falsy values
            # (0, "", False) are NOT migrated — confirm that is intended.
            val = self._config.get(alias)
            if val:
                self._config.pop(alias)
                self._config[key] = val
            val = self._kvstore.get(alias)
            if val:
                self._kvstore.pop(alias)
                self._kvstore[key] = val
            val = self._defaults.get(alias)
            if val:
                self._defaults.pop(alias)
                self._defaults[key] = val
            val = self._override.get(alias)
            if val:
                self._override.pop(alias)
                self._override[key] = val
            self._aliases[alias] = key
    else:
        log.warning("Creating circular reference alias {0} {1} {2}".format(
            alias, key, self._real_key(key)))
def _real_key(self, key):
    """Follow the alias chain from *key* to its canonical key."""
    target = self._aliases.get(key)
    if target is None:
        return key
    return self._real_key(target)
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
    """Parse *file_* and merge the resulting values into the config registry."""
    if self._config is None:
        self._config = {}
    parsed = self._unmarshall_reader(file_, {})
    self._merge_dicts(parsed, self._config)
def _merge_dicts(self, src, target):
    """Recursively merge *src* into *target*, in place.

    Nested dicts are merged key by key; any other value overwrites the
    target entry.

    Fix: the previous version unconditionally recursed into
    ``target[k]`` whenever ``src[k]`` was a dict, raising KeyError when
    *target* lacked the key (and TypeError when ``target[k]`` was not a
    dict).  Such entries are now simply assigned.
    """
    for k, v in src.items():
        if isinstance(v, dict) and isinstance(target.get(k), dict):
            self._merge_dicts(v, target[k])
        else:
            target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
    """Parse *file_* (string or stream) into the dict *d*.

    The parser is selected by the current config type (file extension or
    explicit set_config_type); delegates to
    ``util.unmarshall_config_reader`` and returns its result.
    """
    return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
    """Return every known key, regardless of which registry defines it.

    Keys are lowercased by default, uppercased when *uppercase_keys* is
    True.
    """
    seen = {}
    registries = (self._override, self._args, self._env, self._config,
                  self._kvstore, self._defaults, self._aliases)
    for registry in registries:
        for k in registry.keys():
            seen[k.upper() if uppercase_keys else k.lower()] = {}
    return seen.keys()
def all_settings(self, uppercase_keys=False):
    """Return every setting as a dict, keyed as ``all_keys`` returns them."""
    return {k: self.get(k) for k in self.all_keys(uppercase_keys)}
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
    """Return the explicitly-set config type, else the file extension.

    The extension is returned without its leading dot; "" when the file
    has no extension (or no config file was found).
    """
    if self._config_type != "":
        return self._config_type
    _, ext = os.path.splitext(self._get_config_file())
    # splitext's extension includes the leading dot; strip it
    # (an empty string stays empty).
    return ext[1:]
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
    """Return the first existing config file under *path*, or ''."""
    log.debug("Searching for config in: {0}".format(path))
    for ext in constants.SUPPORTED_EXTENSIONS:
        candidate = "{0}/{1}.{2}".format(path, self._config_name, ext)
        log.debug("Checking for {0}".format(candidate))
        if util.exists(candidate):
            log.debug("Found: {0}".format(candidate))
            return candidate
    return ""
def _find_config_file(self):
    """Search every registered config path for a config file.

    Returns the first match; raises ConfigFileNotFoundError when no
    path contains one.
    """
    log.info("Searching for config in: {0}".format(
        ", ".join(str(p) for p in self._config_paths)))
    for candidate_path in self._config_paths:
        found = self._search_in_path(candidate_path)
        if found:
            return found
    raise errors.ConfigFileNotFoundError(
        self._config_name, self._config_paths)
def debug(self):  # pragma: no cover
    """Print every configuration registry, for debugging purposes."""
    registries = (
        ("Aliases:", self._aliases),
        ("Override:", self._override),
        ("Args:", self._args),
        ("Env:", self._env),
        ("Config:", self._config),
        ("Key/Value Store:", self._kvstore),
        ("Defaults:", self._defaults),
    )
    for label, registry in registries:
        print(label)
        pprint.pprint(registry)
|
admiralobvious/vyper | vyper/vyper.py | Vyper._unmarshall_reader | python | def _unmarshall_reader(self, file_, d):
return util.unmarshall_config_reader(file_, d, self._get_config_type()) | Unmarshall a file into a `dict`. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L586-L588 | [
"def unmarshall_config_reader(file_, d, config_type):\n config_type = config_type.lower()\n\n if config_type in [\"yaml\", \"yml\"]:\n try:\n f = yaml.load(file_)\n try:\n d.update(yaml.load(f))\n except AttributeError: # to read files\n d.update(f)\n except Exception as e:\n raise ConfigParserError(e)\n\n elif config_type == \"json\":\n try:\n f = json.loads(file_)\n d.update(f)\n except Exception as e:\n raise ConfigParserError(e)\n\n elif config_type == \"toml\":\n try:\n try:\n f = toml.loads(file_)\n d.update(f)\n except AttributeError: # to read streams\n d.update(file_)\n except Exception as e:\n raise ConfigParserError(e)\n\n return d\n",
"def _get_config_type(self):\n if self._config_type != \"\":\n return self._config_type\n\n cf = self._get_config_file()\n ext = os.path.splitext(cf)\n\n if len(ext) > 1:\n return ext[1][1:]\n else:\n return \"\"\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper._get_key_value_config | python | def _get_key_value_config(self):
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found") | Retrieves the first found remote configuration. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L590-L597 | [
"def _get_remote_config(self, provider):\n reader = provider.get()\n self._unmarshall_reader(reader, self._kvstore)\n return self._kvstore\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.all_keys | python | def all_keys(self, uppercase_keys=False):
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys() | Return all keys regardless where they are set. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L617-L642 | null | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
    """Check to see if the key has been set in any of the data locations.

    Falls back to walking a nested dict when the delimited key itself is
    not registered anywhere.
    """
    lowered = key.lower()
    value = self._find(lowered)
    if value is not None:
        return True
    # The full key was not found directly; try resolving it as a path
    # into a nested mapping rooted at the first path segment.
    segments = key.split(self._key_delimiter)
    root = self._find(segments[0].lower())
    if isinstance(root, dict):
        value = self._search_dict(root, segments[1:])
    return value is not None
def automatic_env(self):
    """Have Vyper check ENV variables for all keys set in
    config, default & args.
    """
    # ``_find`` consults this flag and, when set, probes the environment
    # for every ``get`` request even if the key was never bound.
    self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
    """Sets the strings.Replacer on the Vyper object.
    Useful for mapping an environment variable to a key that does
    not match it.
    """
    # Stored as an ``(old, new)`` pair; ``_get_env`` applies it with
    # ``str.replace`` before looking the variable up in the environment.
    self._env_key_replacer = old, new
def register_alias(self, alias, key):
    """Aliases provide another accessor for the same key.
    This enables one to change a name without breaking the application.

    Args:
        alias: the alternate name to register.
        key: the canonical key the alias should resolve to.
    """
    alias = alias.lower()
    key = key.lower()
    if alias == key or alias == self._real_key(key):
        log.warning("Creating circular reference alias {0} {1} {2}".format(
            alias, key, self._real_key(key)))
        return
    # Ignore the registration if the alias is already taken.
    if self._aliases.get(alias) is None:
        # If we alias something that already exists in one of the
        # registries under the alias name, we would never be able to get
        # that value using the original name again, so move the stored
        # value to the new real key. Membership is tested with ``in``
        # (not truthiness) so falsy values such as 0, "" or False are
        # migrated too -- the previous ``if val:`` check silently lost
        # them.
        for registry in (self._config, self._kvstore,
                         self._defaults, self._override):
            if alias in registry:
                registry[key] = registry.pop(alias)
        self._aliases[alias] = key
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
    """Check to see if the given key (or an alias) is in the config file."""
    # Resolve aliases first, then look the real key up in the config dict.
    return self._config.get(self._real_key(key))
def set_default(self, key, value):
    """Set the default value for this key.
    Default only used when no value is provided by the user via
    arg, config or env.
    """
    # Defaults are stored under the lowercased, alias-resolved key.
    self._defaults[self._real_key(key.lower())] = value
def set(self, key, value):
    """Sets the value for the key in the override register.
    Will be used instead of values obtained via
    args, config file, env, defaults or key/value store.
    """
    # Overrides are stored under the lowercased, alias-resolved key.
    self._override[self._real_key(key.lower())] = value
def read_in_config(self):
    """Vyper will discover and load the configuration file from disk
    and key/value stores, searching in one of the defined paths.

    Raises:
        errors.UnsupportedConfigError: if the detected config type is not
            one of the supported extensions.
    """
    log.info("Attempting to read in config file")
    if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
        raise errors.UnsupportedConfigError(self._get_config_type())
    with open(self._get_config_file()) as fp:
        f = fp.read()
    # Discard any previously loaded configuration before re-parsing.
    self._config = {}
    return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
    """Discover the config file on disk and merge its values into the
    existing config registry (unlike ``read_in_config``, which replaces it).

    Raises:
        errors.UnsupportedConfigError: if the detected config type is not
            one of the supported extensions.
    """
    log.info("Attempting to merge in config file")
    if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
        raise errors.UnsupportedConfigError(self._get_config_type())
    with open(self._get_config_file()) as fp:
        f = fp.read()
    return self.merge_config(f)
def read_config(self, file_):
    """Vyper will read a configuration file, setting existing keys to
    `None` if the key does not exist in the file.

    Args:
        file_: the raw configuration contents -- passed straight to the
            unmarshaller, which parses them into the config registry.
    """
    self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
    """Parse *file_* and merge its values into the current config."""
    if self._config is None:
        self._config = {}
    # Parse into a scratch dict first, then fold it into the registry.
    parsed = self._unmarshall_reader(file_, {})
    self._merge_dicts(parsed, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
    """Attempts to get configuration from a remote source
    and read it in the remote configuration registry.

    Raises:
        errors.RemoteConfigError: if no remote providers are configured
            (propagated from ``_get_key_value_config``).
    """
    return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
    """Unmarshall a file into a `dict`.

    Args:
        file_: raw configuration contents to parse.
        d: the dict that receives the parsed values.

    Returns:
        Whatever ``util.unmarshall_config_reader`` returns -- presumably
        the populated dict; confirm against the util module.
    """
    return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
    """Retrieves the first found remote configuration."""
    # Only the first provider is consulted: the unconditional ``return``
    # exits the loop after one iteration.
    for rp in self._remote_providers:
        val = self._get_remote_config(rp)
        self._kvstore = val
        return None
    # Reached only when no remote providers were registered.
    raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
    """Fetch *provider*'s payload and unmarshall it into the k/v store.

    Returns:
        The k/v store dict after the provider's data is parsed into it.
    """
    reader = provider.get()
    self._unmarshall_reader(reader, self._kvstore)
    return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
    """Register *func* as the remote-change callback on every provider.

    The callback ignores the event argument passed by providers and
    simply invokes ``func(*args, **kwargs)``.
    """
    def _callback(_event):
        return func(*args, **kwargs)
    self._on_remote_config_change = _callback
    for provider in self._remote_providers:
        provider.add_listener(_callback)
    return None
def watch_remote_config(self):
    """Attach a change listener to the first remote provider.

    Raises:
        errors.RemoteConfigError: if no remote providers are configured.
    """
    # NOTE(review): the unconditional ``return`` means only the first
    # provider gets a listener, and unlike ``on_remote_config_change``
    # no callback is passed to ``add_listener`` -- confirm both are
    # intentional.
    for rp in self._remote_providers:
        rp.add_listener()
        return None
    raise errors.RemoteConfigError("No Files Found")
def all_settings(self, uppercase_keys=False):
    """Return all settings as a `dict`, one entry per known key."""
    return {k: self.get(k) for k in self.all_keys(uppercase_keys)}
def set_config_name(self, name):
    """Name for the config file. Does not include extension."""
    self._config_name = name
    # Drop the cached file path so the next lookup re-discovers the file
    # under the new name.
    self._config_file = ""
def set_config_type(self, type_):
    """Sets the type of the configuration returned by the
    remote source, e.g. "json".
    """
    # An explicit type takes precedence over the file-extension heuristic
    # in ``_get_config_type``.
    self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
    """Return the first existing config file under *path*, trying every
    supported extension in order; "" when none exists.
    """
    log.debug("Searching for config in: {0}".format(path))
    for ext in constants.SUPPORTED_EXTENSIONS:
        full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
        log.debug("Checking for {0}".format(full_path))
        if util.exists(full_path):
            log.debug("Found: {0}".format(full_path))
            return full_path
    return ""
def _find_config_file(self):
    """Search all `config_paths` for any config file.
    Returns the first path that exists (and is a config file).

    Raises:
        errors.ConfigFileNotFoundError: if no search path contains a
            config file.
    """
    log.info("Searching for config in: {0}".format(
        ", ".join(str(p) for p in self._config_paths)))
    for cp in self._config_paths:
        f = self._search_in_path(cp)
        if f != "":
            return f
    raise errors.ConfigFileNotFoundError(
        self._config_name, self._config_paths)
def debug(self):  # pragma: no cover
    """Print every configuration registry, for debugging purposes."""
    registries = (
        ("Aliases:", self._aliases),
        ("Override:", self._override),
        ("Args:", self._args),
        ("Env:", self._env),
        ("Config:", self._config),
        ("Key/Value Store:", self._kvstore),
        ("Defaults:", self._defaults),
    )
    for label, registry in registries:
        print(label)
        pprint.pprint(registry)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.all_settings | python | def all_settings(self, uppercase_keys=False):
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d | Return all settings as a `dict`. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L644-L651 | [
"def get(self, key):\n \"\"\"Vyper is essentially repository for configurations.\n `get` can retrieve any value given the key to use.\n `get` has the behavior of returning the value associated with the first\n place from where it is set. Viper will check in the following order:\n override, arg, env, config file, key/value store, default.\n \"\"\"\n path = key.split(self._key_delimiter)\n\n lowercase_key = key.lower()\n val = self._find(lowercase_key)\n\n if val is None:\n source = self._find(path[0].lower())\n if source is not None and isinstance(source, dict):\n val = self._search_dict(source, path[1::])\n\n if val is None:\n return None\n\n return val\n",
"def all_keys(self, uppercase_keys=False):\n \"\"\"Return all keys regardless where they are set.\"\"\"\n d = {}\n\n for k in self._override.keys():\n d[k.upper() if uppercase_keys else k.lower()] = {}\n\n for k in self._args.keys():\n d[k.upper() if uppercase_keys else k.lower()] = {}\n\n for k in self._env.keys():\n d[k.upper() if uppercase_keys else k.lower()] = {}\n\n for k in self._config.keys():\n d[k.upper() if uppercase_keys else k.lower()] = {}\n\n for k in self._kvstore.keys():\n d[k.upper() if uppercase_keys else k.lower()] = {}\n\n for k in self._defaults.keys():\n d[k.upper() if uppercase_keys else k.lower()] = {}\n\n for k in self._aliases.keys():\n d[k.upper() if uppercase_keys else k.lower()] = {}\n\n return d.keys()\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
    """Walk nested dict *d* following *keys*; return the value found,
    or None when the path does not resolve.

    NOTE(review): every branch of the loop returns, so only the first
    element of *keys* is examined per call -- the remaining keys are
    handled solely through the recursive call; confirm this is intended.
    """
    if not keys:
        return d
    for key in keys:
        if key in d and not isinstance(d[key], dict):
            return d[key]
        elif key in d:
            return self._search_dict(d[key], keys[1::])
        else:
            return None
def get(self, key):
    """Vyper is essentially repository for configurations.
    `get` can retrieve any value given the key to use.
    `get` has the behavior of returning the value associated with the first
    place from where it is set. Vyper will check in the following order:
    override, arg, env, config file, key/value store, default.
    """
    path = key.split(self._key_delimiter)
    lowercase_key = key.lower()
    val = self._find(lowercase_key)
    if val is None:
        # The delimited key was not registered directly; treat it as a
        # path into a nested mapping rooted at the first segment.
        source = self._find(path[0].lower())
        if source is not None and isinstance(source, dict):
            val = self._search_dict(source, path[1::])
    if val is None:
        return None
    return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
    """Return the value for *key* coerced to `bool`.

    The string "false" (any case) maps to False; every other value goes
    through normal `bool` truthiness.
    """
    value = self.get(key)
    if isinstance(value, str) and value.lower() == 'false':
        return False
    return bool(value)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
    """Return the value for *key* as UTF-8 encoded `bytes`.

    The previous implementation called ``b"{0}".format(...)``, which
    raises AttributeError on Python 3 because `bytes` has no ``format``
    method.
    """
    return str(self.get(key)).encode("utf-8")
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper._find_config_file | python | def _find_config_file(self):
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths) | Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file). | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L698-L711 | [
"def _search_in_path(self, path):\n log.debug(\"Searching for config in: {0}\".format(path))\n\n for ext in constants.SUPPORTED_EXTENSIONS:\n full_path = \"{0}/{1}.{2}\".format(path, self._config_name, ext)\n log.debug(\"Checking for {0}\".format(full_path))\n if util.exists(full_path):\n log.debug(\"Found: {0}\".format(full_path))\n return full_path\n\n return \"\"\n"
] | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def debug(self): # pragma: no cover
"""Prints all configuration registries for debugging purposes."""
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults)
|
admiralobvious/vyper | vyper/vyper.py | Vyper.debug | python | def debug(self): # pragma: no cover
print("Aliases:")
pprint.pprint(self._aliases)
print("Override:")
pprint.pprint(self._override)
print("Args:")
pprint.pprint(self._args)
print("Env:")
pprint.pprint(self._env)
print("Config:")
pprint.pprint(self._config)
print("Key/Value Store:")
pprint.pprint(self._kvstore)
print("Defaults:")
pprint.pprint(self._defaults) | Prints all configuration registries for debugging purposes. | train | https://github.com/admiralobvious/vyper/blob/58ec7b90661502b7b2fea7a30849b90b907fcdec/vyper/vyper.py#L713-L729 | null | class Vyper(object):
"""Vyper is a prioritized configuration registry. It maintains a set of
configuration sources, fetches values to populate those, and provides
them according to the source's priority.
The priority of the sources is the following:
1. overrides
2. args
3. env. variables
4. config file
5. key/value store
6. defaults
For example, if values from the following sources were loaded:
defaults: {
"secret": "",
"user": "default",
"endpoint": "https://localhost"
}
config: {
"user": "root"
"secret": "defaultsecret"
}
env: {
"secret": "somesecretkey"
}
The resulting config will have the following values:
{
"secret": "somesecretkey",
"user": "root",
"endpoint": "https://localhost"
}
"""
def __init__(self, config_name="config", key_delimiter="."):
# Delimiter that separates a list of keys
# used to access a nested value in one go.
self._key_delimiter = key_delimiter
# A set of paths to look for the config file in.
self._config_paths = []
# A set of remote providers to search for the configuration.
self._remote_providers = []
# Name of file to look for inside the path.
self._config_name = config_name
self._config_file = ""
self._config_type = ""
self._env_prefix = ""
self._automatic_env_applied = False
self._env_key_replacer = None
self._aliases = {}
self._override = {}
self._args = {}
self._env = {}
self._config = {}
self._kvstore = {}
self._defaults = {}
self._on_config_change = None
self._on_remote_config_change = None
self.parse_argv_disabled = False
def on_config_change(self, func, *args, **kwargs):
self._on_config_change = lambda: func(*args, **kwargs)
def watch_config(self):
config_file = self._get_config_file()
watcher = watch.get_watcher(config_file, self)
watcher.start()
def set_config_file(self, file_):
"""Explicitly define the path, name and extension of the config file
Vyper will use this and not check any of the config paths.
"""
self._config_file = file_
def set_env_prefix(self, prefix):
"""Define a prefix that ENVIRONMENT variables will use.
e.g. if your prefix is "spf", the env registry will look
for env. variables that start with "SPF_"
"""
self._env_prefix = prefix
def _merge_with_env_prefix(self, key):
if self._env_prefix != "":
return ("{0}_{1}".format(self._env_prefix, key)).upper()
return key.upper()
def _get_env(self, key):
"""Wrapper around os.getenv() which replaces characters
in the original key. This allows env vars which have different keys
than the config object keys.
"""
if self._env_key_replacer is not None:
key = key.replace(*self._env_key_replacer)
return os.getenv(key)
def config_file_used(self):
"""Return the file used to populate the config registry."""
return self._config_file
def add_config_path(self, path):
"""Add a path for Vyper to search for the config file in.
Can be called multiple times to define multiple search paths.
"""
abspath = util.abs_pathify(path)
if abspath not in self._config_paths:
log.info("Adding {0} to paths to search".format(abspath))
self._config_paths.append(abspath)
def add_remote_provider(self, provider, client, path):
"""Adds a remote configuration source.
Remote Providers are searched in the order they are added.
provider is a string value, "etcd", "consul" and "zookeeper" are
currently supported.
client is a client object
path is the path in the k/v store to retrieve configuration
To retrieve a config file called myapp.json from /configs/myapp.json
you should set path to /configs and set config name (set_config_name)
to "myapp"
"""
if provider not in constants.SUPPORTED_REMOTE_PROVIDERS:
raise errors.UnsupportedRemoteProviderError(provider)
host = ""
if provider == "etcd":
host = "{0}://{1}:{2}".format(client.protocol, client.host,
client.port)
elif provider == "consul":
host = "{0}://{1}:{2}".format(client.http.scheme, client.http.host,
client.http.port)
elif provider == "zookeeper":
host = ",".join(
str("{0}:{1}".format(h[0], h[1])) for h in client.hosts)
log.info("Adding {0}:{1} to remote provider list".format(
provider, host))
rp = remote.RemoteProvider(provider, client, path, self)
if not self._provider_path_exists(rp):
self._remote_providers.append(rp)
def _provider_path_exists(self, rp):
for p in self._remote_providers:
if p.path == rp.path:
return True
return False
def _search_dict(self, d, keys):
if not keys:
return d
for key in keys:
if key in d and not isinstance(d[key], dict):
return d[key]
elif key in d:
return self._search_dict(d[key], keys[1::])
else:
return None
def get(self, key):
"""Vyper is essentially repository for configurations.
`get` can retrieve any value given the key to use.
`get` has the behavior of returning the value associated with the first
place from where it is set. Viper will check in the following order:
override, arg, env, config file, key/value store, default.
"""
path = key.split(self._key_delimiter)
lowercase_key = key.lower()
val = self._find(lowercase_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is None:
return None
return val
def get_string(self, key):
return str(self.get(key))
def get_bool(self, key):
val = self.get(key)
if isinstance(val, str):
if val.lower() == 'false':
return False
return bool(val)
def get_int(self, key):
return int(self.get(key))
def get_float(self, key):
return float(self.get(key))
def get_unicode(self, key):
return text(self.get(key))
def get_bytes(self, key):
return b"{0}".format(self.get(key))
def sub(self, key):
"""Returns new Vyper instance representing a sub tree of this instance.
"""
subv = Vyper()
data = self.get(key)
if isinstance(data, dict):
subv._config = data
return subv
else:
return None
def unmarshall_key(self, key, cls):
"""Takes a single key and unmarshalls it into a class."""
return setattr(cls, key, self.get(key))
def unmarshall(self, cls):
"""Unmarshalls the config into a class. Make sure that the tags on
the attributes of the class are properly set.
"""
for k, v in self.all_settings().items():
setattr(cls, k, v)
return cls
def bind_args(self, parser):
if isinstance(parser, argparse.ArgumentParser):
return self._bind_parser_values(parser)
else:
return self.bind_arg_values(parser)
def bind_arg(self, key, arg):
return self.bind_arg_value(key, arg)
def _parse_args(self, parser, overrides=None):
if overrides:
return vars(parser.parse_args(overrides))
if not self.parse_argv_disabled:
return vars(parser.parse_args())
else:
return vars(parser.parse_args([]))
def _bind_parser_values(self, parser, overrides=None):
# method mostly for testing, use bind_args()
args = self._parse_args(parser, overrides)
defaults = \
{k: parser.get_default(k) for k in args.keys()}
for k, v in defaults.items():
self.set_default(k, v)
if args[k] != defaults[k]:
self.bind_arg(k, args[k])
def bind_arg_values(self, args):
for k, v in args.items():
try:
self.bind_arg_value(k, v)
except ValueError:
pass
def bind_arg_value(self, key, arg):
if arg is None:
raise ValueError("arg for {0} is None".format(key))
self._args[key.lower()] = arg
def bind_env(self, *input_):
"""Binds a Vyper key to a ENV variable.
ENV variables are case sensitive.
If only a key is provided, it will use the env key matching the key,
uppercased.
`env_prefix` will be used when set when env name is not provided.
"""
if len(input_) == 0:
return "bind_env missing key to bind to"
key = input_[0].lower()
if len(input_) == 1:
env_key = self._merge_with_env_prefix(key)
else:
env_key = input_[1]
self._env[key] = env_key
if self._key_delimiter in key:
parts = input_[0].split(self._key_delimiter)
env_info = {
"path": parts[1:-1],
"final_key": parts[-1],
"env_key": env_key
}
if self._env.get(parts[0]) is None:
self._env[parts[0]] = [env_info]
else:
self._env[parts[0]].append(env_info)
return None
def _find_real_key(self, key, source):
return next(
(real for real in source.keys() if real.lower() == key.lower()),
None)
def _find_insensitive(self, key, source):
real_key = self._find_real_key(key, source)
return source.get(real_key)
def _set_insensitive(self, key, val, source):
real_key = self._find_real_key(key, source)
if real_key is None:
msg = "No case insensitive variant of {0} found.".format(key)
raise KeyError(msg)
source[real_key] = val
def _find(self, key):
"""Given a key, find the value
Vyper will check in the following order:
override, arg, env, config file, key/value store, default
Vyper will check to see if an alias exists first.
"""
key = self._real_key(key)
# OVERRIDES
val = self._override.get(key)
if val is not None:
log.debug("{0} found in override: {1}".format(key, val))
return val
# ARGS
val = self._args.get(key)
if val is not None:
log.debug("{0} found in args: {1}".format(key, val))
return val
# ENVIRONMENT VARIABLES
if self._automatic_env_applied:
# even if it hasn't been registered, if `automatic_env` is used,
# check any `get` request
val = self._get_env(self._merge_with_env_prefix(key))
if val is not None:
log.debug("{0} found in environment: {1}".format(key, val))
return val
env_key = self._find_insensitive(key, self._env)
log.debug("Looking for {0} in env".format(key))
if isinstance(env_key, list):
parent = self._find_insensitive(key, self._config)
found_in_env = False
log.debug("Found env key parent {0}: {1}".format(key, parent))
for item in env_key:
log.debug("{0} registered as env var parent {1}:".format(
key, item["env_key"]))
val = self._get_env(item["env_key"])
if val is not None:
log.debug("{0} found in environment: {1}".format(
item["env_key"], val))
temp = parent
for path in item["path"]:
real_key = self._find_real_key(path, temp)
temp = temp[real_key]
self._set_insensitive(item["final_key"], val, temp)
found_in_env = True
else:
log.debug("{0} env value unset".format(item["env_key"]))
if found_in_env:
return parent
elif env_key is not None:
log.debug("{0} registered as env var: {1}".format(key, env_key))
val = self._get_env(env_key)
if val is not None:
log.debug("{0} found in environment: {1}".format(env_key, val))
return val
else:
log.debug("{0} env value unset".format(env_key))
# CONFIG FILE
val = self._find_insensitive(key, self._config)
if val is not None:
log.debug("{0} found in config: {1}".format(key, val))
return val
# Test for nested config parameter
if self._key_delimiter in key:
path = key.split(self._key_delimiter)
source = self._find(path[0])
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
if val is not None:
log.debug("{0} found in nested config: {1}".format(
key, val))
return val
# KEY/VALUE STORE
val = self._kvstore.get(key)
if val is not None:
log.debug("{0} found in key/value store: {1}".format(key, val))
return val
# DEFAULTS
val = self._defaults.get(key)
if val is not None:
log.debug("{0} found in defaults: {1}".format(key, val))
return val
return None
def is_set(self, key):
"""Check to see if the key has been set in any of the data locations.
"""
path = key.split(self._key_delimiter)
lower_case_key = key.lower()
val = self._find(lower_case_key)
if val is None:
source = self._find(path[0].lower())
if source is not None and isinstance(source, dict):
val = self._search_dict(source, path[1::])
return val is not None
def automatic_env(self):
"""Have Vyper check ENV variables for all keys set in
config, default & args.
"""
self._automatic_env_applied = True
def set_env_key_replacer(self, old, new):
"""Sets the strings.Replacer on the Vyper object.
Useful for mapping an environment variable to a key that does
not match it.
"""
self._env_key_replacer = old, new
def register_alias(self, alias, key):
"""Aliases provide another accessor for the same key.
This enables one to change a name without breaking the application.
"""
alias = alias.lower()
key = key.lower()
if alias != key and alias != self._real_key(key):
exists = self._aliases.get(alias)
if exists is None:
# if we alias something that exists in one of the dicts to
# another name, we'll never be able to get that value using the
# original name, so move the config value to the new _real_key.
val = self._config.get(alias)
if val:
self._config.pop(alias)
self._config[key] = val
val = self._kvstore.get(alias)
if val:
self._kvstore.pop(alias)
self._kvstore[key] = val
val = self._defaults.get(alias)
if val:
self._defaults.pop(alias)
self._defaults[key] = val
val = self._override.get(alias)
if val:
self._override.pop(alias)
self._override[key] = val
self._aliases[alias] = key
else:
log.warning("Creating circular reference alias {0} {1} {2}".format(
alias, key, self._real_key(key)))
def _real_key(self, key):
new_key = self._aliases.get(key)
if new_key is not None:
return self._real_key(new_key)
else:
return key
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists
def set_default(self, key, value):
"""Set the default value for this key.
Default only used when no value is provided by the user via
arg, config or env.
"""
k = self._real_key(key.lower())
self._defaults[k] = value
def set(self, key, value):
"""Sets the value for the key in the override register.
Will be used instead of values obtained via
args, config file, env, defaults or key/value store.
"""
k = self._real_key(key.lower())
self._override[k] = value
def read_in_config(self):
"""Vyper will discover and load the configuration file from disk
and key/value stores, searching in one of the defined paths.
"""
log.info("Attempting to read in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
self._config = {}
return self._unmarshall_reader(f, self._config)
def merge_in_config(self):
log.info("Attempting to merge in config file")
if self._get_config_type() not in constants.SUPPORTED_EXTENSIONS:
raise errors.UnsupportedConfigError(self._get_config_type())
with open(self._get_config_file()) as fp:
f = fp.read()
return self.merge_config(f)
def read_config(self, file_):
"""Vyper will read a configuration file, setting existing keys to
`None` if the key does not exist in the file.
"""
self._unmarshall_reader(file_, self._config)
def merge_config(self, file_):
if self._config is None:
self._config = {}
cfg = {}
cfg = self._unmarshall_reader(file_, cfg)
self._merge_dicts(cfg, self._config)
def _merge_dicts(self, src, target):
for k, v in src.items():
if isinstance(v, dict):
self._merge_dicts(v, target[k])
else:
target[k] = v
def read_remote_config(self):
"""Attempts to get configuration from a remote source
and read it in the remote configuration registry.
"""
return self._get_key_value_config()
def _unmarshall_reader(self, file_, d):
"""Unmarshall a file into a `dict`."""
return util.unmarshall_config_reader(file_, d, self._get_config_type())
def _get_key_value_config(self):
"""Retrieves the first found remote configuration."""
for rp in self._remote_providers:
val = self._get_remote_config(rp)
self._kvstore = val
return None
raise errors.RemoteConfigError("No Files Found")
def _get_remote_config(self, provider):
reader = provider.get()
self._unmarshall_reader(reader, self._kvstore)
return self._kvstore
def on_remote_config_change(self, func, *args, **kwargs):
self._on_remote_config_change = lambda x: func(*args, **kwargs)
for rp in self._remote_providers:
rp.add_listener(self._on_remote_config_change)
return None
def watch_remote_config(self):
for rp in self._remote_providers:
rp.add_listener()
return None
raise errors.RemoteConfigError("No Files Found")
def all_keys(self, uppercase_keys=False):
"""Return all keys regardless where they are set."""
d = {}
for k in self._override.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._args.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._env.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._config.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._kvstore.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._defaults.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
for k in self._aliases.keys():
d[k.upper() if uppercase_keys else k.lower()] = {}
return d.keys()
def all_settings(self, uppercase_keys=False):
"""Return all settings as a `dict`."""
d = {}
for k in self.all_keys(uppercase_keys):
d[k] = self.get(k)
return d
def set_config_name(self, name):
"""Name for the config file. Does not include extension."""
self._config_name = name
self._config_file = ""
def set_config_type(self, type_):
"""Sets the type of the configuration returned by the
remote source, e.g. "json".
"""
self._config_type = type_
def _get_config_type(self):
if self._config_type != "":
return self._config_type
cf = self._get_config_file()
ext = os.path.splitext(cf)
if len(ext) > 1:
return ext[1][1:]
else:
return ""
def _get_config_file(self):
if self._config_file == "":
try:
cf = self._find_config_file()
self._config_file = cf
except errors.ConfigFileNotFoundError:
return ""
return self._config_file
def _search_in_path(self, path):
log.debug("Searching for config in: {0}".format(path))
for ext in constants.SUPPORTED_EXTENSIONS:
full_path = "{0}/{1}.{2}".format(path, self._config_name, ext)
log.debug("Checking for {0}".format(full_path))
if util.exists(full_path):
log.debug("Found: {0}".format(full_path))
return full_path
return ""
def _find_config_file(self):
"""Search all `config_paths` for any config file.
Returns the first path that exists (and is a config file).
"""
log.info("Searching for config in: {0}".format(
", ".join(str(p) for p in self._config_paths)))
for cp in self._config_paths:
f = self._search_in_path(cp)
if f != "":
return f
raise errors.ConfigFileNotFoundError(
self._config_name, self._config_paths)
|
aitjcize/cppman | cppman/crawler.py | CPPReferenceLinkParser._find_links | python | def _find_links(self):
processed = {}
links = []
body = re.search('<[^>]*body[^>]*>(.+?)</body>', self.text, re.S).group(1)
bodyr = body[::-1]
href = "href"[::-1]
span = "span"[::-1]
mark_rev = "t-mark-rev"[::-1]
_class = "class"[::-1]
for std, url in re.findall(
'>' + span + '/<\)([^(<>]*)\(' + '>[^<]*?' +
'''['"][^'"]*''' + mark_rev + '''[^'"]*['"]\s*=\s*''' + _class +
'[^<]*' + span + '''<.*?['"]([^'"]+)['"]=''' + href
, bodyr):
std = std[::-1]
url = url[::-1]
links.append(Link(url, std))
processed[url] = True
for url in re.findall('''href\s*=\s*['"]\s*([^'"]+)['"]''', self.text):
if url in processed:
continue
links.append(Link(url, ""))
processed[url] = True
return links | The link follow by the span.t-mark-rev will contained c++xx information.
Consider the below case
<a href="LinkA">LinkA</a>
<a href="LinkB">LinkB</a>
<span class="t-mark-rev">(C++11)</span>
We're reversing the body so it is easier to write the regex to get the pair of (std, url). | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/crawler.py#L82-L116 | null | class CPPReferenceLinkParser(LinkParser):
|
aitjcize/cppman | cppman/formatter/cplusplus.py | escape_pre_section | python | def escape_pre_section(table):
def replace_newline(g):
return g.group(1).replace('\n', '\n.br\n')
return re.sub('<pre.*?>(.*?)</pre>', replace_newline, table, flags=re.S) | Escape <pre> section in table. | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/formatter/cplusplus.py#L142-L147 | null | # -*- coding: utf-8 -*-
#
# formatter.py - format html from cplusplus.com to groff syntax
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <aitjcize@gmail.com>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import datetime
import re
import urllib.request
from cppman.util import html2man, fixupHTML
from cppman.formatter.tableparser import parse_table
# Format replacement RE list
# The '.SE' pseudo macro is described in the function: html2groff
pre_rps = [
# Snippet, ugly hack: we don't want to treat code listing as table
(r'<table class="snippet">(.*?)</table>',
r'\n.in +2n\n\1\n.in\n.sp\n', re.S),
]
rps = [
# Header, Name
(r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*'
r'<div id="I_file"[^>]*>(.*?)</div>\s*'
r'<h1>(.*?)</h1>\s*<div class="C_prototype"[^>]*>'
r'(.*?)</div>\s*<div id="I_description"[^>]*>(.*?)</div>',
r'.TH "\3" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
r'\n.SH "NAME"\n\3 - \5\n'
r'\n.SE\n.SH "TYPE"\n\1\n'
r'\n.SE\n.SH "SYNOPSIS"\n#include \2\n.sp\n\4\n'
r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
(r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*'
r'<div id="I_file"[^>]*>(.*?)</div>\s*'
r'<h1>(.*?)</h1>\s*'
r'<div id="I_description"[^>]*>(.*?)</div>',
r'.TH "\3" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
r'\n.SH "NAME"\n\3 - \4\n'
r'\n.SE\n.SH "TYPE"\n\1\n'
r'\n.SE\n.SH "SYNOPSIS"\n#include \2\n.sp\n'
r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
(r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*<h1>(.*?)</h1>\s*'
r'<div id="I_description"[^>]*>(.*?)</div>',
r'.TH "\2" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
r'\n.SH "NAME"\n\2 - \3\n'
r'\n.SE\n.SH "TYPE"\n\1\n'
r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
(r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*<h1>(.*?)</h1>\s*'
r'<div id="I_file"[^>]*>(.*?)</div>\s*<div id="I_description"[^>]*>'
'(.*?)</div>',
r'.TH "\2" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
r'\n.SH "NAME"\n\2 - \4\n'
r'\n.SE\n.SH "TYPE"\n\1\n'
r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
(r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*<h1>(.*?)</h1>\s*'
r'<div class="C_prototype"[^>]*>(.*?)</div>\s*'
r'<div id="I_description"[^>]*>(.*?)</div>',
r'.TH "\2" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
r'\n.SH "NAME"\n\2 - \4\n'
r'\n.SE\n.SH "TYPE"\n\1\n'
r'\n.SE\n.SH "SYNOPSIS"\n\3\n'
r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
(r'<span alt="[^"]*?" class="C_ico cpp11warning"[^>]*>', r' [C++11]', re.S),
# Remove empty #include
(r'#include \n.sp\n', r'', 0),
# Remove empty sections
(r'\n.SH (.+?)\n+.SE', r'', 0),
# Section headers
(r'.*<h3>(.+?)</h3>', r'\n.SE\n.SH "\1"\n', 0),
# 'ul' tag
(r'<ul>', r'\n.RS 2\n', 0),
(r'</ul>', r'\n.RE\n.sp\n', 0),
# 'li' tag
(r'<li>\s*(.+?)</li>', r'\n.IP \[bu] 3\n\1\n', re.S),
# 'pre' tag
(r'<pre[^>]*>(.+?)</pre\s*>', r'\n.nf\n\1\n.fi\n', re.S),
# Subsections
(r'<b>(.+?)</b>:<br/>', r'.SS \1\n', 0),
# Member functions / See Also table
# Without C++11 tag
(r'<dl class="links"><dt><a href="[^"]*"><b>([^ ]+?)</b></a></dt><dd>'
r'([^<]*?)<span class="typ">\s*\(([^<]*?)\n?\)</span></dd></dl>',
r'\n.IP "\1(3)"\n\2 (\3)\n', re.S),
# With C++11 tag
(r'<dl class="links"><dt><a href="[^"]*"><b>([^ ]+?) <b class="C_cpp11" '
r'title="(.+?)"></b></b></a></dt><dd>'
r'([^<]*?)<span class="typ">\s*\((.*?)\n?\)</span></dd></dl>',
r'\n.IP "\1(3) [\2]"\n\3 (\4)\n', re.S),
# Footer
(r'<div id="CH_bb">.*$',
r'\n.SE\n.SH "REFERENCE"\n'
r'cplusplus.com, 2000-2015 - All rights reserved.', re.S),
# C++ version tag
(r'<div.+?title="(C\+\+..)"[^>]*>', r'.sp\n\1\n', 0),
# 'br' tag
(r'<br/>', r'\n.br\n', 0),
(r'\n.br\n.br\n', r'\n.sp\n', 0),
# 'dd' 'dt' tag
(r'<dt>(.+?)</dt>\s*<dd>(.+?)</dd>', r'.IP "\1"\n\2\n', re.S),
# Bold
(r'<strong>(.+?)</strong>', r'\n.B \1\n', 0),
# Remove row number in EXAMPLE
(r'<td class="rownum">.*?</td>', r'', re.S),
# Any other tags
(r'<script[^>]*>[^<]*</script>', r'', 0),
(r'<.*?>', r'', re.S),
# Misc
(r'<', r'<', 0),
(r'>', r'>', 0),
(r'"', r'"', 0),
(r'&', r'&', 0),
(r' ', r' ', 0),
(r'\\([^\^nE])', r'\\\\\1', 0),
(r'>/">', r'', 0),
(r'/">', r'', 0),
# Remove empty lines
(r'\n\s*\n+', r'\n', 0),
(r'\n\n+', r'\n', 0),
# Preserve \n" in EXAMPLE
(r'\\n', r'\\en', 0),
]
def html2groff(data, name):
"""Convert HTML text from cplusplus.com to Groff-formatted text."""
# Remove sidebar
try:
data = data[data.index('<div class="C_doc">'):]
except ValueError:
pass
# Pre replace all
for rp in pre_rps:
data = re.compile(rp[0], rp[2]).sub(rp[1], data)
for table in re.findall(r'<table.*?>.*?</table>', data, re.S):
tbl = parse_table(escape_pre_section(table))
# Escape column with '.' as prefix
tbl = re.compile(r'T{\n(\..*?)\nT}', re.S).sub(r'T{\n\\E \1\nT}', tbl)
data = data.replace(table, tbl)
# Replace all
for rp in rps:
data = re.compile(rp[0], rp[2]).sub(rp[1], data)
# Upper case all section headers
for st in re.findall(r'.SH .*\n', data):
data = data.replace(st, st.upper())
# Add tags to member/inherited member functions
# e.g. insert -> vector::insert
#
# .SE is a pseudo macro I created which means 'SECTION END'
# The reason I use it is because I need a marker to know where section
# ends.
# re.findall find patterns which does not overlap, which means if I do
# this: secs = re.findall(r'\n\.SH "(.+?)"(.+?)\.SH', data, re.S)
# re.findall will skip the later .SH tag and thus skip the later section.
# To fix this, '.SE' is used to mark the end of the section so the next
# '.SH' can be find by re.findall
page_type = re.search(r'\n\.SH "TYPE"\n(.+?)\n', data)
if page_type and 'class' in page_type.group(1):
class_name = re.search(r'\n\.SH "NAME"\n(?:.*::)?(.+?) ', data).group(1)
secs = re.findall(r'\n\.SH "(.+?)"(.+?)\.SE', data, re.S)
for sec, content in secs:
# Member functions
if ('MEMBER' in sec and
'NON-MEMBER' not in sec and
'INHERITED' not in sec and
sec != 'MEMBER TYPES'):
content2 = re.sub(r'\n\.IP "([^:]+?)"', r'\n.IP "%s::\1"'
% class_name, content)
# Replace (constructor) (destructor)
content2 = re.sub(r'\(constructor\)', r'%s' % class_name,
content2)
content2 = re.sub(r'\(destructor\)', r'~%s' % class_name,
content2)
data = data.replace(content, content2)
# Inherited member functions
elif 'MEMBER' in sec and 'INHERITED' in sec:
inherit = re.search(r'.+?INHERITED FROM (.+)',
sec).group(1).lower()
content2 = re.sub(r'\n\.IP "(.+)"', r'\n.IP "%s::\1"'
% inherit, content)
data = data.replace(content, content2)
# Remove pseudo macro '.SE'
data = data.replace('\n.SE', '')
return data
def func_test():
"""Test if there is major format changes in cplusplus.com"""
ifs = urllib.request.urlopen('http://www.cplusplus.com/printf')
result = html2groff(fixupHTML(ifs.read()), 'printf')
assert '.SH "NAME"' in result
assert '.SH "TYPE"' in result
assert '.SH "DESCRIPTION"' in result
def test():
"""Simple Text"""
ifs = urllib.request.urlopen('http://www.cplusplus.com/vector')
print(html2groff(fixupHTML(ifs.read()), 'std::vector'), end=' ')
# with open('test.html') as ifs:
# print html2groff(fixupHTML(ifs.read()), 'std::vector'),
if __name__ == '__main__':
test()
|
aitjcize/cppman | cppman/formatter/cplusplus.py | html2groff | python | def html2groff(data, name):
# Remove sidebar
try:
data = data[data.index('<div class="C_doc">'):]
except ValueError:
pass
# Pre replace all
for rp in pre_rps:
data = re.compile(rp[0], rp[2]).sub(rp[1], data)
for table in re.findall(r'<table.*?>.*?</table>', data, re.S):
tbl = parse_table(escape_pre_section(table))
# Escape column with '.' as prefix
tbl = re.compile(r'T{\n(\..*?)\nT}', re.S).sub(r'T{\n\\E \1\nT}', tbl)
data = data.replace(table, tbl)
# Replace all
for rp in rps:
data = re.compile(rp[0], rp[2]).sub(rp[1], data)
# Upper case all section headers
for st in re.findall(r'.SH .*\n', data):
data = data.replace(st, st.upper())
# Add tags to member/inherited member functions
# e.g. insert -> vector::insert
#
# .SE is a pseudo macro I created which means 'SECTION END'
# The reason I use it is because I need a marker to know where section
# ends.
# re.findall find patterns which does not overlap, which means if I do
# this: secs = re.findall(r'\n\.SH "(.+?)"(.+?)\.SH', data, re.S)
# re.findall will skip the later .SH tag and thus skip the later section.
# To fix this, '.SE' is used to mark the end of the section so the next
# '.SH' can be find by re.findall
page_type = re.search(r'\n\.SH "TYPE"\n(.+?)\n', data)
if page_type and 'class' in page_type.group(1):
class_name = re.search(r'\n\.SH "NAME"\n(?:.*::)?(.+?) ', data).group(1)
secs = re.findall(r'\n\.SH "(.+?)"(.+?)\.SE', data, re.S)
for sec, content in secs:
# Member functions
if ('MEMBER' in sec and
'NON-MEMBER' not in sec and
'INHERITED' not in sec and
sec != 'MEMBER TYPES'):
content2 = re.sub(r'\n\.IP "([^:]+?)"', r'\n.IP "%s::\1"'
% class_name, content)
# Replace (constructor) (destructor)
content2 = re.sub(r'\(constructor\)', r'%s' % class_name,
content2)
content2 = re.sub(r'\(destructor\)', r'~%s' % class_name,
content2)
data = data.replace(content, content2)
# Inherited member functions
elif 'MEMBER' in sec and 'INHERITED' in sec:
inherit = re.search(r'.+?INHERITED FROM (.+)',
sec).group(1).lower()
content2 = re.sub(r'\n\.IP "(.+)"', r'\n.IP "%s::\1"'
% inherit, content)
data = data.replace(content, content2)
# Remove pseudo macro '.SE'
data = data.replace('\n.SE', '')
return data | Convert HTML text from cplusplus.com to Groff-formatted text. | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/formatter/cplusplus.py#L150-L219 | [
"def parse_table(html):\n root = Node(None, 'root', '', html)\n fd = io.StringIO()\n root.gen(fd)\n return fd.getvalue()\n",
"def escape_pre_section(table):\n \"\"\"Escape <pre> section in table.\"\"\"\n def replace_newline(g):\n return g.group(1).replace('\\n', '\\n.br\\n')\n\n return re.sub('<pre.*?>(.*?)</pre>', replace_newline, table, flags=re.S)\n"
] | # -*- coding: utf-8 -*-
#
# formatter.py - format html from cplusplus.com to groff syntax
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <aitjcize@gmail.com>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import datetime
import re
import urllib.request
from cppman.util import html2man, fixupHTML
from cppman.formatter.tableparser import parse_table
# Format replacement RE list
# The '.SE' pseudo macro is described in the function: html2groff
pre_rps = [
# Snippet, ugly hack: we don't want to treat code listing as table
(r'<table class="snippet">(.*?)</table>',
r'\n.in +2n\n\1\n.in\n.sp\n', re.S),
]
rps = [
# Header, Name
(r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*'
r'<div id="I_file"[^>]*>(.*?)</div>\s*'
r'<h1>(.*?)</h1>\s*<div class="C_prototype"[^>]*>'
r'(.*?)</div>\s*<div id="I_description"[^>]*>(.*?)</div>',
r'.TH "\3" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
r'\n.SH "NAME"\n\3 - \5\n'
r'\n.SE\n.SH "TYPE"\n\1\n'
r'\n.SE\n.SH "SYNOPSIS"\n#include \2\n.sp\n\4\n'
r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
(r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*'
r'<div id="I_file"[^>]*>(.*?)</div>\s*'
r'<h1>(.*?)</h1>\s*'
r'<div id="I_description"[^>]*>(.*?)</div>',
r'.TH "\3" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
r'\n.SH "NAME"\n\3 - \4\n'
r'\n.SE\n.SH "TYPE"\n\1\n'
r'\n.SE\n.SH "SYNOPSIS"\n#include \2\n.sp\n'
r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
(r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*<h1>(.*?)</h1>\s*'
r'<div id="I_description"[^>]*>(.*?)</div>',
r'.TH "\2" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
r'\n.SH "NAME"\n\2 - \3\n'
r'\n.SE\n.SH "TYPE"\n\1\n'
r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
(r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*<h1>(.*?)</h1>\s*'
r'<div id="I_file"[^>]*>(.*?)</div>\s*<div id="I_description"[^>]*>'
'(.*?)</div>',
r'.TH "\2" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
r'\n.SH "NAME"\n\2 - \4\n'
r'\n.SE\n.SH "TYPE"\n\1\n'
r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
(r'\s*<div id="I_type"[^>]*>(.*?)\s*</div>\s*<h1>(.*?)</h1>\s*'
r'<div class="C_prototype"[^>]*>(.*?)</div>\s*'
r'<div id="I_description"[^>]*>(.*?)</div>',
r'.TH "\2" 3 "%s" "cplusplus.com" "C++ Programmer\'s Manual"\n'
r'\n.SH "NAME"\n\2 - \4\n'
r'\n.SE\n.SH "TYPE"\n\1\n'
r'\n.SE\n.SH "SYNOPSIS"\n\3\n'
r'\n.SE\n.SH "DESCRIPTION"\n' % datetime.date.today(), re.S),
(r'<span alt="[^"]*?" class="C_ico cpp11warning"[^>]*>', r' [C++11]', re.S),
# Remove empty #include
(r'#include \n.sp\n', r'', 0),
# Remove empty sections
(r'\n.SH (.+?)\n+.SE', r'', 0),
# Section headers
(r'.*<h3>(.+?)</h3>', r'\n.SE\n.SH "\1"\n', 0),
# 'ul' tag
(r'<ul>', r'\n.RS 2\n', 0),
(r'</ul>', r'\n.RE\n.sp\n', 0),
# 'li' tag
(r'<li>\s*(.+?)</li>', r'\n.IP \[bu] 3\n\1\n', re.S),
# 'pre' tag
(r'<pre[^>]*>(.+?)</pre\s*>', r'\n.nf\n\1\n.fi\n', re.S),
# Subsections
(r'<b>(.+?)</b>:<br/>', r'.SS \1\n', 0),
# Member functions / See Also table
# Without C++11 tag
(r'<dl class="links"><dt><a href="[^"]*"><b>([^ ]+?)</b></a></dt><dd>'
r'([^<]*?)<span class="typ">\s*\(([^<]*?)\n?\)</span></dd></dl>',
r'\n.IP "\1(3)"\n\2 (\3)\n', re.S),
# With C++11 tag
(r'<dl class="links"><dt><a href="[^"]*"><b>([^ ]+?) <b class="C_cpp11" '
r'title="(.+?)"></b></b></a></dt><dd>'
r'([^<]*?)<span class="typ">\s*\((.*?)\n?\)</span></dd></dl>',
r'\n.IP "\1(3) [\2]"\n\3 (\4)\n', re.S),
# Footer
(r'<div id="CH_bb">.*$',
r'\n.SE\n.SH "REFERENCE"\n'
r'cplusplus.com, 2000-2015 - All rights reserved.', re.S),
# C++ version tag
(r'<div.+?title="(C\+\+..)"[^>]*>', r'.sp\n\1\n', 0),
# 'br' tag
(r'<br/>', r'\n.br\n', 0),
(r'\n.br\n.br\n', r'\n.sp\n', 0),
# 'dd' 'dt' tag
(r'<dt>(.+?)</dt>\s*<dd>(.+?)</dd>', r'.IP "\1"\n\2\n', re.S),
# Bold
(r'<strong>(.+?)</strong>', r'\n.B \1\n', 0),
# Remove row number in EXAMPLE
(r'<td class="rownum">.*?</td>', r'', re.S),
# Any other tags
(r'<script[^>]*>[^<]*</script>', r'', 0),
(r'<.*?>', r'', re.S),
# Misc
(r'<', r'<', 0),
(r'>', r'>', 0),
(r'"', r'"', 0),
(r'&', r'&', 0),
(r' ', r' ', 0),
(r'\\([^\^nE])', r'\\\\\1', 0),
(r'>/">', r'', 0),
(r'/">', r'', 0),
# Remove empty lines
(r'\n\s*\n+', r'\n', 0),
(r'\n\n+', r'\n', 0),
# Preserve \n" in EXAMPLE
(r'\\n', r'\\en', 0),
]
def escape_pre_section(table):
"""Escape <pre> section in table."""
def replace_newline(g):
return g.group(1).replace('\n', '\n.br\n')
return re.sub('<pre.*?>(.*?)</pre>', replace_newline, table, flags=re.S)
def func_test():
"""Test if there is major format changes in cplusplus.com"""
ifs = urllib.request.urlopen('http://www.cplusplus.com/printf')
result = html2groff(fixupHTML(ifs.read()), 'printf')
assert '.SH "NAME"' in result
assert '.SH "TYPE"' in result
assert '.SH "DESCRIPTION"' in result
def test():
"""Simple Text"""
ifs = urllib.request.urlopen('http://www.cplusplus.com/vector')
print(html2groff(fixupHTML(ifs.read()), 'std::vector'), end=' ')
# with open('test.html') as ifs:
# print html2groff(fixupHTML(ifs.read()), 'std::vector'),
if __name__ == '__main__':
test()
|
aitjcize/cppman | cppman/util.py | update_mandb_path | python | def update_mandb_path():
manpath_file = os.path.join(environ.HOME, ".manpath")
man_dir = environ.cache_dir
manindex_dir = environ.manindex_dirl
lines = []
""" read all lines """
try:
with open(manpath_file, 'r') as f:
lines = f.readlines()
except IOError:
return
""" remove MANDATORY_MANPATH and MANDB_MAP entry """
lines = [line for line in lines if man_dir not in line]
with open(manpath_file, 'w') as f:
if environ.config.UpdateManPath:
lines.append('MANDATORY_MANPATH\t%s\n' % man_dir)
lines.append('MANDB_MAP\t\t\t%s\t%s\n' % (man_dir, manindex_dir))
f.writelines(lines) | Add $XDG_CACHE_HOME/cppman/man to $HOME/.manpath | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/util.py#L37-L60 | null | # -*- coding: utf-8 -*-
#
# util.py - Misc utilities
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <aitjcize@gmail.com>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import fcntl
import os
import struct
import subprocess
import sys
import termios
from cppman import environ
import bs4
def update_man3_link():
man3_path = os.path.join(environ.cache_dir, 'man3')
if os.path.lexists(man3_path):
if os.path.islink(man3_path):
if os.readlink(man3_path) == environ.config.Source:
return
else:
os.unlink(man3_path)
else:
raise RuntimeError("Can't create link since `%s' already exists" %
man3_path)
try:
os.makedirs(os.path.join(environ.cache_dir, environ.config.Source))
except Exception:
pass
os.symlink(environ.config.Source, man3_path)
def get_width():
"""Get terminal width"""
# Get terminal size
ws = struct.pack("HHHH", 0, 0, 0, 0)
ws = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, ws)
lines, columns, x, y = struct.unpack("HHHH", ws)
width = min(columns * 39 // 40, columns - 2)
return width
def groff2man(data):
"""Read groff-formatted text and output man pages."""
width = get_width()
cmd = 'groff -t -Tascii -m man -rLL=%dn -rLT=%dn' % (width, width)
handle = subprocess.Popen(
cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
man_text, stderr = handle.communicate(data)
return man_text
def html2man(data, formatter):
"""Convert HTML text from cplusplus.com to man pages."""
groff_text = formatter(data)
man_text = groff2man(groff_text)
return man_text
def fixupHTML(data):
return str(bs4.BeautifulSoup(data, "html5lib"))
|
aitjcize/cppman | cppman/util.py | get_width | python | def get_width():
# Get terminal size
ws = struct.pack("HHHH", 0, 0, 0, 0)
ws = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, ws)
lines, columns, x, y = struct.unpack("HHHH", ws)
width = min(columns * 39 // 40, columns - 2)
return width | Get terminal width | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/util.py#L83-L90 | null | # -*- coding: utf-8 -*-
#
# util.py - Misc utilities
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <aitjcize@gmail.com>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import fcntl
import os
import struct
import subprocess
import sys
import termios
from cppman import environ
import bs4
def update_mandb_path():
"""Add $XDG_CACHE_HOME/cppman/man to $HOME/.manpath"""
manpath_file = os.path.join(environ.HOME, ".manpath")
man_dir = environ.cache_dir
manindex_dir = environ.manindex_dirl
lines = []
""" read all lines """
try:
with open(manpath_file, 'r') as f:
lines = f.readlines()
except IOError:
return
""" remove MANDATORY_MANPATH and MANDB_MAP entry """
lines = [line for line in lines if man_dir not in line]
with open(manpath_file, 'w') as f:
if environ.config.UpdateManPath:
lines.append('MANDATORY_MANPATH\t%s\n' % man_dir)
lines.append('MANDB_MAP\t\t\t%s\t%s\n' % (man_dir, manindex_dir))
f.writelines(lines)
def update_man3_link():
man3_path = os.path.join(environ.cache_dir, 'man3')
if os.path.lexists(man3_path):
if os.path.islink(man3_path):
if os.readlink(man3_path) == environ.config.Source:
return
else:
os.unlink(man3_path)
else:
raise RuntimeError("Can't create link since `%s' already exists" %
man3_path)
try:
os.makedirs(os.path.join(environ.cache_dir, environ.config.Source))
except Exception:
pass
os.symlink(environ.config.Source, man3_path)
def groff2man(data):
"""Read groff-formatted text and output man pages."""
width = get_width()
cmd = 'groff -t -Tascii -m man -rLL=%dn -rLT=%dn' % (width, width)
handle = subprocess.Popen(
cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
man_text, stderr = handle.communicate(data)
return man_text
def html2man(data, formatter):
"""Convert HTML text from cplusplus.com to man pages."""
groff_text = formatter(data)
man_text = groff2man(groff_text)
return man_text
def fixupHTML(data):
return str(bs4.BeautifulSoup(data, "html5lib"))
|
aitjcize/cppman | cppman/util.py | groff2man | python | def groff2man(data):
width = get_width()
cmd = 'groff -t -Tascii -m man -rLL=%dn -rLT=%dn' % (width, width)
handle = subprocess.Popen(
cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
man_text, stderr = handle.communicate(data)
return man_text | Read groff-formatted text and output man pages. | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/util.py#L93-L102 | [
"def get_width():\n \"\"\"Get terminal width\"\"\"\n # Get terminal size\n ws = struct.pack(\"HHHH\", 0, 0, 0, 0)\n ws = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, ws)\n lines, columns, x, y = struct.unpack(\"HHHH\", ws)\n width = min(columns * 39 // 40, columns - 2)\n return width\n"
] | # -*- coding: utf-8 -*-
#
# util.py - Misc utilities
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <aitjcize@gmail.com>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import fcntl
import os
import struct
import subprocess
import sys
import termios
from cppman import environ
import bs4
def update_mandb_path():
"""Add $XDG_CACHE_HOME/cppman/man to $HOME/.manpath"""
manpath_file = os.path.join(environ.HOME, ".manpath")
man_dir = environ.cache_dir
manindex_dir = environ.manindex_dirl
lines = []
""" read all lines """
try:
with open(manpath_file, 'r') as f:
lines = f.readlines()
except IOError:
return
""" remove MANDATORY_MANPATH and MANDB_MAP entry """
lines = [line for line in lines if man_dir not in line]
with open(manpath_file, 'w') as f:
if environ.config.UpdateManPath:
lines.append('MANDATORY_MANPATH\t%s\n' % man_dir)
lines.append('MANDB_MAP\t\t\t%s\t%s\n' % (man_dir, manindex_dir))
f.writelines(lines)
def update_man3_link():
man3_path = os.path.join(environ.cache_dir, 'man3')
if os.path.lexists(man3_path):
if os.path.islink(man3_path):
if os.readlink(man3_path) == environ.config.Source:
return
else:
os.unlink(man3_path)
else:
raise RuntimeError("Can't create link since `%s' already exists" %
man3_path)
try:
os.makedirs(os.path.join(environ.cache_dir, environ.config.Source))
except Exception:
pass
os.symlink(environ.config.Source, man3_path)
def get_width():
"""Get terminal width"""
# Get terminal size
ws = struct.pack("HHHH", 0, 0, 0, 0)
ws = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, ws)
lines, columns, x, y = struct.unpack("HHHH", ws)
width = min(columns * 39 // 40, columns - 2)
return width
def html2man(data, formatter):
"""Convert HTML text from cplusplus.com to man pages."""
groff_text = formatter(data)
man_text = groff2man(groff_text)
return man_text
def fixupHTML(data):
return str(bs4.BeautifulSoup(data, "html5lib"))
|
aitjcize/cppman | cppman/util.py | html2man | python | def html2man(data, formatter):
groff_text = formatter(data)
man_text = groff2man(groff_text)
return man_text | Convert HTML text from cplusplus.com to man pages. | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/util.py#L105-L109 | [
"def groff2man(data):\n \"\"\"Read groff-formatted text and output man pages.\"\"\"\n width = get_width()\n\n cmd = 'groff -t -Tascii -m man -rLL=%dn -rLT=%dn' % (width, width)\n handle = subprocess.Popen(\n cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n man_text, stderr = handle.communicate(data)\n return man_text\n"
] | # -*- coding: utf-8 -*-
#
# util.py - Misc utilities
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <aitjcize@gmail.com>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import fcntl
import os
import struct
import subprocess
import sys
import termios
from cppman import environ
import bs4
def update_mandb_path():
"""Add $XDG_CACHE_HOME/cppman/man to $HOME/.manpath"""
manpath_file = os.path.join(environ.HOME, ".manpath")
man_dir = environ.cache_dir
manindex_dir = environ.manindex_dirl
lines = []
""" read all lines """
try:
with open(manpath_file, 'r') as f:
lines = f.readlines()
except IOError:
return
""" remove MANDATORY_MANPATH and MANDB_MAP entry """
lines = [line for line in lines if man_dir not in line]
with open(manpath_file, 'w') as f:
if environ.config.UpdateManPath:
lines.append('MANDATORY_MANPATH\t%s\n' % man_dir)
lines.append('MANDB_MAP\t\t\t%s\t%s\n' % (man_dir, manindex_dir))
f.writelines(lines)
def update_man3_link():
man3_path = os.path.join(environ.cache_dir, 'man3')
if os.path.lexists(man3_path):
if os.path.islink(man3_path):
if os.readlink(man3_path) == environ.config.Source:
return
else:
os.unlink(man3_path)
else:
raise RuntimeError("Can't create link since `%s' already exists" %
man3_path)
try:
os.makedirs(os.path.join(environ.cache_dir, environ.config.Source))
except Exception:
pass
os.symlink(environ.config.Source, man3_path)
def get_width():
"""Get terminal width"""
# Get terminal size
ws = struct.pack("HHHH", 0, 0, 0, 0)
ws = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, ws)
lines, columns, x, y = struct.unpack("HHHH", ws)
width = min(columns * 39 // 40, columns - 2)
return width
def groff2man(data):
"""Read groff-formatted text and output man pages."""
width = get_width()
cmd = 'groff -t -Tascii -m man -rLL=%dn -rLT=%dn' % (width, width)
handle = subprocess.Popen(
cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
man_text, stderr = handle.communicate(data)
return man_text
def fixupHTML(data):
    """Normalize possibly-broken HTML by round-tripping it through html5lib.

    html5lib repairs missing/unclosed tags the way browsers do, so later
    regex-based processing sees well-formed markup.
    """
    soup = bs4.BeautifulSoup(data, "html5lib")
    return str(soup)
|
aitjcize/cppman | cppman/formatter/cppreference.py | html2groff | python | def html2groff(data, name):
# Remove header and footer
try:
data = data[data.index('<div id="cpp-content-base">'):]
data = data[:data.index('<div class="printfooter">') + 25]
except ValueError:
pass
# Remove non-printable characters
data = ''.join([x for x in data if x in string.printable])
for table in re.findall(
r'<table class="(?:wikitable|dsctable)"[^>]*>.*?</table>',
data, re.S):
tbl = parse_table(table)
# Escape column with '.' as prefix
tbl = re.compile(r'T{\n(\..*?)\nT}', re.S).sub(r'T{\n\\E \1\nT}', tbl)
data = data.replace(table, tbl)
# Pre replace all
for rp in rps:
data = re.compile(rp[0], rp[2]).sub(rp[1], data)
# Remove non-printable characters
data = ''.join([x for x in data if x in string.printable])
# Upper case all section headers
for st in re.findall(r'.SH .*\n', data):
data = data.replace(st, st.upper())
# Add tags to member/inherited member functions
# e.g. insert -> vector::insert
#
# .SE is a pseudo macro I created which means 'SECTION END'
# The reason I use it is because I need a marker to know where section
# ends.
# re.findall find patterns which does not overlap, which means if I do
# this: secs = re.findall(r'\n\.SH "(.+?)"(.+?)\.SH', data, re.S)
# re.findall will skip the later .SH tag and thus skip the later section.
# To fix this, '.SE' is used to mark the end of the section so the next
# '.SH' can be find by re.findall
try:
idx = data.index('.IEND')
except ValueError:
idx = None
def add_header_multi(prefix, g):
if ',' in g.group(1):
res = ', '.join(['%s::%s' % (prefix, x.strip())
for x in g.group(1).split(',')])
else:
res = '%s::%s' % (prefix, g.group(1))
return '\n.IP "%s"' % res
if idx:
class_name = name
if class_name.startswith('std::'):
normalized_class_name = class_name[len('std::'):]
else:
normalized_class_name = class_name
class_member_content = data[:idx]
secs = re.findall(r'\.SH "(.+?)"(.+?)\.SE', class_member_content, re.S)
for sec, content in secs:
# Member functions
if ('MEMBER' in sec and
'NON-MEMBER' not in sec and
'INHERITED' not in sec and
sec != 'MEMBER TYPES'):
content2 = re.sub(r'\n\.IP "([^:]+?)"',
partial(add_header_multi, class_name),
content)
# Replace (constructor) (destructor)
content2 = re.sub(r'\(constructor\)', r'%s' %
normalized_class_name, content2)
content2 = re.sub(r'\(destructor\)', r'~%s' %
normalized_class_name, content2)
data = data.replace(content, content2)
blocks = re.findall(r'\.IBEGIN\s*(.+?)\s*\n(.+?)\.IEND', data, re.S)
for inherited_class, content in blocks:
content2 = re.sub(r'\.SH "(.+?)"', r'\n.SH "\1 INHERITED FROM %s"'
% inherited_class.upper(), content)
data = data.replace(content, content2)
secs = re.findall(r'\.SH "(.+?)"(.+?)\.SE', content, re.S)
for sec, content in secs:
# Inherited member functions
if 'MEMBER' in sec and \
sec != 'MEMBER TYPES':
content2 = re.sub(r'\n\.IP "(.+)"',
partial(add_header_multi, inherited_class),
content)
data = data.replace(content, content2)
# Remove unneeded pseudo macro
data = re.sub('(?:\n.SE|.IBEGIN.*?\n|\n.IEND)', '', data)
# Replace all macros
desc_re = re.search(r'.SH "DESCRIPTION"\n.*?([^\n\s].*?)\n', data)
shortdesc = ''
# not empty description
if desc_re and not desc_re.group(1).startswith('.SH'):
shortdesc = '- ' + desc_re.group(1)
def dereference(g):
d = dict(name=name, shortdesc=shortdesc)
if g.group(1) in d:
return d[g.group(1)]
data = re.sub('{{(.*?)}}', dereference, data)
return data | Convert HTML text from cppreference.com to Groff-formatted text. | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/formatter/cppreference.py#L189-L307 | [
"def parse_table(html):\n root = Node(None, 'root', '', html)\n fd = io.StringIO()\n root.gen(fd)\n return fd.getvalue()\n"
] | # -*- coding: utf-8 -*-
#
# formatter.py - format html from cplusplus.com to groff syntax
#
# Copyright (C) 2010 - 2015 Wei-Ning Huang (AZ) <aitjcize@gmail.com>
# All Rights reserved.
#
# This file is part of cppman.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import datetime
import re
import string
import urllib.request
from functools import partial
from cppman.util import html2man, fixupHTML
from cppman.formatter.tableparser import parse_table
def member_table_def(g):
tbl = parse_table('<table>%s</table>' % str(g.group(3)))
# Escape column with '.' as prefix
tbl = re.compile(r'T{\n(\..*?)\nT}', re.S).sub(r'T{\n\\E \1\nT}', tbl)
return '\n.IP "%s"\n%s\n%s\n' % (g.group(1), g.group(2), tbl)
def member_type_function(g):
head = re.sub(r'<.*?>', '', g.group(1)).strip()
tail = ''
cppvertag = re.search('^(.*)(\[(?:(?:since|until) )?C\+\+\d+\])$', head)
if cppvertag:
head = cppvertag.group(1).strip()
tail = ' ' + cppvertag.group(2)
if ',' in head:
head = ', '.join([x.strip() + '(3)' for x in head.split(',')])
else:
head = head.strip() + '(3)'
return '\n.IP "%s"\n%s\n' % (head + tail, g.group(2))
NAV_BAR_END = '<div class="t-navbar-sep">.?</div></div>'
# Format replacement RE list
# The '.SE' pseudo macro is described in the function: html2groff
rps = [
# Workaround: remove <p> in t-dcl
(r'<tr class="t-dcl">(.*?)</tr>',
lambda g: re.sub('<p/?>', '', g.group(1)), re.S),
# Header, Name
(r'<h1.*?>(.*?)</h1>',
r'\n.TH "{{name}}" 3 "%s" "cppreference.com" "C++ Programmer\'s Manual"\n'
r'\n.SH "NAME"\n{{name}} {{shortdesc}}\n.SE\n' % datetime.date.today(),
re.S),
# Defined in header
(r'<div class="t-navbar"[^>]*>.*?' + NAV_BAR_END + r'.*?'
r'Defined in header <code>(.*?)</code>(.*?)<tr class="t-dcl-sep">',
r'\n.SH "SYNOPSIS"\n#include \1\n.sp\n'
r'.nf\n\2\n.fi\n.SE\n'
r'\n.SH "DESCRIPTION"\n', re.S),
(r'<div class="t-navbar"[^>]*>.*?' + NAV_BAR_END +
r'(.*?)<tr class="t-dcl-sep">',
r'\n.SH "SYNOPSIS"\n.nf\n\1\n.fi\n.SE\n'
r'\n.SH "DESCRIPTION"\n', re.S),
# <unordered_map>
(r'<div class="t-navbar"[^>]*>.*?' + NAV_BAR_END +
r'(.*?)<table class="t-dsc-begin">',
r'\n.SH "DESCRIPTION"\n\1\n', re.S),
# access specifiers
(r'<div class="t-navbar"[^>]*>.*?' + NAV_BAR_END +
r'(.*?)<h3',
r'\n.SH "DESCRIPTION"\n\1\n<h3', re.S),
(r'<td>\s*\([0-9]+\)\s*</td>', r'', 0),
# Section headers
(r'<div class="t-inherited">.*?<h2>.*?Inherited from\s*(.*?)\s*</h2>',
r'\n.SE\n.IEND\n.IBEGIN \1\n', re.S),
# Remove tags
(r'<span class="edit.*?">.*?</span> ?', r'', re.S),
(r'[edit]', r'', re.S),
(r'\[edit\]', r'', re.S),
(r'<div id="siteSub">.*?</div>', r'', 0),
(r'<div id="contentSub">.*?</div>', r'', 0),
(r'<table class="toc" id="toc"[^>]*>.*?</table>', r'', re.S),
(r'<h2[^>]*>.*?</h2>', r'', re.S),
(r'<div class="coliru-btn coliru-btn-run-init">.*?</div>', r'', re.S),
(r'<tr class="t-dsc-hitem">.*?</tr>', r'', re.S),
# C++11/14/17/20
(r'\(((?:since|until) C\+\+\d+)\)', r' [\1]', re.S),
(r'\((C\+\+\d+)\)', r' [\1]', re.S),
# Subsections
(r'<h5[^>]*>\s*(.*)</h5>', r'\n.SS "\1"\n', 0),
# Group t-lines
(r'<span></span>', r'', re.S),
(r'<span class="t-lines">(?:<span>.+?</span>)+</span>',
lambda x: re.sub('\s*</span><span>\s*', r', ', x.group(0)), re.S),
# Member type & function second col is group see basic_fstream for example
(r'<tr class="t-dsc">\s*?<td>((?:(?!</td>).)*?)</td>\s*?'
r'<td>((?:(?!</td>).)*?)<table[^>]*>((?:(?!</table>).)*?)</table>'
r'(?:(?!</td>).)*?</td>\s*?</tr>',
member_table_def, re.S),
# Section headers
(r'.*<h3>(.+?)</h3>', r'\n.SE\n.SH "\1"\n', 0),
# Member type & function
(r'<tr class="t-dsc">\n?<td>\s*(.*?)\n?</td>.*?<td>\s*(.*?)</td>.*?</tr>',
member_type_function, re.S),
# Parameters
(r'<tr class="t-par">.*?<td>\s*(.*?)\n?</td>.*?<td>.*?</td>.*?'
r'<td>\s*(.*?)</td>.*?</tr>',
r'\n.IP "\1"\n\2\n', re.S),
# 'ul' tag
(r'<ul>', r'\n.RS 2\n', 0),
(r'</ul>', r'\n.RE\n.sp\n', 0),
# 'li' tag
(r'<li>\s*(.+?)</li>', r'\n.IP \[bu] 3\n\1\n', re.S),
# 'pre' tag
(r'<pre[^>]*>(.+?)</pre\s*>', r'\n.in +2n\n.nf\n\1\n.fi\n.in\n', re.S),
# Footer
(r'<div class="printfooter">',
r'\n.SE\n.IEND\n.SH "REFERENCE"\n'
r'cppreference.com, 2015 - All rights reserved.', re.S),
# C++ version tag
(r'<div title="(C\+\+..)"[^>]*>', r'.sp\n\1\n', 0),
# Output
(r'<p>Output:\n?</p>', r'\n.sp\nOutput:\n', re.S),
# Paragraph
(r'<p>(.*?)</p>', r'\n\1\n.sp\n', re.S),
(r'<div class="t-li1">(.*?)</div>', r'\n\1\n.sp\n', re.S),
(r'<div class="t-li2">(.*?)</div>',
r'\n.RS\n\1\n.RE\n.sp\n', re.S),
# 'br' tag
(r'<br/>', r'\n.br\n', 0),
(r'\n.br\n.br\n', r'\n.sp\n', 0),
# 'dd' 'dt' tag
(r'<dt>(.+?)</dt>\s*<dd>(.+?)</dd>', r'\n.IP "\1"\n\2\n', re.S),
# Bold
(r'<strong>(.+?)</strong>', r'\n.B \1\n', 0),
# Any other tags
(r'<script[^>]*>[^<]*</script>', r'', 0),
(r'<.*?>', r'', re.S),
# Escape
(r'^#', r'\#', 0),
(r' ', ' ', 0),
(r'&#(\d+);', lambda g: chr(int(g.group(1))), 0),
# Misc
(r'<', r'<', 0),
(r'>', r'>', 0),
(r'"', r'"', 0),
(r'&', r'&', 0),
(r' ', r' ', 0),
(r'\\([^\^nE])', r'\\\\\1', 0),
(r'>/">', r'', 0),
(r'/">', r'', 0),
# Remove empty sections
(r'\n.SH (.+?)\n+.SE', r'', 0),
# Remove empty lines
(r'\n\s*\n+', r'\n', 0),
(r'\n\n+', r'\n', 0),
# Preserve \n" in EXAMPLE
(r'\\n', r'\\en', 0),
# Remove leading whitespace
(r'^\s+', r'', re.S),
# Trailing white-spaces
(r'\s+\n', r'\n', re.S),
# Remove extra whitespace and newline in .SH/SS/IP section
(r'.(SH|SS|IP) "\s*(.*?)\s*\n?"', r'.\1 "\2"', 0),
# Remove extra whitespace before .IP bullet
(r'(.IP \\\\\[bu\] 3)\n\s*(.*?)\n', r'\1\n\2\n', 0),
# Remove extra '\n' before C++ version Tag (don't do it in table)
(r'(?<!T{)\n\s*(\[(:?since|until) C\+\+\d+\])', r' \1', re.S)
]
def func_test():
"""Test if there is major format changes in cplusplus.com"""
ifs = urllib.request.urlopen('http://en.cppreference.com/w/cpp/container/vector')
result = html2groff(fixupHTML(ifs.read()), 'std::vector')
assert '.SH "NAME"' in result
assert '.SH "SYNOPSIS"' in result
assert '.SH "DESCRIPTION"' in result
def test():
"""Simple Text"""
ifs = urllib.request.urlopen('http://en.cppreference.com/w/cpp/container/vector')
print(html2groff(fixupHTML(ifs.read()), 'std::vector'), end=' ')
#with open('test.html') as ifs:
# data = fixupHTML(ifs.read())
# print html2groff(data, 'std::vector'),
if __name__ == '__main__':
test()
|
aitjcize/cppman | cppman/main.py | Cppman.extract_name | python | def extract_name(self, data):
name = re.search('<h1[^>]*>(.+?)</h1>', data).group(1)
name = re.sub(r'<([^>]+)>', r'', name)
name = re.sub(r'>', r'>', name)
name = re.sub(r'<', r'<', name)
return name | Extract man page name from web page. | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/main.py#L56-L62 | null | class Cppman(Crawler):
"""Manage cpp man pages, indexes"""
def __init__(self, forced=False, force_columns=-1):
Crawler.__init__(self)
self.results = set()
self.forced = forced
self.success_count = None
self.failure_count = None
self.force_columns = force_columns
self.blacklist = [
]
self.name_exceptions = [
'http://www.cplusplus.com/reference/string/swap/'
]
def rebuild_index(self):
"""Rebuild index database from cplusplus.com and cppreference.com."""
try:
os.remove(environ.index_db_re)
except:
pass
self.db_conn = sqlite3.connect(environ.index_db_re)
self.db_cursor = self.db_conn.cursor()
self.db_cursor.execute('CREATE TABLE "cplusplus.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
self.db_cursor.execute('CREATE TABLE "cppreference.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
try:
self.add_url_filter('\.(jpg|jpeg|gif|png|js|css|swf|svg)$')
self.set_follow_mode(Crawler.F_SAME_PATH)
# cplusplus.com
self.crawl('http://www.cplusplus.com/reference/')
for name, url, std in self.results:
self.insert_index('cplusplus.com', name, url, std)
self.db_conn.commit()
# Rename duplicate entries
duplicates = self.db_cursor.execute('SELECT name, COUNT(name) '
'AS NON '
'FROM "cplusplus.com" '
'GROUP BY NAME '
'HAVING (NON > 1)').fetchall()
for name, num in duplicates:
dump = self.db_cursor.execute('SELECT name, url FROM '
'"cplusplus.com" WHERE name="%s"'
% name).fetchall()
for n, u in dump:
if u not in self.name_exceptions:
n2 = n[5:] if n.startswith('std::') else n
try:
group = re.search('/([^/]+)/%s/$' % n2, u).group(1)
except Exception:
group = re.search('/([^/]+)/[^/]+/$', u).group(1)
new_name = '%s (%s)' % (n, group)
self.db_cursor.execute('UPDATE "cplusplus.com" '
'SET name="%s", url="%s" '
'WHERE url="%s"' %
(new_name, u, u))
self.db_conn.commit()
# cppreference.com
self.results = set()
self.crawl('https://en.cppreference.com/w/cpp', '/w/cpp')
for name, url, std in self.results:
self.insert_index('cppreference.com', name, url, std)
self.db_conn.commit()
except KeyboardInterrupt:
os.remove(environ.index_db_re)
raise KeyboardInterrupt
finally:
self.db_conn.close()
def process_document(self, doc, std):
"""callback to insert index"""
if doc.url not in self.blacklist:
print("Indexing '%s' %s..." % (doc.url, std))
name = self.extract_name(doc.text)
self.results.add((name, doc.url, std))
else:
print("Skipping blacklisted page '%s' ..." % doc.url)
return None
def parse_expression(self, expr):
"""
split expression into prefix and expression
tested with
```
operator==
!=
std::rel_ops::operator!=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators
std::vector::begin
std::abs(float)
std::fabs()
```
"""
m = re.match(r'^(.*?(?:::)?(?:operator)?)((?:::[^:]*|[^:]*)?)$', expr);
prefix = m.group(1)
tail = m.group(2)
return [prefix, tail]
def parse_title(self, title):
"""
split of the last parenthesis operator==,!=,<,<=(std::vector)
tested with
```
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=
operator==,!=,<,<=,>,>=
std::rel_ops::operator!=,>,<=,>=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators (vector)
std::vector::begin, std::vector::cbegin
std::abs(float), std::fabs
std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)
```
"""
m = re.match(r'^\s*((?:\(size_type\)|(?:.|\(\))*?)*)((?:\([^)]+\))?)\s*$', title)
postfix = m.group(2)
t_names = m.group(1).split(',')
t_names = [n.strip() for n in t_names]
prefix = self.parse_expression(t_names[0])[0]
names = []
for n in t_names:
r = self.parse_expression(n);
if prefix == r[0]:
names.append(n + postfix)
else:
names.append(prefix + r[1] + postfix)
return names
def insert_index(self, table, name, url, std=""):
"""callback to insert index"""
names = self.parse_title(name);
for n in names:
self.db_cursor.execute(
'INSERT INTO "%s" (name, url, std) VALUES (?, ?, ?)' % table, (
n, url, std))
def cache_all(self):
"""Cache all available man pages"""
respond = input(
'By default, cppman fetches pages on-the-fly if corresponding '
'page is not found in the cache. The "cache-all" option is only '
'useful if you want to view man pages offline. '
'Caching all contents will take several minutes, '
'do you want to continue [y/N]? ')
if not (respond and 'yes'.startswith(respond.lower())):
raise KeyboardInterrupt
try:
os.makedirs(environ.cache_dir)
except:
pass
self.success_count = 0
self.failure_count = 0
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
source = environ.config.source
print('Caching manpages from %s ...' % source)
data = cursor.execute('SELECT * FROM "%s"' % source).fetchall()
for name, url, _ in data:
print('Caching %s ...' % name)
retries = 3
while retries > 0:
try:
self.cache_man_page(source, url, name)
except Exception:
print('Retrying ...')
retries -= 1
else:
self.success_count += 1
break
else:
print('Error caching %s ...' % name)
self.failure_count += 1
conn.close()
print('\n%d manual pages cached successfully.' % self.success_count)
print('%d manual pages failed to cache.' % self.failure_count)
self.update_mandb(False)
def cache_man_page(self, source, url, name):
"""callback to cache new man page"""
# Skip if already exists, override if forced flag is true
outname = self.get_page_path(source, name)
if os.path.exists(outname) and not self.forced:
return
try:
os.makedirs(os.path.join(environ.cache_dir, source))
except OSError:
pass
# There are often some errors in the HTML, for example: missing closing
# tag. We use fixupHTML to fix this.
data = util.fixupHTML(urllib.request.urlopen(url).read())
formatter = importlib.import_module('cppman.formatter.%s' % source[:-4])
groff_text = formatter.html2groff(data, name)
with gzip.open(outname, 'w') as f:
f.write(groff_text.encode('utf-8'))
def clear_cache(self):
"""Clear all cache in man"""
shutil.rmtree(environ.cache_dir)
def man(self, pattern):
"""Call viewer.sh to view man page"""
try:
avail = os.listdir(os.path.join(environ.cache_dir, environ.source))
except OSError:
avail = []
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
# Try direct match
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
# Try standard library
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="std::%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
raise RuntimeError('No manual entry for ' + pattern)
finally:
conn.close()
page_filename = self.get_normalized_page_name(page_name)
if self.forced or page_filename + '.3.gz' not in avail:
self.cache_man_page(environ.source, url, page_name)
pager_type = environ.pager if sys.stdout.isatty() else 'pipe'
# Call viewer
columns = (util.get_width() if self.force_columns == -1 else
self.force_columns)
pid = os.fork()
if pid == 0:
os.execl('/bin/sh', '/bin/sh', environ.pager_script, pager_type,
self.get_page_path(environ.source, page_name),
str(columns), environ.pager_config, page_name)
return pid
def find(self, pattern):
"""Find pages in database."""
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
selected = cursor.execute(
'SELECT * FROM "%s" WHERE name '
'LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchall()
pat = re.compile('(%s)' % re.escape(pattern), re.I)
if selected:
for name, url, std in selected:
if os.isatty(sys.stdout.fileno()):
print(pat.sub(r'\033[1;31m\1\033[0m', name) +
(' \033[1;33m[%s]\033[0m' % std if std else ''))
else:
print(name + (' [%s]' % std if std else ''))
else:
raise RuntimeError('%s: nothing appropriate.' % pattern)
def update_mandb(self, quiet=True):
"""Update mandb."""
if not environ.config.UpdateManPath:
return
print('\nrunning mandb...')
cmd = 'mandb %s' % (' -q' if quiet else '')
subprocess.Popen(cmd, shell=True).wait()
def get_normalized_page_name(self, name):
return name.replace('/', '_')
def get_page_path(self, source, name):
name = self.get_normalized_page_name(name)
return os.path.join(environ.cache_dir, source, name + '.3.gz')
|
aitjcize/cppman | cppman/main.py | Cppman.rebuild_index | python | def rebuild_index(self):
try:
os.remove(environ.index_db_re)
except:
pass
self.db_conn = sqlite3.connect(environ.index_db_re)
self.db_cursor = self.db_conn.cursor()
self.db_cursor.execute('CREATE TABLE "cplusplus.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
self.db_cursor.execute('CREATE TABLE "cppreference.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
try:
self.add_url_filter('\.(jpg|jpeg|gif|png|js|css|swf|svg)$')
self.set_follow_mode(Crawler.F_SAME_PATH)
# cplusplus.com
self.crawl('http://www.cplusplus.com/reference/')
for name, url, std in self.results:
self.insert_index('cplusplus.com', name, url, std)
self.db_conn.commit()
# Rename duplicate entries
duplicates = self.db_cursor.execute('SELECT name, COUNT(name) '
'AS NON '
'FROM "cplusplus.com" '
'GROUP BY NAME '
'HAVING (NON > 1)').fetchall()
for name, num in duplicates:
dump = self.db_cursor.execute('SELECT name, url FROM '
'"cplusplus.com" WHERE name="%s"'
% name).fetchall()
for n, u in dump:
if u not in self.name_exceptions:
n2 = n[5:] if n.startswith('std::') else n
try:
group = re.search('/([^/]+)/%s/$' % n2, u).group(1)
except Exception:
group = re.search('/([^/]+)/[^/]+/$', u).group(1)
new_name = '%s (%s)' % (n, group)
self.db_cursor.execute('UPDATE "cplusplus.com" '
'SET name="%s", url="%s" '
'WHERE url="%s"' %
(new_name, u, u))
self.db_conn.commit()
# cppreference.com
self.results = set()
self.crawl('https://en.cppreference.com/w/cpp', '/w/cpp')
for name, url, std in self.results:
self.insert_index('cppreference.com', name, url, std)
self.db_conn.commit()
except KeyboardInterrupt:
os.remove(environ.index_db_re)
raise KeyboardInterrupt
finally:
self.db_conn.close() | Rebuild index database from cplusplus.com and cppreference.com. | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/main.py#L64-L125 | [
"def add_url_filter(self, uf):\n self.url_filters.append(uf)\n",
"def set_follow_mode(self, mode):\n if mode > 5:\n raise RuntimeError('invalid follow mode.')\n self.follow_mode = mode\n",
"def crawl(self, url, path=None):\n self.root_url = url\n self.link_parser = create_link_parser(url)\n\n rx = re.match('(https?://)([^/]+)([^\\?]*)(\\?.*)?', url)\n self.proto = rx.group(1)\n self.host = rx.group(2)\n self.path = rx.group(3)\n self.dir_path = os.path.dirname(self.path)\n self.query = rx.group(4)\n\n if path:\n self.dir_path = path\n\n self.targets.add((url, \"\"))\n self._spawn_new_worker()\n\n while self.threads:\n try:\n for t in self.threads:\n t.join(1)\n if not t.isAlive():\n self.threads.remove(t)\n except KeyboardInterrupt:\n sys.exit(1)\n",
"def insert_index(self, table, name, url, std=\"\"):\n \"\"\"callback to insert index\"\"\"\n names = self.parse_title(name);\n\n for n in names:\n self.db_cursor.execute(\n 'INSERT INTO \"%s\" (name, url, std) VALUES (?, ?, ?)' % table, (\n n, url, std))\n"
] | class Cppman(Crawler):
"""Manage cpp man pages, indexes"""
def __init__(self, forced=False, force_columns=-1):
Crawler.__init__(self)
self.results = set()
self.forced = forced
self.success_count = None
self.failure_count = None
self.force_columns = force_columns
self.blacklist = [
]
self.name_exceptions = [
'http://www.cplusplus.com/reference/string/swap/'
]
def extract_name(self, data):
"""Extract man page name from web page."""
name = re.search('<h1[^>]*>(.+?)</h1>', data).group(1)
name = re.sub(r'<([^>]+)>', r'', name)
name = re.sub(r'>', r'>', name)
name = re.sub(r'<', r'<', name)
return name
def process_document(self, doc, std):
"""callback to insert index"""
if doc.url not in self.blacklist:
print("Indexing '%s' %s..." % (doc.url, std))
name = self.extract_name(doc.text)
self.results.add((name, doc.url, std))
else:
print("Skipping blacklisted page '%s' ..." % doc.url)
return None
def parse_expression(self, expr):
"""
split expression into prefix and expression
tested with
```
operator==
!=
std::rel_ops::operator!=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators
std::vector::begin
std::abs(float)
std::fabs()
```
"""
m = re.match(r'^(.*?(?:::)?(?:operator)?)((?:::[^:]*|[^:]*)?)$', expr);
prefix = m.group(1)
tail = m.group(2)
return [prefix, tail]
def parse_title(self, title):
"""
split of the last parenthesis operator==,!=,<,<=(std::vector)
tested with
```
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=
operator==,!=,<,<=,>,>=
std::rel_ops::operator!=,>,<=,>=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators (vector)
std::vector::begin, std::vector::cbegin
std::abs(float), std::fabs
std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)
```
"""
m = re.match(r'^\s*((?:\(size_type\)|(?:.|\(\))*?)*)((?:\([^)]+\))?)\s*$', title)
postfix = m.group(2)
t_names = m.group(1).split(',')
t_names = [n.strip() for n in t_names]
prefix = self.parse_expression(t_names[0])[0]
names = []
for n in t_names:
r = self.parse_expression(n);
if prefix == r[0]:
names.append(n + postfix)
else:
names.append(prefix + r[1] + postfix)
return names
def insert_index(self, table, name, url, std=""):
"""callback to insert index"""
names = self.parse_title(name);
for n in names:
self.db_cursor.execute(
'INSERT INTO "%s" (name, url, std) VALUES (?, ?, ?)' % table, (
n, url, std))
def cache_all(self):
"""Cache all available man pages"""
respond = input(
'By default, cppman fetches pages on-the-fly if corresponding '
'page is not found in the cache. The "cache-all" option is only '
'useful if you want to view man pages offline. '
'Caching all contents will take several minutes, '
'do you want to continue [y/N]? ')
if not (respond and 'yes'.startswith(respond.lower())):
raise KeyboardInterrupt
try:
os.makedirs(environ.cache_dir)
except:
pass
self.success_count = 0
self.failure_count = 0
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
source = environ.config.source
print('Caching manpages from %s ...' % source)
data = cursor.execute('SELECT * FROM "%s"' % source).fetchall()
for name, url, _ in data:
print('Caching %s ...' % name)
retries = 3
while retries > 0:
try:
self.cache_man_page(source, url, name)
except Exception:
print('Retrying ...')
retries -= 1
else:
self.success_count += 1
break
else:
print('Error caching %s ...' % name)
self.failure_count += 1
conn.close()
print('\n%d manual pages cached successfully.' % self.success_count)
print('%d manual pages failed to cache.' % self.failure_count)
self.update_mandb(False)
def cache_man_page(self, source, url, name):
"""callback to cache new man page"""
# Skip if already exists, override if forced flag is true
outname = self.get_page_path(source, name)
if os.path.exists(outname) and not self.forced:
return
try:
os.makedirs(os.path.join(environ.cache_dir, source))
except OSError:
pass
# There are often some errors in the HTML, for example: missing closing
# tag. We use fixupHTML to fix this.
data = util.fixupHTML(urllib.request.urlopen(url).read())
formatter = importlib.import_module('cppman.formatter.%s' % source[:-4])
groff_text = formatter.html2groff(data, name)
with gzip.open(outname, 'w') as f:
f.write(groff_text.encode('utf-8'))
def clear_cache(self):
"""Clear all cache in man"""
shutil.rmtree(environ.cache_dir)
def man(self, pattern):
"""Call viewer.sh to view man page"""
try:
avail = os.listdir(os.path.join(environ.cache_dir, environ.source))
except OSError:
avail = []
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
# Try direct match
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
# Try standard library
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="std::%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
raise RuntimeError('No manual entry for ' + pattern)
finally:
conn.close()
page_filename = self.get_normalized_page_name(page_name)
if self.forced or page_filename + '.3.gz' not in avail:
self.cache_man_page(environ.source, url, page_name)
pager_type = environ.pager if sys.stdout.isatty() else 'pipe'
# Call viewer
columns = (util.get_width() if self.force_columns == -1 else
self.force_columns)
pid = os.fork()
if pid == 0:
os.execl('/bin/sh', '/bin/sh', environ.pager_script, pager_type,
self.get_page_path(environ.source, page_name),
str(columns), environ.pager_config, page_name)
return pid
def find(self, pattern):
"""Find pages in database."""
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
selected = cursor.execute(
'SELECT * FROM "%s" WHERE name '
'LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchall()
pat = re.compile('(%s)' % re.escape(pattern), re.I)
if selected:
for name, url, std in selected:
if os.isatty(sys.stdout.fileno()):
print(pat.sub(r'\033[1;31m\1\033[0m', name) +
(' \033[1;33m[%s]\033[0m' % std if std else ''))
else:
print(name + (' [%s]' % std if std else ''))
else:
raise RuntimeError('%s: nothing appropriate.' % pattern)
def update_mandb(self, quiet=True):
"""Update mandb."""
if not environ.config.UpdateManPath:
return
print('\nrunning mandb...')
cmd = 'mandb %s' % (' -q' if quiet else '')
subprocess.Popen(cmd, shell=True).wait()
def get_normalized_page_name(self, name):
return name.replace('/', '_')
def get_page_path(self, source, name):
name = self.get_normalized_page_name(name)
return os.path.join(environ.cache_dir, source, name + '.3.gz')
|
aitjcize/cppman | cppman/main.py | Cppman.process_document | python | def process_document(self, doc, std):
if doc.url not in self.blacklist:
print("Indexing '%s' %s..." % (doc.url, std))
name = self.extract_name(doc.text)
self.results.add((name, doc.url, std))
else:
print("Skipping blacklisted page '%s' ..." % doc.url)
return None | callback to insert index | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/main.py#L127-L135 | [
"def extract_name(self, data):\n \"\"\"Extract man page name from web page.\"\"\"\n name = re.search('<h1[^>]*>(.+?)</h1>', data).group(1)\n name = re.sub(r'<([^>]+)>', r'', name)\n name = re.sub(r'>', r'>', name)\n name = re.sub(r'<', r'<', name)\n return name\n"
] | class Cppman(Crawler):
"""Manage cpp man pages, indexes"""
def __init__(self, forced=False, force_columns=-1):
Crawler.__init__(self)
self.results = set()
self.forced = forced
self.success_count = None
self.failure_count = None
self.force_columns = force_columns
self.blacklist = [
]
self.name_exceptions = [
'http://www.cplusplus.com/reference/string/swap/'
]
def extract_name(self, data):
"""Extract man page name from web page."""
name = re.search('<h1[^>]*>(.+?)</h1>', data).group(1)
name = re.sub(r'<([^>]+)>', r'', name)
name = re.sub(r'>', r'>', name)
name = re.sub(r'<', r'<', name)
return name
def rebuild_index(self):
"""Rebuild index database from cplusplus.com and cppreference.com."""
try:
os.remove(environ.index_db_re)
except:
pass
self.db_conn = sqlite3.connect(environ.index_db_re)
self.db_cursor = self.db_conn.cursor()
self.db_cursor.execute('CREATE TABLE "cplusplus.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
self.db_cursor.execute('CREATE TABLE "cppreference.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
try:
self.add_url_filter('\.(jpg|jpeg|gif|png|js|css|swf|svg)$')
self.set_follow_mode(Crawler.F_SAME_PATH)
# cplusplus.com
self.crawl('http://www.cplusplus.com/reference/')
for name, url, std in self.results:
self.insert_index('cplusplus.com', name, url, std)
self.db_conn.commit()
# Rename duplicate entries
duplicates = self.db_cursor.execute('SELECT name, COUNT(name) '
'AS NON '
'FROM "cplusplus.com" '
'GROUP BY NAME '
'HAVING (NON > 1)').fetchall()
for name, num in duplicates:
dump = self.db_cursor.execute('SELECT name, url FROM '
'"cplusplus.com" WHERE name="%s"'
% name).fetchall()
for n, u in dump:
if u not in self.name_exceptions:
n2 = n[5:] if n.startswith('std::') else n
try:
group = re.search('/([^/]+)/%s/$' % n2, u).group(1)
except Exception:
group = re.search('/([^/]+)/[^/]+/$', u).group(1)
new_name = '%s (%s)' % (n, group)
self.db_cursor.execute('UPDATE "cplusplus.com" '
'SET name="%s", url="%s" '
'WHERE url="%s"' %
(new_name, u, u))
self.db_conn.commit()
# cppreference.com
self.results = set()
self.crawl('https://en.cppreference.com/w/cpp', '/w/cpp')
for name, url, std in self.results:
self.insert_index('cppreference.com', name, url, std)
self.db_conn.commit()
except KeyboardInterrupt:
os.remove(environ.index_db_re)
raise KeyboardInterrupt
finally:
self.db_conn.close()
def parse_expression(self, expr):
"""
split expression into prefix and expression
tested with
```
operator==
!=
std::rel_ops::operator!=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators
std::vector::begin
std::abs(float)
std::fabs()
```
"""
m = re.match(r'^(.*?(?:::)?(?:operator)?)((?:::[^:]*|[^:]*)?)$', expr);
prefix = m.group(1)
tail = m.group(2)
return [prefix, tail]
def parse_title(self, title):
"""
split of the last parenthesis operator==,!=,<,<=(std::vector)
tested with
```
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=
operator==,!=,<,<=,>,>=
std::rel_ops::operator!=,>,<=,>=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators (vector)
std::vector::begin, std::vector::cbegin
std::abs(float), std::fabs
std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)
```
"""
m = re.match(r'^\s*((?:\(size_type\)|(?:.|\(\))*?)*)((?:\([^)]+\))?)\s*$', title)
postfix = m.group(2)
t_names = m.group(1).split(',')
t_names = [n.strip() for n in t_names]
prefix = self.parse_expression(t_names[0])[0]
names = []
for n in t_names:
r = self.parse_expression(n);
if prefix == r[0]:
names.append(n + postfix)
else:
names.append(prefix + r[1] + postfix)
return names
def insert_index(self, table, name, url, std=""):
"""callback to insert index"""
names = self.parse_title(name);
for n in names:
self.db_cursor.execute(
'INSERT INTO "%s" (name, url, std) VALUES (?, ?, ?)' % table, (
n, url, std))
def cache_all(self):
"""Cache all available man pages"""
respond = input(
'By default, cppman fetches pages on-the-fly if corresponding '
'page is not found in the cache. The "cache-all" option is only '
'useful if you want to view man pages offline. '
'Caching all contents will take several minutes, '
'do you want to continue [y/N]? ')
if not (respond and 'yes'.startswith(respond.lower())):
raise KeyboardInterrupt
try:
os.makedirs(environ.cache_dir)
except:
pass
self.success_count = 0
self.failure_count = 0
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
source = environ.config.source
print('Caching manpages from %s ...' % source)
data = cursor.execute('SELECT * FROM "%s"' % source).fetchall()
for name, url, _ in data:
print('Caching %s ...' % name)
retries = 3
while retries > 0:
try:
self.cache_man_page(source, url, name)
except Exception:
print('Retrying ...')
retries -= 1
else:
self.success_count += 1
break
else:
print('Error caching %s ...' % name)
self.failure_count += 1
conn.close()
print('\n%d manual pages cached successfully.' % self.success_count)
print('%d manual pages failed to cache.' % self.failure_count)
self.update_mandb(False)
def cache_man_page(self, source, url, name):
"""callback to cache new man page"""
# Skip if already exists, override if forced flag is true
outname = self.get_page_path(source, name)
if os.path.exists(outname) and not self.forced:
return
try:
os.makedirs(os.path.join(environ.cache_dir, source))
except OSError:
pass
# There are often some errors in the HTML, for example: missing closing
# tag. We use fixupHTML to fix this.
data = util.fixupHTML(urllib.request.urlopen(url).read())
formatter = importlib.import_module('cppman.formatter.%s' % source[:-4])
groff_text = formatter.html2groff(data, name)
with gzip.open(outname, 'w') as f:
f.write(groff_text.encode('utf-8'))
def clear_cache(self):
"""Clear all cache in man"""
shutil.rmtree(environ.cache_dir)
def man(self, pattern):
"""Call viewer.sh to view man page"""
try:
avail = os.listdir(os.path.join(environ.cache_dir, environ.source))
except OSError:
avail = []
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
# Try direct match
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
# Try standard library
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="std::%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
raise RuntimeError('No manual entry for ' + pattern)
finally:
conn.close()
page_filename = self.get_normalized_page_name(page_name)
if self.forced or page_filename + '.3.gz' not in avail:
self.cache_man_page(environ.source, url, page_name)
pager_type = environ.pager if sys.stdout.isatty() else 'pipe'
# Call viewer
columns = (util.get_width() if self.force_columns == -1 else
self.force_columns)
pid = os.fork()
if pid == 0:
os.execl('/bin/sh', '/bin/sh', environ.pager_script, pager_type,
self.get_page_path(environ.source, page_name),
str(columns), environ.pager_config, page_name)
return pid
def find(self, pattern):
"""Find pages in database."""
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
selected = cursor.execute(
'SELECT * FROM "%s" WHERE name '
'LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchall()
pat = re.compile('(%s)' % re.escape(pattern), re.I)
if selected:
for name, url, std in selected:
if os.isatty(sys.stdout.fileno()):
print(pat.sub(r'\033[1;31m\1\033[0m', name) +
(' \033[1;33m[%s]\033[0m' % std if std else ''))
else:
print(name + (' [%s]' % std if std else ''))
else:
raise RuntimeError('%s: nothing appropriate.' % pattern)
def update_mandb(self, quiet=True):
"""Update mandb."""
if not environ.config.UpdateManPath:
return
print('\nrunning mandb...')
cmd = 'mandb %s' % (' -q' if quiet else '')
subprocess.Popen(cmd, shell=True).wait()
def get_normalized_page_name(self, name):
return name.replace('/', '_')
def get_page_path(self, source, name):
name = self.get_normalized_page_name(name)
return os.path.join(environ.cache_dir, source, name + '.3.gz')
|
aitjcize/cppman | cppman/main.py | Cppman.parse_expression | python | def parse_expression(self, expr):
m = re.match(r'^(.*?(?:::)?(?:operator)?)((?:::[^:]*|[^:]*)?)$', expr);
prefix = m.group(1)
tail = m.group(2)
return [prefix, tail] | split expression into prefix and expression
tested with
```
operator==
!=
std::rel_ops::operator!=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators
std::vector::begin
std::abs(float)
std::fabs()
``` | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/main.py#L137-L158 | null | class Cppman(Crawler):
"""Manage cpp man pages, indexes"""
def __init__(self, forced=False, force_columns=-1):
Crawler.__init__(self)
self.results = set()
self.forced = forced
self.success_count = None
self.failure_count = None
self.force_columns = force_columns
self.blacklist = [
]
self.name_exceptions = [
'http://www.cplusplus.com/reference/string/swap/'
]
def extract_name(self, data):
"""Extract man page name from web page."""
name = re.search('<h1[^>]*>(.+?)</h1>', data).group(1)
name = re.sub(r'<([^>]+)>', r'', name)
name = re.sub(r'>', r'>', name)
name = re.sub(r'<', r'<', name)
return name
def rebuild_index(self):
"""Rebuild index database from cplusplus.com and cppreference.com."""
try:
os.remove(environ.index_db_re)
except:
pass
self.db_conn = sqlite3.connect(environ.index_db_re)
self.db_cursor = self.db_conn.cursor()
self.db_cursor.execute('CREATE TABLE "cplusplus.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
self.db_cursor.execute('CREATE TABLE "cppreference.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
try:
self.add_url_filter('\.(jpg|jpeg|gif|png|js|css|swf|svg)$')
self.set_follow_mode(Crawler.F_SAME_PATH)
# cplusplus.com
self.crawl('http://www.cplusplus.com/reference/')
for name, url, std in self.results:
self.insert_index('cplusplus.com', name, url, std)
self.db_conn.commit()
# Rename duplicate entries
duplicates = self.db_cursor.execute('SELECT name, COUNT(name) '
'AS NON '
'FROM "cplusplus.com" '
'GROUP BY NAME '
'HAVING (NON > 1)').fetchall()
for name, num in duplicates:
dump = self.db_cursor.execute('SELECT name, url FROM '
'"cplusplus.com" WHERE name="%s"'
% name).fetchall()
for n, u in dump:
if u not in self.name_exceptions:
n2 = n[5:] if n.startswith('std::') else n
try:
group = re.search('/([^/]+)/%s/$' % n2, u).group(1)
except Exception:
group = re.search('/([^/]+)/[^/]+/$', u).group(1)
new_name = '%s (%s)' % (n, group)
self.db_cursor.execute('UPDATE "cplusplus.com" '
'SET name="%s", url="%s" '
'WHERE url="%s"' %
(new_name, u, u))
self.db_conn.commit()
# cppreference.com
self.results = set()
self.crawl('https://en.cppreference.com/w/cpp', '/w/cpp')
for name, url, std in self.results:
self.insert_index('cppreference.com', name, url, std)
self.db_conn.commit()
except KeyboardInterrupt:
os.remove(environ.index_db_re)
raise KeyboardInterrupt
finally:
self.db_conn.close()
def process_document(self, doc, std):
"""callback to insert index"""
if doc.url not in self.blacklist:
print("Indexing '%s' %s..." % (doc.url, std))
name = self.extract_name(doc.text)
self.results.add((name, doc.url, std))
else:
print("Skipping blacklisted page '%s' ..." % doc.url)
return None
def parse_title(self, title):
"""
split of the last parenthesis operator==,!=,<,<=(std::vector)
tested with
```
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=
operator==,!=,<,<=,>,>=
std::rel_ops::operator!=,>,<=,>=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators (vector)
std::vector::begin, std::vector::cbegin
std::abs(float), std::fabs
std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)
```
"""
m = re.match(r'^\s*((?:\(size_type\)|(?:.|\(\))*?)*)((?:\([^)]+\))?)\s*$', title)
postfix = m.group(2)
t_names = m.group(1).split(',')
t_names = [n.strip() for n in t_names]
prefix = self.parse_expression(t_names[0])[0]
names = []
for n in t_names:
r = self.parse_expression(n);
if prefix == r[0]:
names.append(n + postfix)
else:
names.append(prefix + r[1] + postfix)
return names
def insert_index(self, table, name, url, std=""):
"""callback to insert index"""
names = self.parse_title(name);
for n in names:
self.db_cursor.execute(
'INSERT INTO "%s" (name, url, std) VALUES (?, ?, ?)' % table, (
n, url, std))
def cache_all(self):
"""Cache all available man pages"""
respond = input(
'By default, cppman fetches pages on-the-fly if corresponding '
'page is not found in the cache. The "cache-all" option is only '
'useful if you want to view man pages offline. '
'Caching all contents will take several minutes, '
'do you want to continue [y/N]? ')
if not (respond and 'yes'.startswith(respond.lower())):
raise KeyboardInterrupt
try:
os.makedirs(environ.cache_dir)
except:
pass
self.success_count = 0
self.failure_count = 0
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
source = environ.config.source
print('Caching manpages from %s ...' % source)
data = cursor.execute('SELECT * FROM "%s"' % source).fetchall()
for name, url, _ in data:
print('Caching %s ...' % name)
retries = 3
while retries > 0:
try:
self.cache_man_page(source, url, name)
except Exception:
print('Retrying ...')
retries -= 1
else:
self.success_count += 1
break
else:
print('Error caching %s ...' % name)
self.failure_count += 1
conn.close()
print('\n%d manual pages cached successfully.' % self.success_count)
print('%d manual pages failed to cache.' % self.failure_count)
self.update_mandb(False)
def cache_man_page(self, source, url, name):
"""callback to cache new man page"""
# Skip if already exists, override if forced flag is true
outname = self.get_page_path(source, name)
if os.path.exists(outname) and not self.forced:
return
try:
os.makedirs(os.path.join(environ.cache_dir, source))
except OSError:
pass
# There are often some errors in the HTML, for example: missing closing
# tag. We use fixupHTML to fix this.
data = util.fixupHTML(urllib.request.urlopen(url).read())
formatter = importlib.import_module('cppman.formatter.%s' % source[:-4])
groff_text = formatter.html2groff(data, name)
with gzip.open(outname, 'w') as f:
f.write(groff_text.encode('utf-8'))
def clear_cache(self):
"""Clear all cache in man"""
shutil.rmtree(environ.cache_dir)
def man(self, pattern):
"""Call viewer.sh to view man page"""
try:
avail = os.listdir(os.path.join(environ.cache_dir, environ.source))
except OSError:
avail = []
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
# Try direct match
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
# Try standard library
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="std::%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
raise RuntimeError('No manual entry for ' + pattern)
finally:
conn.close()
page_filename = self.get_normalized_page_name(page_name)
if self.forced or page_filename + '.3.gz' not in avail:
self.cache_man_page(environ.source, url, page_name)
pager_type = environ.pager if sys.stdout.isatty() else 'pipe'
# Call viewer
columns = (util.get_width() if self.force_columns == -1 else
self.force_columns)
pid = os.fork()
if pid == 0:
os.execl('/bin/sh', '/bin/sh', environ.pager_script, pager_type,
self.get_page_path(environ.source, page_name),
str(columns), environ.pager_config, page_name)
return pid
def find(self, pattern):
"""Find pages in database."""
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
selected = cursor.execute(
'SELECT * FROM "%s" WHERE name '
'LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchall()
pat = re.compile('(%s)' % re.escape(pattern), re.I)
if selected:
for name, url, std in selected:
if os.isatty(sys.stdout.fileno()):
print(pat.sub(r'\033[1;31m\1\033[0m', name) +
(' \033[1;33m[%s]\033[0m' % std if std else ''))
else:
print(name + (' [%s]' % std if std else ''))
else:
raise RuntimeError('%s: nothing appropriate.' % pattern)
def update_mandb(self, quiet=True):
"""Update mandb."""
if not environ.config.UpdateManPath:
return
print('\nrunning mandb...')
cmd = 'mandb %s' % (' -q' if quiet else '')
subprocess.Popen(cmd, shell=True).wait()
def get_normalized_page_name(self, name):
return name.replace('/', '_')
def get_page_path(self, source, name):
name = self.get_normalized_page_name(name)
return os.path.join(environ.cache_dir, source, name + '.3.gz')
|
aitjcize/cppman | cppman/main.py | Cppman.parse_title | python | def parse_title(self, title):
m = re.match(r'^\s*((?:\(size_type\)|(?:.|\(\))*?)*)((?:\([^)]+\))?)\s*$', title)
postfix = m.group(2)
t_names = m.group(1).split(',')
t_names = [n.strip() for n in t_names]
prefix = self.parse_expression(t_names[0])[0]
names = []
for n in t_names:
r = self.parse_expression(n);
if prefix == r[0]:
names.append(n + postfix)
else:
names.append(prefix + r[1] + postfix)
return names | split of the last parenthesis operator==,!=,<,<=(std::vector)
tested with
```
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=
operator==,!=,<,<=,>,>=
std::rel_ops::operator!=,>,<=,>=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators (vector)
std::vector::begin, std::vector::cbegin
std::abs(float), std::fabs
std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)
``` | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/main.py#L160-L193 | [
"def parse_expression(self, expr):\n \"\"\"\n split expression into prefix and expression\n tested with\n ```\n operator==\n !=\n std::rel_ops::operator!=\n std::atomic::operator=\n std::array::operator[]\n std::function::operator()\n std::vector::at\n std::relational operators\n std::vector::begin\n std::abs(float)\n std::fabs()\n ```\n \"\"\"\n m = re.match(r'^(.*?(?:::)?(?:operator)?)((?:::[^:]*|[^:]*)?)$', expr);\n prefix = m.group(1)\n tail = m.group(2)\n return [prefix, tail]\n"
] | class Cppman(Crawler):
"""Manage cpp man pages, indexes"""
def __init__(self, forced=False, force_columns=-1):
Crawler.__init__(self)
self.results = set()
self.forced = forced
self.success_count = None
self.failure_count = None
self.force_columns = force_columns
self.blacklist = [
]
self.name_exceptions = [
'http://www.cplusplus.com/reference/string/swap/'
]
def extract_name(self, data):
"""Extract man page name from web page."""
name = re.search('<h1[^>]*>(.+?)</h1>', data).group(1)
name = re.sub(r'<([^>]+)>', r'', name)
name = re.sub(r'>', r'>', name)
name = re.sub(r'<', r'<', name)
return name
def rebuild_index(self):
"""Rebuild index database from cplusplus.com and cppreference.com."""
try:
os.remove(environ.index_db_re)
except:
pass
self.db_conn = sqlite3.connect(environ.index_db_re)
self.db_cursor = self.db_conn.cursor()
self.db_cursor.execute('CREATE TABLE "cplusplus.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
self.db_cursor.execute('CREATE TABLE "cppreference.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
try:
self.add_url_filter('\.(jpg|jpeg|gif|png|js|css|swf|svg)$')
self.set_follow_mode(Crawler.F_SAME_PATH)
# cplusplus.com
self.crawl('http://www.cplusplus.com/reference/')
for name, url, std in self.results:
self.insert_index('cplusplus.com', name, url, std)
self.db_conn.commit()
# Rename duplicate entries
duplicates = self.db_cursor.execute('SELECT name, COUNT(name) '
'AS NON '
'FROM "cplusplus.com" '
'GROUP BY NAME '
'HAVING (NON > 1)').fetchall()
for name, num in duplicates:
dump = self.db_cursor.execute('SELECT name, url FROM '
'"cplusplus.com" WHERE name="%s"'
% name).fetchall()
for n, u in dump:
if u not in self.name_exceptions:
n2 = n[5:] if n.startswith('std::') else n
try:
group = re.search('/([^/]+)/%s/$' % n2, u).group(1)
except Exception:
group = re.search('/([^/]+)/[^/]+/$', u).group(1)
new_name = '%s (%s)' % (n, group)
self.db_cursor.execute('UPDATE "cplusplus.com" '
'SET name="%s", url="%s" '
'WHERE url="%s"' %
(new_name, u, u))
self.db_conn.commit()
# cppreference.com
self.results = set()
self.crawl('https://en.cppreference.com/w/cpp', '/w/cpp')
for name, url, std in self.results:
self.insert_index('cppreference.com', name, url, std)
self.db_conn.commit()
except KeyboardInterrupt:
os.remove(environ.index_db_re)
raise KeyboardInterrupt
finally:
self.db_conn.close()
def process_document(self, doc, std):
"""callback to insert index"""
if doc.url not in self.blacklist:
print("Indexing '%s' %s..." % (doc.url, std))
name = self.extract_name(doc.text)
self.results.add((name, doc.url, std))
else:
print("Skipping blacklisted page '%s' ..." % doc.url)
return None
def parse_expression(self, expr):
"""
split expression into prefix and expression
tested with
```
operator==
!=
std::rel_ops::operator!=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators
std::vector::begin
std::abs(float)
std::fabs()
```
"""
m = re.match(r'^(.*?(?:::)?(?:operator)?)((?:::[^:]*|[^:]*)?)$', expr);
prefix = m.group(1)
tail = m.group(2)
return [prefix, tail]
def insert_index(self, table, name, url, std=""):
"""callback to insert index"""
names = self.parse_title(name);
for n in names:
self.db_cursor.execute(
'INSERT INTO "%s" (name, url, std) VALUES (?, ?, ?)' % table, (
n, url, std))
def cache_all(self):
"""Cache all available man pages"""
respond = input(
'By default, cppman fetches pages on-the-fly if corresponding '
'page is not found in the cache. The "cache-all" option is only '
'useful if you want to view man pages offline. '
'Caching all contents will take several minutes, '
'do you want to continue [y/N]? ')
if not (respond and 'yes'.startswith(respond.lower())):
raise KeyboardInterrupt
try:
os.makedirs(environ.cache_dir)
except:
pass
self.success_count = 0
self.failure_count = 0
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
source = environ.config.source
print('Caching manpages from %s ...' % source)
data = cursor.execute('SELECT * FROM "%s"' % source).fetchall()
for name, url, _ in data:
print('Caching %s ...' % name)
retries = 3
while retries > 0:
try:
self.cache_man_page(source, url, name)
except Exception:
print('Retrying ...')
retries -= 1
else:
self.success_count += 1
break
else:
print('Error caching %s ...' % name)
self.failure_count += 1
conn.close()
print('\n%d manual pages cached successfully.' % self.success_count)
print('%d manual pages failed to cache.' % self.failure_count)
self.update_mandb(False)
def cache_man_page(self, source, url, name):
"""callback to cache new man page"""
# Skip if already exists, override if forced flag is true
outname = self.get_page_path(source, name)
if os.path.exists(outname) and not self.forced:
return
try:
os.makedirs(os.path.join(environ.cache_dir, source))
except OSError:
pass
# There are often some errors in the HTML, for example: missing closing
# tag. We use fixupHTML to fix this.
data = util.fixupHTML(urllib.request.urlopen(url).read())
formatter = importlib.import_module('cppman.formatter.%s' % source[:-4])
groff_text = formatter.html2groff(data, name)
with gzip.open(outname, 'w') as f:
f.write(groff_text.encode('utf-8'))
def clear_cache(self):
"""Clear all cache in man"""
shutil.rmtree(environ.cache_dir)
def man(self, pattern):
"""Call viewer.sh to view man page"""
try:
avail = os.listdir(os.path.join(environ.cache_dir, environ.source))
except OSError:
avail = []
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
# Try direct match
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
# Try standard library
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="std::%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
raise RuntimeError('No manual entry for ' + pattern)
finally:
conn.close()
page_filename = self.get_normalized_page_name(page_name)
if self.forced or page_filename + '.3.gz' not in avail:
self.cache_man_page(environ.source, url, page_name)
pager_type = environ.pager if sys.stdout.isatty() else 'pipe'
# Call viewer
columns = (util.get_width() if self.force_columns == -1 else
self.force_columns)
pid = os.fork()
if pid == 0:
os.execl('/bin/sh', '/bin/sh', environ.pager_script, pager_type,
self.get_page_path(environ.source, page_name),
str(columns), environ.pager_config, page_name)
return pid
def find(self, pattern):
"""Find pages in database."""
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
selected = cursor.execute(
'SELECT * FROM "%s" WHERE name '
'LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchall()
pat = re.compile('(%s)' % re.escape(pattern), re.I)
if selected:
for name, url, std in selected:
if os.isatty(sys.stdout.fileno()):
print(pat.sub(r'\033[1;31m\1\033[0m', name) +
(' \033[1;33m[%s]\033[0m' % std if std else ''))
else:
print(name + (' [%s]' % std if std else ''))
else:
raise RuntimeError('%s: nothing appropriate.' % pattern)
def update_mandb(self, quiet=True):
"""Update mandb."""
if not environ.config.UpdateManPath:
return
print('\nrunning mandb...')
cmd = 'mandb %s' % (' -q' if quiet else '')
subprocess.Popen(cmd, shell=True).wait()
def get_normalized_page_name(self, name):
return name.replace('/', '_')
def get_page_path(self, source, name):
name = self.get_normalized_page_name(name)
return os.path.join(environ.cache_dir, source, name + '.3.gz')
|
aitjcize/cppman | cppman/main.py | Cppman.insert_index | python | def insert_index(self, table, name, url, std=""):
names = self.parse_title(name);
for n in names:
self.db_cursor.execute(
'INSERT INTO "%s" (name, url, std) VALUES (?, ?, ?)' % table, (
n, url, std)) | callback to insert index | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/main.py#L195-L202 | [
"def parse_title(self, title):\n \"\"\"\n split of the last parenthesis operator==,!=,<,<=(std::vector)\n tested with\n ```\n operator==,!=,<,<=,>,>=(std::vector) \n operator==,!=,<,<=,>,>=(std::vector)\n operator==,!=,<,<=,>,>= \n operator==,!=,<,<=,>,>=\n std::rel_ops::operator!=,>,<=,>= \n std::atomic::operator= \n std::array::operator[] \n std::function::operator() \n std::vector::at\n std::relational operators (vector)\n std::vector::begin, std::vector::cbegin\n std::abs(float), std::fabs\n std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)\n ```\n \"\"\"\n m = re.match(r'^\\s*((?:\\(size_type\\)|(?:.|\\(\\))*?)*)((?:\\([^)]+\\))?)\\s*$', title)\n postfix = m.group(2)\n\n t_names = m.group(1).split(',')\n t_names = [n.strip() for n in t_names]\n prefix = self.parse_expression(t_names[0])[0]\n names = []\n for n in t_names:\n r = self.parse_expression(n);\n if prefix == r[0]:\n names.append(n + postfix)\n else:\n names.append(prefix + r[1] + postfix)\n return names\n"
] | class Cppman(Crawler):
"""Manage cpp man pages, indexes"""
def __init__(self, forced=False, force_columns=-1):
Crawler.__init__(self)
self.results = set()
self.forced = forced
self.success_count = None
self.failure_count = None
self.force_columns = force_columns
self.blacklist = [
]
self.name_exceptions = [
'http://www.cplusplus.com/reference/string/swap/'
]
def extract_name(self, data):
"""Extract man page name from web page."""
name = re.search('<h1[^>]*>(.+?)</h1>', data).group(1)
name = re.sub(r'<([^>]+)>', r'', name)
name = re.sub(r'>', r'>', name)
name = re.sub(r'<', r'<', name)
return name
def rebuild_index(self):
"""Rebuild index database from cplusplus.com and cppreference.com."""
try:
os.remove(environ.index_db_re)
except:
pass
self.db_conn = sqlite3.connect(environ.index_db_re)
self.db_cursor = self.db_conn.cursor()
self.db_cursor.execute('CREATE TABLE "cplusplus.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
self.db_cursor.execute('CREATE TABLE "cppreference.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
try:
self.add_url_filter('\.(jpg|jpeg|gif|png|js|css|swf|svg)$')
self.set_follow_mode(Crawler.F_SAME_PATH)
# cplusplus.com
self.crawl('http://www.cplusplus.com/reference/')
for name, url, std in self.results:
self.insert_index('cplusplus.com', name, url, std)
self.db_conn.commit()
# Rename duplicate entries
duplicates = self.db_cursor.execute('SELECT name, COUNT(name) '
'AS NON '
'FROM "cplusplus.com" '
'GROUP BY NAME '
'HAVING (NON > 1)').fetchall()
for name, num in duplicates:
dump = self.db_cursor.execute('SELECT name, url FROM '
'"cplusplus.com" WHERE name="%s"'
% name).fetchall()
for n, u in dump:
if u not in self.name_exceptions:
n2 = n[5:] if n.startswith('std::') else n
try:
group = re.search('/([^/]+)/%s/$' % n2, u).group(1)
except Exception:
group = re.search('/([^/]+)/[^/]+/$', u).group(1)
new_name = '%s (%s)' % (n, group)
self.db_cursor.execute('UPDATE "cplusplus.com" '
'SET name="%s", url="%s" '
'WHERE url="%s"' %
(new_name, u, u))
self.db_conn.commit()
# cppreference.com
self.results = set()
self.crawl('https://en.cppreference.com/w/cpp', '/w/cpp')
for name, url, std in self.results:
self.insert_index('cppreference.com', name, url, std)
self.db_conn.commit()
except KeyboardInterrupt:
os.remove(environ.index_db_re)
raise KeyboardInterrupt
finally:
self.db_conn.close()
def process_document(self, doc, std):
"""callback to insert index"""
if doc.url not in self.blacklist:
print("Indexing '%s' %s..." % (doc.url, std))
name = self.extract_name(doc.text)
self.results.add((name, doc.url, std))
else:
print("Skipping blacklisted page '%s' ..." % doc.url)
return None
def parse_expression(self, expr):
"""
split expression into prefix and expression
tested with
```
operator==
!=
std::rel_ops::operator!=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators
std::vector::begin
std::abs(float)
std::fabs()
```
"""
m = re.match(r'^(.*?(?:::)?(?:operator)?)((?:::[^:]*|[^:]*)?)$', expr);
prefix = m.group(1)
tail = m.group(2)
return [prefix, tail]
def parse_title(self, title):
"""
split of the last parenthesis operator==,!=,<,<=(std::vector)
tested with
```
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=
operator==,!=,<,<=,>,>=
std::rel_ops::operator!=,>,<=,>=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators (vector)
std::vector::begin, std::vector::cbegin
std::abs(float), std::fabs
std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)
```
"""
m = re.match(r'^\s*((?:\(size_type\)|(?:.|\(\))*?)*)((?:\([^)]+\))?)\s*$', title)
postfix = m.group(2)
t_names = m.group(1).split(',')
t_names = [n.strip() for n in t_names]
prefix = self.parse_expression(t_names[0])[0]
names = []
for n in t_names:
r = self.parse_expression(n);
if prefix == r[0]:
names.append(n + postfix)
else:
names.append(prefix + r[1] + postfix)
return names
def cache_all(self):
"""Cache all available man pages"""
respond = input(
'By default, cppman fetches pages on-the-fly if corresponding '
'page is not found in the cache. The "cache-all" option is only '
'useful if you want to view man pages offline. '
'Caching all contents will take several minutes, '
'do you want to continue [y/N]? ')
if not (respond and 'yes'.startswith(respond.lower())):
raise KeyboardInterrupt
try:
os.makedirs(environ.cache_dir)
except:
pass
self.success_count = 0
self.failure_count = 0
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
source = environ.config.source
print('Caching manpages from %s ...' % source)
data = cursor.execute('SELECT * FROM "%s"' % source).fetchall()
for name, url, _ in data:
print('Caching %s ...' % name)
retries = 3
while retries > 0:
try:
self.cache_man_page(source, url, name)
except Exception:
print('Retrying ...')
retries -= 1
else:
self.success_count += 1
break
else:
print('Error caching %s ...' % name)
self.failure_count += 1
conn.close()
print('\n%d manual pages cached successfully.' % self.success_count)
print('%d manual pages failed to cache.' % self.failure_count)
self.update_mandb(False)
def cache_man_page(self, source, url, name):
"""callback to cache new man page"""
# Skip if already exists, override if forced flag is true
outname = self.get_page_path(source, name)
if os.path.exists(outname) and not self.forced:
return
try:
os.makedirs(os.path.join(environ.cache_dir, source))
except OSError:
pass
# There are often some errors in the HTML, for example: missing closing
# tag. We use fixupHTML to fix this.
data = util.fixupHTML(urllib.request.urlopen(url).read())
formatter = importlib.import_module('cppman.formatter.%s' % source[:-4])
groff_text = formatter.html2groff(data, name)
with gzip.open(outname, 'w') as f:
f.write(groff_text.encode('utf-8'))
def clear_cache(self):
"""Clear all cache in man"""
shutil.rmtree(environ.cache_dir)
def man(self, pattern):
"""Call viewer.sh to view man page"""
try:
avail = os.listdir(os.path.join(environ.cache_dir, environ.source))
except OSError:
avail = []
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
# Try direct match
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
# Try standard library
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="std::%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
raise RuntimeError('No manual entry for ' + pattern)
finally:
conn.close()
page_filename = self.get_normalized_page_name(page_name)
if self.forced or page_filename + '.3.gz' not in avail:
self.cache_man_page(environ.source, url, page_name)
pager_type = environ.pager if sys.stdout.isatty() else 'pipe'
# Call viewer
columns = (util.get_width() if self.force_columns == -1 else
self.force_columns)
pid = os.fork()
if pid == 0:
os.execl('/bin/sh', '/bin/sh', environ.pager_script, pager_type,
self.get_page_path(environ.source, page_name),
str(columns), environ.pager_config, page_name)
return pid
def find(self, pattern):
"""Find pages in database."""
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
selected = cursor.execute(
'SELECT * FROM "%s" WHERE name '
'LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchall()
pat = re.compile('(%s)' % re.escape(pattern), re.I)
if selected:
for name, url, std in selected:
if os.isatty(sys.stdout.fileno()):
print(pat.sub(r'\033[1;31m\1\033[0m', name) +
(' \033[1;33m[%s]\033[0m' % std if std else ''))
else:
print(name + (' [%s]' % std if std else ''))
else:
raise RuntimeError('%s: nothing appropriate.' % pattern)
def update_mandb(self, quiet=True):
"""Update mandb."""
if not environ.config.UpdateManPath:
return
print('\nrunning mandb...')
cmd = 'mandb %s' % (' -q' if quiet else '')
subprocess.Popen(cmd, shell=True).wait()
def get_normalized_page_name(self, name):
return name.replace('/', '_')
def get_page_path(self, source, name):
name = self.get_normalized_page_name(name)
return os.path.join(environ.cache_dir, source, name + '.3.gz')
|
aitjcize/cppman | cppman/main.py | Cppman.cache_all | python | def cache_all(self):
respond = input(
'By default, cppman fetches pages on-the-fly if corresponding '
'page is not found in the cache. The "cache-all" option is only '
'useful if you want to view man pages offline. '
'Caching all contents will take several minutes, '
'do you want to continue [y/N]? ')
if not (respond and 'yes'.startswith(respond.lower())):
raise KeyboardInterrupt
try:
os.makedirs(environ.cache_dir)
except:
pass
self.success_count = 0
self.failure_count = 0
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
source = environ.config.source
print('Caching manpages from %s ...' % source)
data = cursor.execute('SELECT * FROM "%s"' % source).fetchall()
for name, url, _ in data:
print('Caching %s ...' % name)
retries = 3
while retries > 0:
try:
self.cache_man_page(source, url, name)
except Exception:
print('Retrying ...')
retries -= 1
else:
self.success_count += 1
break
else:
print('Error caching %s ...' % name)
self.failure_count += 1
conn.close()
print('\n%d manual pages cached successfully.' % self.success_count)
print('%d manual pages failed to cache.' % self.failure_count)
self.update_mandb(False) | Cache all available man pages | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/main.py#L204-L254 | [
"def cache_man_page(self, source, url, name):\n \"\"\"callback to cache new man page\"\"\"\n # Skip if already exists, override if forced flag is true\n outname = self.get_page_path(source, name)\n if os.path.exists(outname) and not self.forced:\n return\n\n try:\n os.makedirs(os.path.join(environ.cache_dir, source))\n except OSError:\n pass\n\n # There are often some errors in the HTML, for example: missing closing\n # tag. We use fixupHTML to fix this.\n data = util.fixupHTML(urllib.request.urlopen(url).read())\n\n formatter = importlib.import_module('cppman.formatter.%s' % source[:-4])\n groff_text = formatter.html2groff(data, name)\n\n with gzip.open(outname, 'w') as f:\n f.write(groff_text.encode('utf-8'))\n",
"def update_mandb(self, quiet=True):\n \"\"\"Update mandb.\"\"\"\n if not environ.config.UpdateManPath:\n return\n print('\\nrunning mandb...')\n cmd = 'mandb %s' % (' -q' if quiet else '')\n subprocess.Popen(cmd, shell=True).wait()\n"
] | class Cppman(Crawler):
"""Manage cpp man pages, indexes"""
def __init__(self, forced=False, force_columns=-1):
Crawler.__init__(self)
self.results = set()
self.forced = forced
self.success_count = None
self.failure_count = None
self.force_columns = force_columns
self.blacklist = [
]
self.name_exceptions = [
'http://www.cplusplus.com/reference/string/swap/'
]
def extract_name(self, data):
"""Extract man page name from web page."""
name = re.search('<h1[^>]*>(.+?)</h1>', data).group(1)
name = re.sub(r'<([^>]+)>', r'', name)
name = re.sub(r'>', r'>', name)
name = re.sub(r'<', r'<', name)
return name
def rebuild_index(self):
"""Rebuild index database from cplusplus.com and cppreference.com."""
try:
os.remove(environ.index_db_re)
except:
pass
self.db_conn = sqlite3.connect(environ.index_db_re)
self.db_cursor = self.db_conn.cursor()
self.db_cursor.execute('CREATE TABLE "cplusplus.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
self.db_cursor.execute('CREATE TABLE "cppreference.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
try:
self.add_url_filter('\.(jpg|jpeg|gif|png|js|css|swf|svg)$')
self.set_follow_mode(Crawler.F_SAME_PATH)
# cplusplus.com
self.crawl('http://www.cplusplus.com/reference/')
for name, url, std in self.results:
self.insert_index('cplusplus.com', name, url, std)
self.db_conn.commit()
# Rename duplicate entries
duplicates = self.db_cursor.execute('SELECT name, COUNT(name) '
'AS NON '
'FROM "cplusplus.com" '
'GROUP BY NAME '
'HAVING (NON > 1)').fetchall()
for name, num in duplicates:
dump = self.db_cursor.execute('SELECT name, url FROM '
'"cplusplus.com" WHERE name="%s"'
% name).fetchall()
for n, u in dump:
if u not in self.name_exceptions:
n2 = n[5:] if n.startswith('std::') else n
try:
group = re.search('/([^/]+)/%s/$' % n2, u).group(1)
except Exception:
group = re.search('/([^/]+)/[^/]+/$', u).group(1)
new_name = '%s (%s)' % (n, group)
self.db_cursor.execute('UPDATE "cplusplus.com" '
'SET name="%s", url="%s" '
'WHERE url="%s"' %
(new_name, u, u))
self.db_conn.commit()
# cppreference.com
self.results = set()
self.crawl('https://en.cppreference.com/w/cpp', '/w/cpp')
for name, url, std in self.results:
self.insert_index('cppreference.com', name, url, std)
self.db_conn.commit()
except KeyboardInterrupt:
os.remove(environ.index_db_re)
raise KeyboardInterrupt
finally:
self.db_conn.close()
def process_document(self, doc, std):
"""callback to insert index"""
if doc.url not in self.blacklist:
print("Indexing '%s' %s..." % (doc.url, std))
name = self.extract_name(doc.text)
self.results.add((name, doc.url, std))
else:
print("Skipping blacklisted page '%s' ..." % doc.url)
return None
def parse_expression(self, expr):
"""
split expression into prefix and expression
tested with
```
operator==
!=
std::rel_ops::operator!=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators
std::vector::begin
std::abs(float)
std::fabs()
```
"""
m = re.match(r'^(.*?(?:::)?(?:operator)?)((?:::[^:]*|[^:]*)?)$', expr);
prefix = m.group(1)
tail = m.group(2)
return [prefix, tail]
def parse_title(self, title):
"""
split of the last parenthesis operator==,!=,<,<=(std::vector)
tested with
```
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=
operator==,!=,<,<=,>,>=
std::rel_ops::operator!=,>,<=,>=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators (vector)
std::vector::begin, std::vector::cbegin
std::abs(float), std::fabs
std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)
```
"""
m = re.match(r'^\s*((?:\(size_type\)|(?:.|\(\))*?)*)((?:\([^)]+\))?)\s*$', title)
postfix = m.group(2)
t_names = m.group(1).split(',')
t_names = [n.strip() for n in t_names]
prefix = self.parse_expression(t_names[0])[0]
names = []
for n in t_names:
r = self.parse_expression(n);
if prefix == r[0]:
names.append(n + postfix)
else:
names.append(prefix + r[1] + postfix)
return names
def insert_index(self, table, name, url, std=""):
"""callback to insert index"""
names = self.parse_title(name);
for n in names:
self.db_cursor.execute(
'INSERT INTO "%s" (name, url, std) VALUES (?, ?, ?)' % table, (
n, url, std))
def cache_man_page(self, source, url, name):
"""callback to cache new man page"""
# Skip if already exists, override if forced flag is true
outname = self.get_page_path(source, name)
if os.path.exists(outname) and not self.forced:
return
try:
os.makedirs(os.path.join(environ.cache_dir, source))
except OSError:
pass
# There are often some errors in the HTML, for example: missing closing
# tag. We use fixupHTML to fix this.
data = util.fixupHTML(urllib.request.urlopen(url).read())
formatter = importlib.import_module('cppman.formatter.%s' % source[:-4])
groff_text = formatter.html2groff(data, name)
with gzip.open(outname, 'w') as f:
f.write(groff_text.encode('utf-8'))
def clear_cache(self):
"""Clear all cache in man"""
shutil.rmtree(environ.cache_dir)
def man(self, pattern):
"""Call viewer.sh to view man page"""
try:
avail = os.listdir(os.path.join(environ.cache_dir, environ.source))
except OSError:
avail = []
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
# Try direct match
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
# Try standard library
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="std::%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
raise RuntimeError('No manual entry for ' + pattern)
finally:
conn.close()
page_filename = self.get_normalized_page_name(page_name)
if self.forced or page_filename + '.3.gz' not in avail:
self.cache_man_page(environ.source, url, page_name)
pager_type = environ.pager if sys.stdout.isatty() else 'pipe'
# Call viewer
columns = (util.get_width() if self.force_columns == -1 else
self.force_columns)
pid = os.fork()
if pid == 0:
os.execl('/bin/sh', '/bin/sh', environ.pager_script, pager_type,
self.get_page_path(environ.source, page_name),
str(columns), environ.pager_config, page_name)
return pid
def find(self, pattern):
"""Find pages in database."""
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
selected = cursor.execute(
'SELECT * FROM "%s" WHERE name '
'LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchall()
pat = re.compile('(%s)' % re.escape(pattern), re.I)
if selected:
for name, url, std in selected:
if os.isatty(sys.stdout.fileno()):
print(pat.sub(r'\033[1;31m\1\033[0m', name) +
(' \033[1;33m[%s]\033[0m' % std if std else ''))
else:
print(name + (' [%s]' % std if std else ''))
else:
raise RuntimeError('%s: nothing appropriate.' % pattern)
def update_mandb(self, quiet=True):
"""Update mandb."""
if not environ.config.UpdateManPath:
return
print('\nrunning mandb...')
cmd = 'mandb %s' % (' -q' if quiet else '')
subprocess.Popen(cmd, shell=True).wait()
def get_normalized_page_name(self, name):
return name.replace('/', '_')
def get_page_path(self, source, name):
name = self.get_normalized_page_name(name)
return os.path.join(environ.cache_dir, source, name + '.3.gz')
|
aitjcize/cppman | cppman/main.py | Cppman.cache_man_page | python | def cache_man_page(self, source, url, name):
# Skip if already exists, override if forced flag is true
outname = self.get_page_path(source, name)
if os.path.exists(outname) and not self.forced:
return
try:
os.makedirs(os.path.join(environ.cache_dir, source))
except OSError:
pass
# There are often some errors in the HTML, for example: missing closing
# tag. We use fixupHTML to fix this.
data = util.fixupHTML(urllib.request.urlopen(url).read())
formatter = importlib.import_module('cppman.formatter.%s' % source[:-4])
groff_text = formatter.html2groff(data, name)
with gzip.open(outname, 'w') as f:
f.write(groff_text.encode('utf-8')) | callback to cache new man page | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/main.py#L256-L276 | [
"def fixupHTML(data):\n return str(bs4.BeautifulSoup(data, \"html5lib\"))\n",
"def get_page_path(self, source, name):\n name = self.get_normalized_page_name(name)\n return os.path.join(environ.cache_dir, source, name + '.3.gz')\n"
] | class Cppman(Crawler):
"""Manage cpp man pages, indexes"""
def __init__(self, forced=False, force_columns=-1):
Crawler.__init__(self)
self.results = set()
self.forced = forced
self.success_count = None
self.failure_count = None
self.force_columns = force_columns
self.blacklist = [
]
self.name_exceptions = [
'http://www.cplusplus.com/reference/string/swap/'
]
def extract_name(self, data):
"""Extract man page name from web page."""
name = re.search('<h1[^>]*>(.+?)</h1>', data).group(1)
name = re.sub(r'<([^>]+)>', r'', name)
name = re.sub(r'>', r'>', name)
name = re.sub(r'<', r'<', name)
return name
def rebuild_index(self):
"""Rebuild index database from cplusplus.com and cppreference.com."""
try:
os.remove(environ.index_db_re)
except:
pass
self.db_conn = sqlite3.connect(environ.index_db_re)
self.db_cursor = self.db_conn.cursor()
self.db_cursor.execute('CREATE TABLE "cplusplus.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
self.db_cursor.execute('CREATE TABLE "cppreference.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
try:
self.add_url_filter('\.(jpg|jpeg|gif|png|js|css|swf|svg)$')
self.set_follow_mode(Crawler.F_SAME_PATH)
# cplusplus.com
self.crawl('http://www.cplusplus.com/reference/')
for name, url, std in self.results:
self.insert_index('cplusplus.com', name, url, std)
self.db_conn.commit()
# Rename duplicate entries
duplicates = self.db_cursor.execute('SELECT name, COUNT(name) '
'AS NON '
'FROM "cplusplus.com" '
'GROUP BY NAME '
'HAVING (NON > 1)').fetchall()
for name, num in duplicates:
dump = self.db_cursor.execute('SELECT name, url FROM '
'"cplusplus.com" WHERE name="%s"'
% name).fetchall()
for n, u in dump:
if u not in self.name_exceptions:
n2 = n[5:] if n.startswith('std::') else n
try:
group = re.search('/([^/]+)/%s/$' % n2, u).group(1)
except Exception:
group = re.search('/([^/]+)/[^/]+/$', u).group(1)
new_name = '%s (%s)' % (n, group)
self.db_cursor.execute('UPDATE "cplusplus.com" '
'SET name="%s", url="%s" '
'WHERE url="%s"' %
(new_name, u, u))
self.db_conn.commit()
# cppreference.com
self.results = set()
self.crawl('https://en.cppreference.com/w/cpp', '/w/cpp')
for name, url, std in self.results:
self.insert_index('cppreference.com', name, url, std)
self.db_conn.commit()
except KeyboardInterrupt:
os.remove(environ.index_db_re)
raise KeyboardInterrupt
finally:
self.db_conn.close()
def process_document(self, doc, std):
"""callback to insert index"""
if doc.url not in self.blacklist:
print("Indexing '%s' %s..." % (doc.url, std))
name = self.extract_name(doc.text)
self.results.add((name, doc.url, std))
else:
print("Skipping blacklisted page '%s' ..." % doc.url)
return None
def parse_expression(self, expr):
"""
split expression into prefix and expression
tested with
```
operator==
!=
std::rel_ops::operator!=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators
std::vector::begin
std::abs(float)
std::fabs()
```
"""
m = re.match(r'^(.*?(?:::)?(?:operator)?)((?:::[^:]*|[^:]*)?)$', expr);
prefix = m.group(1)
tail = m.group(2)
return [prefix, tail]
def parse_title(self, title):
"""
split of the last parenthesis operator==,!=,<,<=(std::vector)
tested with
```
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=
operator==,!=,<,<=,>,>=
std::rel_ops::operator!=,>,<=,>=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators (vector)
std::vector::begin, std::vector::cbegin
std::abs(float), std::fabs
std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)
```
"""
m = re.match(r'^\s*((?:\(size_type\)|(?:.|\(\))*?)*)((?:\([^)]+\))?)\s*$', title)
postfix = m.group(2)
t_names = m.group(1).split(',')
t_names = [n.strip() for n in t_names]
prefix = self.parse_expression(t_names[0])[0]
names = []
for n in t_names:
r = self.parse_expression(n);
if prefix == r[0]:
names.append(n + postfix)
else:
names.append(prefix + r[1] + postfix)
return names
def insert_index(self, table, name, url, std=""):
"""callback to insert index"""
names = self.parse_title(name);
for n in names:
self.db_cursor.execute(
'INSERT INTO "%s" (name, url, std) VALUES (?, ?, ?)' % table, (
n, url, std))
def cache_all(self):
"""Cache all available man pages"""
respond = input(
'By default, cppman fetches pages on-the-fly if corresponding '
'page is not found in the cache. The "cache-all" option is only '
'useful if you want to view man pages offline. '
'Caching all contents will take several minutes, '
'do you want to continue [y/N]? ')
if not (respond and 'yes'.startswith(respond.lower())):
raise KeyboardInterrupt
try:
os.makedirs(environ.cache_dir)
except:
pass
self.success_count = 0
self.failure_count = 0
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
source = environ.config.source
print('Caching manpages from %s ...' % source)
data = cursor.execute('SELECT * FROM "%s"' % source).fetchall()
for name, url, _ in data:
print('Caching %s ...' % name)
retries = 3
while retries > 0:
try:
self.cache_man_page(source, url, name)
except Exception:
print('Retrying ...')
retries -= 1
else:
self.success_count += 1
break
else:
print('Error caching %s ...' % name)
self.failure_count += 1
conn.close()
print('\n%d manual pages cached successfully.' % self.success_count)
print('%d manual pages failed to cache.' % self.failure_count)
self.update_mandb(False)
def clear_cache(self):
"""Clear all cache in man"""
shutil.rmtree(environ.cache_dir)
def man(self, pattern):
"""Call viewer.sh to view man page"""
try:
avail = os.listdir(os.path.join(environ.cache_dir, environ.source))
except OSError:
avail = []
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
# Try direct match
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
# Try standard library
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="std::%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
raise RuntimeError('No manual entry for ' + pattern)
finally:
conn.close()
page_filename = self.get_normalized_page_name(page_name)
if self.forced or page_filename + '.3.gz' not in avail:
self.cache_man_page(environ.source, url, page_name)
pager_type = environ.pager if sys.stdout.isatty() else 'pipe'
# Call viewer
columns = (util.get_width() if self.force_columns == -1 else
self.force_columns)
pid = os.fork()
if pid == 0:
os.execl('/bin/sh', '/bin/sh', environ.pager_script, pager_type,
self.get_page_path(environ.source, page_name),
str(columns), environ.pager_config, page_name)
return pid
def find(self, pattern):
"""Find pages in database."""
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
selected = cursor.execute(
'SELECT * FROM "%s" WHERE name '
'LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchall()
pat = re.compile('(%s)' % re.escape(pattern), re.I)
if selected:
for name, url, std in selected:
if os.isatty(sys.stdout.fileno()):
print(pat.sub(r'\033[1;31m\1\033[0m', name) +
(' \033[1;33m[%s]\033[0m' % std if std else ''))
else:
print(name + (' [%s]' % std if std else ''))
else:
raise RuntimeError('%s: nothing appropriate.' % pattern)
def update_mandb(self, quiet=True):
"""Update mandb."""
if not environ.config.UpdateManPath:
return
print('\nrunning mandb...')
cmd = 'mandb %s' % (' -q' if quiet else '')
subprocess.Popen(cmd, shell=True).wait()
def get_normalized_page_name(self, name):
return name.replace('/', '_')
def get_page_path(self, source, name):
name = self.get_normalized_page_name(name)
return os.path.join(environ.cache_dir, source, name + '.3.gz')
|
aitjcize/cppman | cppman/main.py | Cppman.man | python | def man(self, pattern):
try:
avail = os.listdir(os.path.join(environ.cache_dir, environ.source))
except OSError:
avail = []
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
# Try direct match
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
# Try standard library
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="std::%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
raise RuntimeError('No manual entry for ' + pattern)
finally:
conn.close()
page_filename = self.get_normalized_page_name(page_name)
if self.forced or page_filename + '.3.gz' not in avail:
self.cache_man_page(environ.source, url, page_name)
pager_type = environ.pager if sys.stdout.isatty() else 'pipe'
# Call viewer
columns = (util.get_width() if self.force_columns == -1 else
self.force_columns)
pid = os.fork()
if pid == 0:
os.execl('/bin/sh', '/bin/sh', environ.pager_script, pager_type,
self.get_page_path(environ.source, page_name),
str(columns), environ.pager_config, page_name)
return pid | Call viewer.sh to view man page | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/main.py#L282-L333 | [
"def get_width():\n \"\"\"Get terminal width\"\"\"\n # Get terminal size\n ws = struct.pack(\"HHHH\", 0, 0, 0, 0)\n ws = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, ws)\n lines, columns, x, y = struct.unpack(\"HHHH\", ws)\n width = min(columns * 39 // 40, columns - 2)\n return width\n",
"def cache_man_page(self, source, url, name):\n \"\"\"callback to cache new man page\"\"\"\n # Skip if already exists, override if forced flag is true\n outname = self.get_page_path(source, name)\n if os.path.exists(outname) and not self.forced:\n return\n\n try:\n os.makedirs(os.path.join(environ.cache_dir, source))\n except OSError:\n pass\n\n # There are often some errors in the HTML, for example: missing closing\n # tag. We use fixupHTML to fix this.\n data = util.fixupHTML(urllib.request.urlopen(url).read())\n\n formatter = importlib.import_module('cppman.formatter.%s' % source[:-4])\n groff_text = formatter.html2groff(data, name)\n\n with gzip.open(outname, 'w') as f:\n f.write(groff_text.encode('utf-8'))\n",
"def get_normalized_page_name(self, name):\n return name.replace('/', '_')\n"
] | class Cppman(Crawler):
"""Manage cpp man pages, indexes"""
def __init__(self, forced=False, force_columns=-1):
Crawler.__init__(self)
self.results = set()
self.forced = forced
self.success_count = None
self.failure_count = None
self.force_columns = force_columns
self.blacklist = [
]
self.name_exceptions = [
'http://www.cplusplus.com/reference/string/swap/'
]
def extract_name(self, data):
"""Extract man page name from web page."""
name = re.search('<h1[^>]*>(.+?)</h1>', data).group(1)
name = re.sub(r'<([^>]+)>', r'', name)
name = re.sub(r'>', r'>', name)
name = re.sub(r'<', r'<', name)
return name
def rebuild_index(self):
"""Rebuild index database from cplusplus.com and cppreference.com."""
try:
os.remove(environ.index_db_re)
except:
pass
self.db_conn = sqlite3.connect(environ.index_db_re)
self.db_cursor = self.db_conn.cursor()
self.db_cursor.execute('CREATE TABLE "cplusplus.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
self.db_cursor.execute('CREATE TABLE "cppreference.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
try:
self.add_url_filter('\.(jpg|jpeg|gif|png|js|css|swf|svg)$')
self.set_follow_mode(Crawler.F_SAME_PATH)
# cplusplus.com
self.crawl('http://www.cplusplus.com/reference/')
for name, url, std in self.results:
self.insert_index('cplusplus.com', name, url, std)
self.db_conn.commit()
# Rename duplicate entries
duplicates = self.db_cursor.execute('SELECT name, COUNT(name) '
'AS NON '
'FROM "cplusplus.com" '
'GROUP BY NAME '
'HAVING (NON > 1)').fetchall()
for name, num in duplicates:
dump = self.db_cursor.execute('SELECT name, url FROM '
'"cplusplus.com" WHERE name="%s"'
% name).fetchall()
for n, u in dump:
if u not in self.name_exceptions:
n2 = n[5:] if n.startswith('std::') else n
try:
group = re.search('/([^/]+)/%s/$' % n2, u).group(1)
except Exception:
group = re.search('/([^/]+)/[^/]+/$', u).group(1)
new_name = '%s (%s)' % (n, group)
self.db_cursor.execute('UPDATE "cplusplus.com" '
'SET name="%s", url="%s" '
'WHERE url="%s"' %
(new_name, u, u))
self.db_conn.commit()
# cppreference.com
self.results = set()
self.crawl('https://en.cppreference.com/w/cpp', '/w/cpp')
for name, url, std in self.results:
self.insert_index('cppreference.com', name, url, std)
self.db_conn.commit()
except KeyboardInterrupt:
os.remove(environ.index_db_re)
raise KeyboardInterrupt
finally:
self.db_conn.close()
def process_document(self, doc, std):
"""callback to insert index"""
if doc.url not in self.blacklist:
print("Indexing '%s' %s..." % (doc.url, std))
name = self.extract_name(doc.text)
self.results.add((name, doc.url, std))
else:
print("Skipping blacklisted page '%s' ..." % doc.url)
return None
def parse_expression(self, expr):
"""
split expression into prefix and expression
tested with
```
operator==
!=
std::rel_ops::operator!=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators
std::vector::begin
std::abs(float)
std::fabs()
```
"""
m = re.match(r'^(.*?(?:::)?(?:operator)?)((?:::[^:]*|[^:]*)?)$', expr);
prefix = m.group(1)
tail = m.group(2)
return [prefix, tail]
def parse_title(self, title):
"""
split of the last parenthesis operator==,!=,<,<=(std::vector)
tested with
```
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=
operator==,!=,<,<=,>,>=
std::rel_ops::operator!=,>,<=,>=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators (vector)
std::vector::begin, std::vector::cbegin
std::abs(float), std::fabs
std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)
```
"""
m = re.match(r'^\s*((?:\(size_type\)|(?:.|\(\))*?)*)((?:\([^)]+\))?)\s*$', title)
postfix = m.group(2)
t_names = m.group(1).split(',')
t_names = [n.strip() for n in t_names]
prefix = self.parse_expression(t_names[0])[0]
names = []
for n in t_names:
r = self.parse_expression(n);
if prefix == r[0]:
names.append(n + postfix)
else:
names.append(prefix + r[1] + postfix)
return names
def insert_index(self, table, name, url, std=""):
"""callback to insert index"""
names = self.parse_title(name);
for n in names:
self.db_cursor.execute(
'INSERT INTO "%s" (name, url, std) VALUES (?, ?, ?)' % table, (
n, url, std))
def cache_all(self):
"""Cache all available man pages"""
respond = input(
'By default, cppman fetches pages on-the-fly if corresponding '
'page is not found in the cache. The "cache-all" option is only '
'useful if you want to view man pages offline. '
'Caching all contents will take several minutes, '
'do you want to continue [y/N]? ')
if not (respond and 'yes'.startswith(respond.lower())):
raise KeyboardInterrupt
try:
os.makedirs(environ.cache_dir)
except:
pass
self.success_count = 0
self.failure_count = 0
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
source = environ.config.source
print('Caching manpages from %s ...' % source)
data = cursor.execute('SELECT * FROM "%s"' % source).fetchall()
for name, url, _ in data:
print('Caching %s ...' % name)
retries = 3
while retries > 0:
try:
self.cache_man_page(source, url, name)
except Exception:
print('Retrying ...')
retries -= 1
else:
self.success_count += 1
break
else:
print('Error caching %s ...' % name)
self.failure_count += 1
conn.close()
print('\n%d manual pages cached successfully.' % self.success_count)
print('%d manual pages failed to cache.' % self.failure_count)
self.update_mandb(False)
def cache_man_page(self, source, url, name):
"""callback to cache new man page"""
# Skip if already exists, override if forced flag is true
outname = self.get_page_path(source, name)
if os.path.exists(outname) and not self.forced:
return
try:
os.makedirs(os.path.join(environ.cache_dir, source))
except OSError:
pass
# There are often some errors in the HTML, for example: missing closing
# tag. We use fixupHTML to fix this.
data = util.fixupHTML(urllib.request.urlopen(url).read())
formatter = importlib.import_module('cppman.formatter.%s' % source[:-4])
groff_text = formatter.html2groff(data, name)
with gzip.open(outname, 'w') as f:
f.write(groff_text.encode('utf-8'))
def clear_cache(self):
"""Clear all cache in man"""
shutil.rmtree(environ.cache_dir)
def find(self, pattern):
"""Find pages in database."""
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
selected = cursor.execute(
'SELECT * FROM "%s" WHERE name '
'LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchall()
pat = re.compile('(%s)' % re.escape(pattern), re.I)
if selected:
for name, url, std in selected:
if os.isatty(sys.stdout.fileno()):
print(pat.sub(r'\033[1;31m\1\033[0m', name) +
(' \033[1;33m[%s]\033[0m' % std if std else ''))
else:
print(name + (' [%s]' % std if std else ''))
else:
raise RuntimeError('%s: nothing appropriate.' % pattern)
def update_mandb(self, quiet=True):
"""Update mandb."""
if not environ.config.UpdateManPath:
return
print('\nrunning mandb...')
cmd = 'mandb %s' % (' -q' if quiet else '')
subprocess.Popen(cmd, shell=True).wait()
def get_normalized_page_name(self, name):
return name.replace('/', '_')
def get_page_path(self, source, name):
name = self.get_normalized_page_name(name)
return os.path.join(environ.cache_dir, source, name + '.3.gz')
|
aitjcize/cppman | cppman/main.py | Cppman.find | python | def find(self, pattern):
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
selected = cursor.execute(
'SELECT * FROM "%s" WHERE name '
'LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchall()
pat = re.compile('(%s)' % re.escape(pattern), re.I)
if selected:
for name, url, std in selected:
if os.isatty(sys.stdout.fileno()):
print(pat.sub(r'\033[1;31m\1\033[0m', name) +
(' \033[1;33m[%s]\033[0m' % std if std else ''))
else:
print(name + (' [%s]' % std if std else ''))
else:
raise RuntimeError('%s: nothing appropriate.' % pattern) | Find pages in database. | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/main.py#L335-L358 | null | class Cppman(Crawler):
"""Manage cpp man pages, indexes"""
def __init__(self, forced=False, force_columns=-1):
Crawler.__init__(self)
self.results = set()
self.forced = forced
self.success_count = None
self.failure_count = None
self.force_columns = force_columns
self.blacklist = [
]
self.name_exceptions = [
'http://www.cplusplus.com/reference/string/swap/'
]
def extract_name(self, data):
"""Extract man page name from web page."""
name = re.search('<h1[^>]*>(.+?)</h1>', data).group(1)
name = re.sub(r'<([^>]+)>', r'', name)
name = re.sub(r'>', r'>', name)
name = re.sub(r'<', r'<', name)
return name
def rebuild_index(self):
"""Rebuild index database from cplusplus.com and cppreference.com."""
try:
os.remove(environ.index_db_re)
except:
pass
self.db_conn = sqlite3.connect(environ.index_db_re)
self.db_cursor = self.db_conn.cursor()
self.db_cursor.execute('CREATE TABLE "cplusplus.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
self.db_cursor.execute('CREATE TABLE "cppreference.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
try:
self.add_url_filter('\.(jpg|jpeg|gif|png|js|css|swf|svg)$')
self.set_follow_mode(Crawler.F_SAME_PATH)
# cplusplus.com
self.crawl('http://www.cplusplus.com/reference/')
for name, url, std in self.results:
self.insert_index('cplusplus.com', name, url, std)
self.db_conn.commit()
# Rename duplicate entries
duplicates = self.db_cursor.execute('SELECT name, COUNT(name) '
'AS NON '
'FROM "cplusplus.com" '
'GROUP BY NAME '
'HAVING (NON > 1)').fetchall()
for name, num in duplicates:
dump = self.db_cursor.execute('SELECT name, url FROM '
'"cplusplus.com" WHERE name="%s"'
% name).fetchall()
for n, u in dump:
if u not in self.name_exceptions:
n2 = n[5:] if n.startswith('std::') else n
try:
group = re.search('/([^/]+)/%s/$' % n2, u).group(1)
except Exception:
group = re.search('/([^/]+)/[^/]+/$', u).group(1)
new_name = '%s (%s)' % (n, group)
self.db_cursor.execute('UPDATE "cplusplus.com" '
'SET name="%s", url="%s" '
'WHERE url="%s"' %
(new_name, u, u))
self.db_conn.commit()
# cppreference.com
self.results = set()
self.crawl('https://en.cppreference.com/w/cpp', '/w/cpp')
for name, url, std in self.results:
self.insert_index('cppreference.com', name, url, std)
self.db_conn.commit()
except KeyboardInterrupt:
os.remove(environ.index_db_re)
raise KeyboardInterrupt
finally:
self.db_conn.close()
def process_document(self, doc, std):
"""callback to insert index"""
if doc.url not in self.blacklist:
print("Indexing '%s' %s..." % (doc.url, std))
name = self.extract_name(doc.text)
self.results.add((name, doc.url, std))
else:
print("Skipping blacklisted page '%s' ..." % doc.url)
return None
def parse_expression(self, expr):
"""
split expression into prefix and expression
tested with
```
operator==
!=
std::rel_ops::operator!=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators
std::vector::begin
std::abs(float)
std::fabs()
```
"""
m = re.match(r'^(.*?(?:::)?(?:operator)?)((?:::[^:]*|[^:]*)?)$', expr);
prefix = m.group(1)
tail = m.group(2)
return [prefix, tail]
def parse_title(self, title):
"""
split of the last parenthesis operator==,!=,<,<=(std::vector)
tested with
```
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=
operator==,!=,<,<=,>,>=
std::rel_ops::operator!=,>,<=,>=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators (vector)
std::vector::begin, std::vector::cbegin
std::abs(float), std::fabs
std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)
```
"""
m = re.match(r'^\s*((?:\(size_type\)|(?:.|\(\))*?)*)((?:\([^)]+\))?)\s*$', title)
postfix = m.group(2)
t_names = m.group(1).split(',')
t_names = [n.strip() for n in t_names]
prefix = self.parse_expression(t_names[0])[0]
names = []
for n in t_names:
r = self.parse_expression(n);
if prefix == r[0]:
names.append(n + postfix)
else:
names.append(prefix + r[1] + postfix)
return names
def insert_index(self, table, name, url, std=""):
"""callback to insert index"""
names = self.parse_title(name);
for n in names:
self.db_cursor.execute(
'INSERT INTO "%s" (name, url, std) VALUES (?, ?, ?)' % table, (
n, url, std))
def cache_all(self):
"""Cache all available man pages"""
respond = input(
'By default, cppman fetches pages on-the-fly if corresponding '
'page is not found in the cache. The "cache-all" option is only '
'useful if you want to view man pages offline. '
'Caching all contents will take several minutes, '
'do you want to continue [y/N]? ')
if not (respond and 'yes'.startswith(respond.lower())):
raise KeyboardInterrupt
try:
os.makedirs(environ.cache_dir)
except:
pass
self.success_count = 0
self.failure_count = 0
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
source = environ.config.source
print('Caching manpages from %s ...' % source)
data = cursor.execute('SELECT * FROM "%s"' % source).fetchall()
for name, url, _ in data:
print('Caching %s ...' % name)
retries = 3
while retries > 0:
try:
self.cache_man_page(source, url, name)
except Exception:
print('Retrying ...')
retries -= 1
else:
self.success_count += 1
break
else:
print('Error caching %s ...' % name)
self.failure_count += 1
conn.close()
print('\n%d manual pages cached successfully.' % self.success_count)
print('%d manual pages failed to cache.' % self.failure_count)
self.update_mandb(False)
def cache_man_page(self, source, url, name):
"""callback to cache new man page"""
# Skip if already exists, override if forced flag is true
outname = self.get_page_path(source, name)
if os.path.exists(outname) and not self.forced:
return
try:
os.makedirs(os.path.join(environ.cache_dir, source))
except OSError:
pass
# There are often some errors in the HTML, for example: missing closing
# tag. We use fixupHTML to fix this.
data = util.fixupHTML(urllib.request.urlopen(url).read())
formatter = importlib.import_module('cppman.formatter.%s' % source[:-4])
groff_text = formatter.html2groff(data, name)
with gzip.open(outname, 'w') as f:
f.write(groff_text.encode('utf-8'))
def clear_cache(self):
"""Clear all cache in man"""
shutil.rmtree(environ.cache_dir)
def man(self, pattern):
"""Call viewer.sh to view man page"""
try:
avail = os.listdir(os.path.join(environ.cache_dir, environ.source))
except OSError:
avail = []
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
# Try direct match
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
# Try standard library
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="std::%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
raise RuntimeError('No manual entry for ' + pattern)
finally:
conn.close()
page_filename = self.get_normalized_page_name(page_name)
if self.forced or page_filename + '.3.gz' not in avail:
self.cache_man_page(environ.source, url, page_name)
pager_type = environ.pager if sys.stdout.isatty() else 'pipe'
# Call viewer
columns = (util.get_width() if self.force_columns == -1 else
self.force_columns)
pid = os.fork()
if pid == 0:
os.execl('/bin/sh', '/bin/sh', environ.pager_script, pager_type,
self.get_page_path(environ.source, page_name),
str(columns), environ.pager_config, page_name)
return pid
def update_mandb(self, quiet=True):
"""Update mandb."""
if not environ.config.UpdateManPath:
return
print('\nrunning mandb...')
cmd = 'mandb %s' % (' -q' if quiet else '')
subprocess.Popen(cmd, shell=True).wait()
def get_normalized_page_name(self, name):
return name.replace('/', '_')
def get_page_path(self, source, name):
name = self.get_normalized_page_name(name)
return os.path.join(environ.cache_dir, source, name + '.3.gz')
|
aitjcize/cppman | cppman/main.py | Cppman.update_mandb | python | def update_mandb(self, quiet=True):
if not environ.config.UpdateManPath:
return
print('\nrunning mandb...')
cmd = 'mandb %s' % (' -q' if quiet else '')
subprocess.Popen(cmd, shell=True).wait() | Update mandb. | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/main.py#L360-L366 | null | class Cppman(Crawler):
"""Manage cpp man pages, indexes"""
def __init__(self, forced=False, force_columns=-1):
Crawler.__init__(self)
self.results = set()
self.forced = forced
self.success_count = None
self.failure_count = None
self.force_columns = force_columns
self.blacklist = [
]
self.name_exceptions = [
'http://www.cplusplus.com/reference/string/swap/'
]
def extract_name(self, data):
"""Extract man page name from web page."""
name = re.search('<h1[^>]*>(.+?)</h1>', data).group(1)
name = re.sub(r'<([^>]+)>', r'', name)
name = re.sub(r'>', r'>', name)
name = re.sub(r'<', r'<', name)
return name
def rebuild_index(self):
"""Rebuild index database from cplusplus.com and cppreference.com."""
try:
os.remove(environ.index_db_re)
except:
pass
self.db_conn = sqlite3.connect(environ.index_db_re)
self.db_cursor = self.db_conn.cursor()
self.db_cursor.execute('CREATE TABLE "cplusplus.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
self.db_cursor.execute('CREATE TABLE "cppreference.com" '
'(name VARCHAR(255), url VARCHAR(255), std VARCHAR(255))')
try:
self.add_url_filter('\.(jpg|jpeg|gif|png|js|css|swf|svg)$')
self.set_follow_mode(Crawler.F_SAME_PATH)
# cplusplus.com
self.crawl('http://www.cplusplus.com/reference/')
for name, url, std in self.results:
self.insert_index('cplusplus.com', name, url, std)
self.db_conn.commit()
# Rename duplicate entries
duplicates = self.db_cursor.execute('SELECT name, COUNT(name) '
'AS NON '
'FROM "cplusplus.com" '
'GROUP BY NAME '
'HAVING (NON > 1)').fetchall()
for name, num in duplicates:
dump = self.db_cursor.execute('SELECT name, url FROM '
'"cplusplus.com" WHERE name="%s"'
% name).fetchall()
for n, u in dump:
if u not in self.name_exceptions:
n2 = n[5:] if n.startswith('std::') else n
try:
group = re.search('/([^/]+)/%s/$' % n2, u).group(1)
except Exception:
group = re.search('/([^/]+)/[^/]+/$', u).group(1)
new_name = '%s (%s)' % (n, group)
self.db_cursor.execute('UPDATE "cplusplus.com" '
'SET name="%s", url="%s" '
'WHERE url="%s"' %
(new_name, u, u))
self.db_conn.commit()
# cppreference.com
self.results = set()
self.crawl('https://en.cppreference.com/w/cpp', '/w/cpp')
for name, url, std in self.results:
self.insert_index('cppreference.com', name, url, std)
self.db_conn.commit()
except KeyboardInterrupt:
os.remove(environ.index_db_re)
raise KeyboardInterrupt
finally:
self.db_conn.close()
def process_document(self, doc, std):
"""callback to insert index"""
if doc.url not in self.blacklist:
print("Indexing '%s' %s..." % (doc.url, std))
name = self.extract_name(doc.text)
self.results.add((name, doc.url, std))
else:
print("Skipping blacklisted page '%s' ..." % doc.url)
return None
def parse_expression(self, expr):
"""
split expression into prefix and expression
tested with
```
operator==
!=
std::rel_ops::operator!=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators
std::vector::begin
std::abs(float)
std::fabs()
```
"""
m = re.match(r'^(.*?(?:::)?(?:operator)?)((?:::[^:]*|[^:]*)?)$', expr);
prefix = m.group(1)
tail = m.group(2)
return [prefix, tail]
def parse_title(self, title):
"""
split of the last parenthesis operator==,!=,<,<=(std::vector)
tested with
```
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=(std::vector)
operator==,!=,<,<=,>,>=
operator==,!=,<,<=,>,>=
std::rel_ops::operator!=,>,<=,>=
std::atomic::operator=
std::array::operator[]
std::function::operator()
std::vector::at
std::relational operators (vector)
std::vector::begin, std::vector::cbegin
std::abs(float), std::fabs
std::unordered_set::begin(size_type), std::unordered_set::cbegin(size_type)
```
"""
m = re.match(r'^\s*((?:\(size_type\)|(?:.|\(\))*?)*)((?:\([^)]+\))?)\s*$', title)
postfix = m.group(2)
t_names = m.group(1).split(',')
t_names = [n.strip() for n in t_names]
prefix = self.parse_expression(t_names[0])[0]
names = []
for n in t_names:
r = self.parse_expression(n);
if prefix == r[0]:
names.append(n + postfix)
else:
names.append(prefix + r[1] + postfix)
return names
def insert_index(self, table, name, url, std=""):
"""callback to insert index"""
names = self.parse_title(name);
for n in names:
self.db_cursor.execute(
'INSERT INTO "%s" (name, url, std) VALUES (?, ?, ?)' % table, (
n, url, std))
def cache_all(self):
"""Cache all available man pages"""
respond = input(
'By default, cppman fetches pages on-the-fly if corresponding '
'page is not found in the cache. The "cache-all" option is only '
'useful if you want to view man pages offline. '
'Caching all contents will take several minutes, '
'do you want to continue [y/N]? ')
if not (respond and 'yes'.startswith(respond.lower())):
raise KeyboardInterrupt
try:
os.makedirs(environ.cache_dir)
except:
pass
self.success_count = 0
self.failure_count = 0
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
source = environ.config.source
print('Caching manpages from %s ...' % source)
data = cursor.execute('SELECT * FROM "%s"' % source).fetchall()
for name, url, _ in data:
print('Caching %s ...' % name)
retries = 3
while retries > 0:
try:
self.cache_man_page(source, url, name)
except Exception:
print('Retrying ...')
retries -= 1
else:
self.success_count += 1
break
else:
print('Error caching %s ...' % name)
self.failure_count += 1
conn.close()
print('\n%d manual pages cached successfully.' % self.success_count)
print('%d manual pages failed to cache.' % self.failure_count)
self.update_mandb(False)
def cache_man_page(self, source, url, name):
"""callback to cache new man page"""
# Skip if already exists, override if forced flag is true
outname = self.get_page_path(source, name)
if os.path.exists(outname) and not self.forced:
return
try:
os.makedirs(os.path.join(environ.cache_dir, source))
except OSError:
pass
# There are often some errors in the HTML, for example: missing closing
# tag. We use fixupHTML to fix this.
data = util.fixupHTML(urllib.request.urlopen(url).read())
formatter = importlib.import_module('cppman.formatter.%s' % source[:-4])
groff_text = formatter.html2groff(data, name)
with gzip.open(outname, 'w') as f:
f.write(groff_text.encode('utf-8'))
def clear_cache(self):
"""Clear all cache in man"""
shutil.rmtree(environ.cache_dir)
def man(self, pattern):
"""Call viewer.sh to view man page"""
try:
avail = os.listdir(os.path.join(environ.cache_dir, environ.source))
except OSError:
avail = []
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
# Try direct match
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
# Try standard library
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name="std::%s" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
try:
page_name, url = cursor.execute(
'SELECT name,url FROM "%s" '
'WHERE name LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchone()
except TypeError:
raise RuntimeError('No manual entry for ' + pattern)
finally:
conn.close()
page_filename = self.get_normalized_page_name(page_name)
if self.forced or page_filename + '.3.gz' not in avail:
self.cache_man_page(environ.source, url, page_name)
pager_type = environ.pager if sys.stdout.isatty() else 'pipe'
# Call viewer
columns = (util.get_width() if self.force_columns == -1 else
self.force_columns)
pid = os.fork()
if pid == 0:
os.execl('/bin/sh', '/bin/sh', environ.pager_script, pager_type,
self.get_page_path(environ.source, page_name),
str(columns), environ.pager_config, page_name)
return pid
def find(self, pattern):
"""Find pages in database."""
if not os.path.exists(environ.index_db):
raise RuntimeError("can't find index.db")
conn = sqlite3.connect(environ.index_db)
cursor = conn.cursor()
selected = cursor.execute(
'SELECT * FROM "%s" WHERE name '
'LIKE "%%%s%%" ORDER BY LENGTH(name)'
% (environ.source, pattern)).fetchall()
pat = re.compile('(%s)' % re.escape(pattern), re.I)
if selected:
for name, url, std in selected:
if os.isatty(sys.stdout.fileno()):
print(pat.sub(r'\033[1;31m\1\033[0m', name) +
(' \033[1;33m[%s]\033[0m' % std if std else ''))
else:
print(name + (' [%s]' % std if std else ''))
else:
raise RuntimeError('%s: nothing appropriate.' % pattern)
def get_normalized_page_name(self, name):
return name.replace('/', '_')
def get_page_path(self, source, name):
name = self.get_normalized_page_name(name)
return os.path.join(environ.cache_dir, source, name + '.3.gz')
|
aitjcize/cppman | cppman/config.py | Config.set_default | python | def set_default(self):
try:
os.makedirs(os.path.dirname(self._configfile))
except:
pass
self._config = configparser.RawConfigParser()
self._config.add_section('Settings')
for key, val in self.DEFAULTS.items():
self._config.set('Settings', key, val)
with open(self._configfile, 'w') as f:
self._config.write(f) | Set config to default. | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/config.py#L64-L78 | null | class Config(object):
PAGERS = ['vim', 'less', 'system']
SOURCES = ['cplusplus.com', 'cppreference.com']
DEFAULTS = {
'Source': 'cplusplus.com',
'UpdateManPath': 'false',
'Pager': 'vim'
}
def __init__(self, configfile):
self._configfile = configfile
if not os.path.exists(configfile):
self.set_default()
else:
self._config = configparser.RawConfigParser()
self._config.read(self._configfile)
def __getattr__(self, name):
try:
value = self._config.get('Settings', name)
except configparser.NoOptionError:
value = self.DEFAULTS[name]
setattr(self, name, value)
self._config.read(self._configfile)
return self.parse_bool(value)
def __setattr__(self, name, value):
if not name.startswith('_'):
self._config.set('Settings', name, value)
self.save()
self.__dict__[name] = self.parse_bool(value)
def save(self):
"""Store config back to file."""
try:
os.makedirs(os.path.dirname(self._configfile))
except:
pass
with open(self._configfile, 'w') as f:
self._config.write(f)
def parse_bool(self, val):
if type(val) == str:
if val.lower() == 'true':
return True
elif val.lower() == 'false':
return False
return val
|
aitjcize/cppman | cppman/config.py | Config.save | python | def save(self):
try:
os.makedirs(os.path.dirname(self._configfile))
except:
pass
with open(self._configfile, 'w') as f:
self._config.write(f) | Store config back to file. | train | https://github.com/aitjcize/cppman/blob/7b48e81b2cd3baa912d73dfe977ecbaff945a93c/cppman/config.py#L80-L88 | null | class Config(object):
PAGERS = ['vim', 'less', 'system']
SOURCES = ['cplusplus.com', 'cppreference.com']
DEFAULTS = {
'Source': 'cplusplus.com',
'UpdateManPath': 'false',
'Pager': 'vim'
}
def __init__(self, configfile):
self._configfile = configfile
if not os.path.exists(configfile):
self.set_default()
else:
self._config = configparser.RawConfigParser()
self._config.read(self._configfile)
def __getattr__(self, name):
try:
value = self._config.get('Settings', name)
except configparser.NoOptionError:
value = self.DEFAULTS[name]
setattr(self, name, value)
self._config.read(self._configfile)
return self.parse_bool(value)
def __setattr__(self, name, value):
if not name.startswith('_'):
self._config.set('Settings', name, value)
self.save()
self.__dict__[name] = self.parse_bool(value)
def set_default(self):
"""Set config to default."""
try:
os.makedirs(os.path.dirname(self._configfile))
except:
pass
self._config = configparser.RawConfigParser()
self._config.add_section('Settings')
for key, val in self.DEFAULTS.items():
self._config.set('Settings', key, val)
with open(self._configfile, 'w') as f:
self._config.write(f)
def parse_bool(self, val):
if type(val) == str:
if val.lower() == 'true':
return True
elif val.lower() == 'false':
return False
return val
|
btimby/fulltext | fulltext/__main__.py | check_backends | python | def check_backends(title):
path = os.path.dirname(fulltext.backends.__file__)
errs = []
for name in os.listdir(path):
if not name.endswith('.py'):
continue
if name == '__init__.py':
continue
mod_name = "fulltext.backends.%s" % (
os.path.splitext(os.path.basename(name))[0])
try:
mod = __import__(mod_name, fromlist=[' '])
except ImportError as err:
errs.append((mod_name, str(err)))
continue
kw = dict(encoding='utf8', encoding_errors='strict',
kwargs={})
try:
inst = getattr(mod, "Backend")(**kw)
if hasattr(inst, "check"):
inst.check(title=title)
except Exception as err:
errs.append((mod.__name__, str(err)))
if errs:
for mod, err in errs:
msg = hilite("%s: %s" % (mod, err), ok=False)
print(msg, file=sys.stderr)
sys.exit(1) | Invoke test() for all backends and fail (raise) if some dep
is missing. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__main__.py#L26-L59 | [
"def hilite(s, ok=True, bold=False):\n \"\"\"Return an highlighted version of 'string'.\"\"\"\n if not term_supports_colors():\n return s\n attr = []\n if ok is None: # no color\n pass\n elif ok: # green\n attr.append('32')\n else: # red\n attr.append('31')\n if bold:\n attr.append('1')\n return '\\x1b[%sm%s\\x1b[0m' % (';'.join(attr), s)\n"
] | """
Fulltext CLI interface.
"""
from __future__ import absolute_import, print_function
import os
import sys
import logging
from docopt import docopt
import fulltext
import fulltext.backends
from fulltext.util import hilite
HERE = os.path.abspath(os.path.dirname(__file__))
def _handle_open(path):
with open(path, 'rb') as f:
return fulltext.get(f)
def config_logging(verbose):
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(
'[%(levelname)1.1s %(name)s] %(message)s'))
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.DEBUG if verbose else logging.INFO)
def main(args=sys.argv[1:]):
"""Extract text from a file.
Commands:
extract - extract text from path
check - make sure all deps are installed
Usage:
fulltext extract [-v] [-f] <path>...
fulltext check [-t]
Options:
-f, --file Open file first.
-t, --title Check deps for title.
-v, --verbose More verbose output.
"""
opt = docopt(main.__doc__.strip(), args, options_first=True)
config_logging(opt['--verbose'])
if opt['check']:
check_backends(opt['--title'])
elif opt['extract']:
handler = fulltext.get
if opt['--file']:
handler = _handle_open
for path in opt['<path>']:
print(handler(path))
else:
# we should never get here
raise ValueError("don't know how to handle cmd")
if __name__ == '__main__':
main()
|
btimby/fulltext | fulltext/__main__.py | main | python | def main(args=sys.argv[1:]):
opt = docopt(main.__doc__.strip(), args, options_first=True)
config_logging(opt['--verbose'])
if opt['check']:
check_backends(opt['--title'])
elif opt['extract']:
handler = fulltext.get
if opt['--file']:
handler = _handle_open
for path in opt['<path>']:
print(handler(path))
else:
# we should never get here
raise ValueError("don't know how to handle cmd") | Extract text from a file.
Commands:
extract - extract text from path
check - make sure all deps are installed
Usage:
fulltext extract [-v] [-f] <path>...
fulltext check [-t]
Options:
-f, --file Open file first.
-t, --title Check deps for title.
-v, --verbose More verbose output. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__main__.py#L71-L103 | [
"def get(path_or_file, default=SENTINAL, mime=None, name=None, backend=None,\n encoding=None, encoding_errors=None, kwargs=None,\n _wtitle=False):\n \"\"\"\n Get document full text.\n\n Accepts a path or file-like object.\n * If given, `default` is returned instead of an error.\n * `backend` is either a module object or a string specifying which\n default backend to use (e.g. \"doc\"); take a look at backends\n directory to see a list of default backends.\n * `mime` and `name` should be passed if the information\n is available to caller, otherwise a best guess is made.\n If both are specified `mime` takes precedence.\n * `encoding` and `encoding_errors` are used to handle text encoding.\n They are taken into consideration mostly only by pure-python\n backends which do not rely on CLI tools.\n Default to \"utf8\" and \"strict\" respectively.\n * `kwargs` are passed to the underlying backend.\n \"\"\"\n try:\n text, title = _get(\n path_or_file, default=default, mime=mime, name=name,\n backend=backend, kwargs=kwargs, encoding=encoding,\n encoding_errors=encoding_errors, _wtitle=_wtitle)\n if _wtitle:\n return (text, title)\n else:\n return text\n except Exception as e:\n if default is not SENTINAL:\n LOGGER.exception(e)\n return default\n raise\n",
"def _handle_open(path):\n with open(path, 'rb') as f:\n return fulltext.get(f)\n",
"def check_backends(title):\n \"\"\"Invoke test() for all backends and fail (raise) if some dep\n is missing.\n \"\"\"\n path = os.path.dirname(fulltext.backends.__file__)\n errs = []\n for name in os.listdir(path):\n if not name.endswith('.py'):\n continue\n if name == '__init__.py':\n continue\n\n mod_name = \"fulltext.backends.%s\" % (\n os.path.splitext(os.path.basename(name))[0])\n\n try:\n mod = __import__(mod_name, fromlist=[' '])\n except ImportError as err:\n errs.append((mod_name, str(err)))\n continue\n\n kw = dict(encoding='utf8', encoding_errors='strict',\n kwargs={})\n try:\n inst = getattr(mod, \"Backend\")(**kw)\n if hasattr(inst, \"check\"):\n inst.check(title=title)\n except Exception as err:\n errs.append((mod.__name__, str(err)))\n if errs:\n for mod, err in errs:\n msg = hilite(\"%s: %s\" % (mod, err), ok=False)\n print(msg, file=sys.stderr)\n sys.exit(1)\n",
"def config_logging(verbose):\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(\n '[%(levelname)1.1s %(name)s] %(message)s'))\n logger = logging.getLogger()\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG if verbose else logging.INFO)\n"
] | """
Fulltext CLI interface.
"""
from __future__ import absolute_import, print_function
import os
import sys
import logging
from docopt import docopt
import fulltext
import fulltext.backends
from fulltext.util import hilite
HERE = os.path.abspath(os.path.dirname(__file__))
def _handle_open(path):
with open(path, 'rb') as f:
return fulltext.get(f)
def check_backends(title):
"""Invoke test() for all backends and fail (raise) if some dep
is missing.
"""
path = os.path.dirname(fulltext.backends.__file__)
errs = []
for name in os.listdir(path):
if not name.endswith('.py'):
continue
if name == '__init__.py':
continue
mod_name = "fulltext.backends.%s" % (
os.path.splitext(os.path.basename(name))[0])
try:
mod = __import__(mod_name, fromlist=[' '])
except ImportError as err:
errs.append((mod_name, str(err)))
continue
kw = dict(encoding='utf8', encoding_errors='strict',
kwargs={})
try:
inst = getattr(mod, "Backend")(**kw)
if hasattr(inst, "check"):
inst.check(title=title)
except Exception as err:
errs.append((mod.__name__, str(err)))
if errs:
for mod, err in errs:
msg = hilite("%s: %s" % (mod, err), ok=False)
print(msg, file=sys.stderr)
sys.exit(1)
def config_logging(verbose):
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(
'[%(levelname)1.1s %(name)s] %(message)s'))
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(logging.DEBUG if verbose else logging.INFO)
if __name__ == '__main__':
main()
|
btimby/fulltext | fulltext/__init__.py | register_backend | python | def register_backend(mimetype, module, extensions=None):
if mimetype in MIMETYPE_TO_BACKENDS:
warn("overwriting %r mimetype which was already set" % mimetype)
MIMETYPE_TO_BACKENDS[mimetype] = module
if extensions is None:
try:
ext = _MIMETYPES_TO_EXT[mimetype]
except KeyError:
raise KeyError(
"mimetypes module has no extension associated "
"with %r mimetype; use 'extensions' arg yourself" % mimetype)
assert ext, ext
EXTS_TO_MIMETYPES[ext] = mimetype
else:
if not isinstance(extensions, (list, tuple, set, frozenset)):
raise TypeError("invalid extensions type (got %r)" % extensions)
for ext in set(extensions):
ext = ext if ext.startswith('.') else '.' + ext
assert ext, ext
EXTS_TO_MIMETYPES[ext] = mimetype | Register a backend.
`mimetype`: a mimetype string (e.g. 'text/plain')
`module`: an import string (e.g. path.to.my.module)
`extensions`: a list of extensions (e.g. ['txt', 'text']) | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L142-L166 | [
"def warn(msg):\n warnings.warn(msg, UserWarning, stacklevel=2)\n LOGGER.warning(msg)\n"
] | from __future__ import absolute_import
import errno
import re
import logging
import os
import mimetypes
import sys
from os.path import splitext
from six import string_types
from six import PY3
from fulltext.util import warn
from fulltext.util import magic
from fulltext.util import is_file_path
from fulltext.util import fobj_to_tempfile
from fulltext.util import is_windows
__all__ = ["get", "register_backend"]
# --- overridable defaults
ENCODING = sys.getfilesystemencoding()
ENCODING_ERRORS = "strict"
DEFAULT_MIME = 'application/octet-stream'
# --- others
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
STRIP_WHITE = re.compile(r'[ \t\v\f\r\n]+')
SENTINAL = object()
MIMETYPE_TO_BACKENDS = {}
EXTS_TO_MIMETYPES = {}
MAGIC_BUFFER_SIZE = 1024
mimetypes.init()
_MIMETYPES_TO_EXT = dict([(v, k) for k, v in mimetypes.types_map.items()])
# A list of extensions which will be treated as pure text.
# This takes precedence over register_backend().
# https://www.openoffice.org/dev_docs/source/file_extensions.html
_TEXT_EXTS = set((
".asm", # Non-UNIX assembler source file
".asp", # Active Server Page
".awk", # An awk script file
".bat", # MS-DOS batch file
".c", # C language file
".class", # Compiled java source code file
".cmd", # Compiler command file
".cpp", # C++ language file
".cxx", # C++ language file
".def", # Win32 library definition file
".dpc", # Source dependency file containing list of dependencies
".dpj", # Java source dependency file containing list of dependencies
".h", # C header file
".hpp", # Generated C++ header or header plus plus file
".hrc", # An ".src", # include header file
".hxx", # C++ header file
".in",
".inc", # Include file
".ini", # Initialization file
".inl", # Inline header file
".jar", # Java classes archive file
".java", # Java language file
".js", # JavaScript code file
".jsp", # Java Server Page file
".kdelnk", # KDE1 configuration file
".l", # Lex source code file
".ll", # Lex source code file
".lnx", # Linux-specific makefile
".log", # Log file
".lst", # ASCII database file used in solenv
".MacOS",
".md", # Markdown language.
".mk", # A dmake makefile
".mod", # BASIC module file
".par", # Script particles file
".pl", # Perl script
".plc", # Former build script file, now obsolete
".pld", # Former build script file, now obsolete
".pm", # Perl module file
".pmk", # Project makefiles
".pre", # Preprocessor output from scpcomp
".py", # Python
".pyx", # Cython
".r", # Resource file for Macintosh
".rc", # A dmake recursive makefile or a Win32 resource script file
".rdb", # Interface and type description database (type library)
".res", # Resource file
".rst", # Restructured text
".s", # Assembler source file (UNIX)
".sbl", # BASIC file
".scp", # Script source file
".sh", # Shell script
".src", # Source resource string file
".txt", # Language text file
".y", # Yacc source code file
".yaml", # Yaml
".yml", # Yaml
".yxx", # Bison source code file
))
# XXX: dirty hack for pyinstaller so that it includes these modules.
# TODO: find a way to do this in pyinstaller.spec instead.
if is_windows() and hasattr(sys, '_MEIPASS'):
from fulltext.backends import __bin # NOQA
from fulltext.backends import __csv # NOQA
from fulltext.backends import __doc # NOQA
from fulltext.backends import __docx # NOQA
from fulltext.backends import __eml # NOQA
from fulltext.backends import __epub # NOQA
from fulltext.backends import __gz # NOQA
from fulltext.backends import __html # NOQA
from fulltext.backends import __hwp # NOQA
from fulltext.backends import __json # NOQA
from fulltext.backends import __mbox # NOQA
# XXX couldn't find a way to install ExtractMessage lib with
# pyinstaller.
# from fulltext.backends import __msg # NOQA
from fulltext.backends import __ocr # NOQA
from fulltext.backends import __odt # NOQA
from fulltext.backends import __pdf # NOQA
from fulltext.backends import __pptx # NOQA
from fulltext.backends import __ps # NOQA
from fulltext.backends import __rar # NOQA
from fulltext.backends import __rtf # NOQA
from fulltext.backends import __text # NOQA
from fulltext.backends import __xlsx # NOQA
from fulltext.backends import __xml # NOQA
from fulltext.backends import __zip # NOQA
# =====================================================================
# --- backends
# =====================================================================
register_backend(
'application/zip',
'fulltext.backends.__zip',
extensions=[".zip"])
register_backend(
'application/x-rar-compressed',
'fulltext.backends.__rar',
extensions=['.rar'])
for mt in ("text/xml", "application/xml", "application/x-xml"):
register_backend(
mt,
'fulltext.backends.__xml',
extensions=[".xml", ".xsd"])
register_backend(
'application/vnd.ms-excel',
'fulltext.backends.__xlsx',
extensions=['.xls', '.xlsx'])
register_backend(
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'fulltext.backends.__xlsx',
extensions=['.xlsx'])
register_backend(
'text/plain',
'fulltext.backends.__text',
extensions=['.txt', '.text'])
register_backend(
'application/rtf',
'fulltext.backends.__rtf',
extensions=['.rtf'])
register_backend(
'application/vnd.openxmlformats-officedocument.presentationml.presentation', # NOQA
'fulltext.backends.__pptx',
extensions=['.pptx'])
register_backend(
'application/pdf',
'fulltext.backends.__pdf',
extensions=['.pdf'])
register_backend(
'application/vnd.oasis.opendocument.text',
'fulltext.backends.__odt',
extensions=['.odt'])
register_backend(
'application/vnd.oasis.opendocument.spreadsheet',
'fulltext.backends.__odt',
extensions=['.ods'])
# images
register_backend(
'image/jpeg',
'fulltext.backends.__ocr',
extensions=['.jpg', '.jpeg'])
register_backend(
'image/bmp',
'fulltext.backends.__ocr',
extensions=['.bmp'])
register_backend(
'image/png',
'fulltext.backends.__ocr',
extensions=['.png'])
register_backend(
'image/gif',
'fulltext.backends.__ocr',
extensions=['.gif'])
register_backend(
'application/x-hwp',
'fulltext.backends.__hwp',
extensions=['.hwp'])
for mt in ('text/html', 'application/html', 'text/xhtml'):
register_backend(
mt,
'fulltext.backends.__html',
extensions=['.htm', '.html', '.xhtml'])
register_backend(
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'fulltext.backends.__docx',
extensions=['.docx'])
register_backend(
'application/msword',
'fulltext.backends.__doc',
extensions=['.doc'])
for mt in ('text/csv', 'text/tsv', 'text/psv'):
register_backend(
mt,
'fulltext.backends.__csv',
extensions=['.csv', '.tsv', '.psv', '.tab'])
for mt in ("application/epub", "application/epub+zip"):
register_backend(
mt,
'fulltext.backends.__epub',
extensions=[".epub"])
register_backend(
'application/postscript',
'fulltext.backends.__ps',
extensions=[".ps", ".eps", ".ai"])
register_backend(
'message/rfc822',
'fulltext.backends.__eml',
extensions=['.eml'])
register_backend(
'application/mbox',
'fulltext.backends.__mbox',
extensions=['.mbox'])
register_backend(
'application/vnd.ms-outlook',
'fulltext.backends.__msg',
extensions=['.msg'])
register_backend(
'application/gzip',
'fulltext.backends.__gz',
extensions=['.gz'])
register_backend(
'application/json',
'fulltext.backends.__json',
extensions=['.json'])
# default backend.
register_backend(
'application/octet-stream',
'fulltext.backends.__bin',
extensions=['.a', '.bin'])
# Extensions which will be treated as pure text.
# We just come up with a custom mime name.
for ext in _TEXT_EXTS:
register_backend(
'[custom-fulltext-mime]/%s' % ext,
'fulltext.backends.__text',
extensions=[ext])
# =====================================================================
# --- utils
# =====================================================================
def is_binary(f):
"""Return True if binary mode."""
# NOTE: order matters here. We don't bail on Python 2 just yet. Both
# codecs.open() and io.open() can open in text mode, both set the encoding
# attribute. We must do that check first.
# If it has a decoding attribute with a value, it is text mode.
if getattr(f, "encoding", None):
return False
# Python 2 makes no further distinction.
if not PY3:
return True
# If the file has a mode, and it contains b, it is binary.
try:
if 'b' in getattr(f, 'mode', ''):
return True
except TypeError:
import gzip
if isinstance(f, gzip.GzipFile):
return True # in gzip mode is an integer
raise
# Can we sniff?
try:
f.seek(0, os.SEEK_CUR)
except (AttributeError, IOError):
return False
# Finally, let's sniff by reading a byte.
byte = f.read(1)
f.seek(-1, os.SEEK_CUR)
return hasattr(byte, 'decode')
def handle_path(backend_inst, path, **kwargs):
"""
Handle a path.
Called by `get()` when provided a path. This function will prefer the
backend's `handle_path()` if one is provided Otherwise, it will open the
given path then use `handle_fobj()`.
"""
if callable(getattr(backend_inst, 'handle_path', None)):
# Prefer handle_path() if present.
LOGGER.debug("using handle_path")
return backend_inst.handle_path(path)
elif callable(getattr(backend_inst, 'handle_fobj', None)):
# Fallback to handle_fobj(). No warning here since the performance hit
# is minimal.
LOGGER.debug("using handle_fobj")
with open(path, 'rb') as f:
return backend_inst.handle_fobj(f)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend_inst.__name__)
def handle_fobj(backend, f, **kwargs):
"""
Handle a file-like object.
Called by `get()` when provided a file-like. This function will prefer the
backend's `handle_fobj()` if one is provided. Otherwise, it will write the
data to a temporary file and call `handle_path()`.
"""
if not is_binary(f):
raise AssertionError('File must be opened in binary mode.')
if callable(getattr(backend, 'handle_fobj', None)):
# Prefer handle_fobj() if present.
LOGGER.debug("using handle_fobj")
return backend.handle_fobj(f)
elif callable(getattr(backend, 'handle_path', None)):
# Fallback to handle_path(). Warn user since this is potentially
# expensive.
LOGGER.debug("using handle_path")
LOGGER.warning(
"Using disk, %r backend does not provide `handle_fobj()`", backend)
ext = ''
if 'ext' in kwargs:
ext = '.' + kwargs['ext']
with fobj_to_tempfile(f, suffix=ext) as fname:
return backend.handle_path(fname, **kwargs)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend.__name__)
def import_mod(mod_name):
return __import__(mod_name, fromlist=[' '])
def backend_from_mime(mime):
"""Determine backend module object from a mime string."""
try:
mod_name = MIMETYPE_TO_BACKENDS[mime]
except KeyError:
msg = "No handler for %r, defaulting to %r" % (mime, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
mod = import_mod(mod_name)
return mod
def backend_from_fname(name):
"""Determine backend module object from a file name."""
ext = splitext(name)[1]
try:
mime = EXTS_TO_MIMETYPES[ext]
except KeyError:
try:
f = open(name, 'rb')
except IOError as e:
# The file may not exist, we are being asked to determine it's type
# from it's name. Other errors are unexpected.
if e.errno != errno.ENOENT:
raise
# We will have to fall back upon the default backend.
msg = "No handler for %r, defaulting to %r" % (ext, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
else:
with f:
return backend_from_fobj(f)
else:
mod_name = MIMETYPE_TO_BACKENDS[mime]
mod = import_mod(mod_name)
return mod
def backend_from_fobj(f):
"""Determine backend module object from a file object."""
if magic is None:
warn("magic lib is not installed; assuming mime type %r" % (
DEFAULT_MIME))
return backend_from_mime(DEFAULT_MIME)
else:
offset = f.tell()
try:
f.seek(0)
chunk = f.read(MAGIC_BUFFER_SIZE)
mime = magic.from_buffer(chunk, mime=True)
return backend_from_mime(mime)
finally:
f.seek(offset)
def backend_inst_from_mod(mod, encoding, encoding_errors, kwargs):
"""Given a mod and a set of opts return an instantiated
Backend class.
"""
kw = dict(encoding=encoding, encoding_errors=encoding_errors,
kwargs=kwargs)
try:
klass = getattr(mod, "Backend")
except AttributeError:
raise AttributeError("%r mod does not define any backend class" % mod)
inst = klass(**kw)
try:
inst.check(title=False)
except Exception as err:
bin_mod = "fulltext.backends.__bin"
warn("can't use %r due to %r; use %r backend instead" % (
mod, str(err), bin_mod))
inst = import_mod(bin_mod).Backend(**kw)
inst.check(title=False)
LOGGER.debug("using %r" % inst)
return inst
# =====================================================================
# --- public API
# =====================================================================
def _get(path_or_file, default, mime, name, backend, encoding,
encoding_errors, kwargs, _wtitle):
if encoding is None:
encoding = ENCODING
if encoding_errors is None:
encoding_errors = ENCODING_ERRORS
kwargs = kwargs.copy() if kwargs is not None else {}
kwargs.setdefault("mime", mime)
# Find backend module.
if backend is None:
if mime:
backend_mod = backend_from_mime(mime)
elif name:
backend_mod = backend_from_fname(name)
else:
if is_file_path(path_or_file):
backend_mod = backend_from_fname(path_or_file)
else:
if hasattr(path_or_file, "name"):
backend_mod = backend_from_fname(path_or_file.name)
else:
backend_mod = backend_from_fobj(path_or_file)
else:
if isinstance(backend, string_types):
try:
mime = EXTS_TO_MIMETYPES['.' + backend]
except KeyError:
raise ValueError("invalid backend %r" % backend)
backend_mod = backend_from_mime(mime)
else:
backend_mod = backend
# Get backend class.
inst = backend_inst_from_mod(
backend_mod, encoding, encoding_errors, kwargs)
fun = handle_path if is_file_path(path_or_file) else handle_fobj
# Run handle_ function, handle callbacks.
title = None
inst.setup()
try:
text = fun(inst, path_or_file)
if _wtitle:
try:
title = inst.handle_title(path_or_file)
except Exception:
LOGGER.exception("error while getting title (setting to None)")
finally:
inst.teardown()
assert text is not None, "backend function returned None"
text = STRIP_WHITE.sub(' ', text)
text = text.strip()
return (text, title)
def get(path_or_file, default=SENTINAL, mime=None, name=None, backend=None,
encoding=None, encoding_errors=None, kwargs=None,
_wtitle=False):
"""
Get document full text.
Accepts a path or file-like object.
* If given, `default` is returned instead of an error.
* `backend` is either a module object or a string specifying which
default backend to use (e.g. "doc"); take a look at backends
directory to see a list of default backends.
* `mime` and `name` should be passed if the information
is available to caller, otherwise a best guess is made.
If both are specified `mime` takes precedence.
* `encoding` and `encoding_errors` are used to handle text encoding.
They are taken into consideration mostly only by pure-python
backends which do not rely on CLI tools.
Default to "utf8" and "strict" respectively.
* `kwargs` are passed to the underlying backend.
"""
try:
text, title = _get(
path_or_file, default=default, mime=mime, name=name,
backend=backend, kwargs=kwargs, encoding=encoding,
encoding_errors=encoding_errors, _wtitle=_wtitle)
if _wtitle:
return (text, title)
else:
return text
except Exception as e:
if default is not SENTINAL:
LOGGER.exception(e)
return default
raise
def get_with_title(*args, **kwargs):
"""Like get() but also tries to determine document title.
Returns a (text, title) tuple.
"""
kwargs['_wtitle'] = True
return get(*args, **kwargs)
|
btimby/fulltext | fulltext/__init__.py | is_binary | python | def is_binary(f):
# NOTE: order matters here. We don't bail on Python 2 just yet. Both
# codecs.open() and io.open() can open in text mode, both set the encoding
# attribute. We must do that check first.
# If it has a decoding attribute with a value, it is text mode.
if getattr(f, "encoding", None):
return False
# Python 2 makes no further distinction.
if not PY3:
return True
# If the file has a mode, and it contains b, it is binary.
try:
if 'b' in getattr(f, 'mode', ''):
return True
except TypeError:
import gzip
if isinstance(f, gzip.GzipFile):
return True # in gzip mode is an integer
raise
# Can we sniff?
try:
f.seek(0, os.SEEK_CUR)
except (AttributeError, IOError):
return False
# Finally, let's sniff by reading a byte.
byte = f.read(1)
f.seek(-1, os.SEEK_CUR)
return hasattr(byte, 'decode') | Return True if binary mode. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L329-L362 | null | from __future__ import absolute_import
import errno
import re
import logging
import os
import mimetypes
import sys
from os.path import splitext
from six import string_types
from six import PY3
from fulltext.util import warn
from fulltext.util import magic
from fulltext.util import is_file_path
from fulltext.util import fobj_to_tempfile
from fulltext.util import is_windows
__all__ = ["get", "register_backend"]
# --- overridable defaults
ENCODING = sys.getfilesystemencoding()
ENCODING_ERRORS = "strict"
DEFAULT_MIME = 'application/octet-stream'
# --- others
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
STRIP_WHITE = re.compile(r'[ \t\v\f\r\n]+')
SENTINAL = object()
MIMETYPE_TO_BACKENDS = {}
EXTS_TO_MIMETYPES = {}
MAGIC_BUFFER_SIZE = 1024
mimetypes.init()
_MIMETYPES_TO_EXT = dict([(v, k) for k, v in mimetypes.types_map.items()])
# A list of extensions which will be treated as pure text.
# This takes precedence over register_backend().
# https://www.openoffice.org/dev_docs/source/file_extensions.html
_TEXT_EXTS = set((
".asm", # Non-UNIX assembler source file
".asp", # Active Server Page
".awk", # An awk script file
".bat", # MS-DOS batch file
".c", # C language file
".class", # Compiled java source code file
".cmd", # Compiler command file
".cpp", # C++ language file
".cxx", # C++ language file
".def", # Win32 library definition file
".dpc", # Source dependency file containing list of dependencies
".dpj", # Java source dependency file containing list of dependencies
".h", # C header file
".hpp", # Generated C++ header or header plus plus file
".hrc", # An ".src", # include header file
".hxx", # C++ header file
".in",
".inc", # Include file
".ini", # Initialization file
".inl", # Inline header file
".jar", # Java classes archive file
".java", # Java language file
".js", # JavaScript code file
".jsp", # Java Server Page file
".kdelnk", # KDE1 configuration file
".l", # Lex source code file
".ll", # Lex source code file
".lnx", # Linux-specific makefile
".log", # Log file
".lst", # ASCII database file used in solenv
".MacOS",
".md", # Markdown language.
".mk", # A dmake makefile
".mod", # BASIC module file
".par", # Script particles file
".pl", # Perl script
".plc", # Former build script file, now obsolete
".pld", # Former build script file, now obsolete
".pm", # Perl module file
".pmk", # Project makefiles
".pre", # Preprocessor output from scpcomp
".py", # Python
".pyx", # Cython
".r", # Resource file for Macintosh
".rc", # A dmake recursive makefile or a Win32 resource script file
".rdb", # Interface and type description database (type library)
".res", # Resource file
".rst", # Restructured text
".s", # Assembler source file (UNIX)
".sbl", # BASIC file
".scp", # Script source file
".sh", # Shell script
".src", # Source resource string file
".txt", # Language text file
".y", # Yacc source code file
".yaml", # Yaml
".yml", # Yaml
".yxx", # Bison source code file
))
# XXX: dirty hack for pyinstaller so that it includes these modules.
# TODO: find a way to do this in pyinstaller.spec instead.
if is_windows() and hasattr(sys, '_MEIPASS'):
from fulltext.backends import __bin # NOQA
from fulltext.backends import __csv # NOQA
from fulltext.backends import __doc # NOQA
from fulltext.backends import __docx # NOQA
from fulltext.backends import __eml # NOQA
from fulltext.backends import __epub # NOQA
from fulltext.backends import __gz # NOQA
from fulltext.backends import __html # NOQA
from fulltext.backends import __hwp # NOQA
from fulltext.backends import __json # NOQA
from fulltext.backends import __mbox # NOQA
# XXX couldn't find a way to install ExtractMessage lib with
# pyinstaller.
# from fulltext.backends import __msg # NOQA
from fulltext.backends import __ocr # NOQA
from fulltext.backends import __odt # NOQA
from fulltext.backends import __pdf # NOQA
from fulltext.backends import __pptx # NOQA
from fulltext.backends import __ps # NOQA
from fulltext.backends import __rar # NOQA
from fulltext.backends import __rtf # NOQA
from fulltext.backends import __text # NOQA
from fulltext.backends import __xlsx # NOQA
from fulltext.backends import __xml # NOQA
from fulltext.backends import __zip # NOQA
# =====================================================================
# --- backends
# =====================================================================
def register_backend(mimetype, module, extensions=None):
"""Register a backend.
`mimetype`: a mimetype string (e.g. 'text/plain')
`module`: an import string (e.g. path.to.my.module)
`extensions`: a list of extensions (e.g. ['txt', 'text'])
"""
if mimetype in MIMETYPE_TO_BACKENDS:
warn("overwriting %r mimetype which was already set" % mimetype)
MIMETYPE_TO_BACKENDS[mimetype] = module
if extensions is None:
try:
ext = _MIMETYPES_TO_EXT[mimetype]
except KeyError:
raise KeyError(
"mimetypes module has no extension associated "
"with %r mimetype; use 'extensions' arg yourself" % mimetype)
assert ext, ext
EXTS_TO_MIMETYPES[ext] = mimetype
else:
if not isinstance(extensions, (list, tuple, set, frozenset)):
raise TypeError("invalid extensions type (got %r)" % extensions)
for ext in set(extensions):
ext = ext if ext.startswith('.') else '.' + ext
assert ext, ext
EXTS_TO_MIMETYPES[ext] = mimetype
register_backend(
'application/zip',
'fulltext.backends.__zip',
extensions=[".zip"])
register_backend(
'application/x-rar-compressed',
'fulltext.backends.__rar',
extensions=['.rar'])
for mt in ("text/xml", "application/xml", "application/x-xml"):
register_backend(
mt,
'fulltext.backends.__xml',
extensions=[".xml", ".xsd"])
register_backend(
'application/vnd.ms-excel',
'fulltext.backends.__xlsx',
extensions=['.xls', '.xlsx'])
register_backend(
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'fulltext.backends.__xlsx',
extensions=['.xlsx'])
register_backend(
'text/plain',
'fulltext.backends.__text',
extensions=['.txt', '.text'])
register_backend(
'application/rtf',
'fulltext.backends.__rtf',
extensions=['.rtf'])
register_backend(
'application/vnd.openxmlformats-officedocument.presentationml.presentation', # NOQA
'fulltext.backends.__pptx',
extensions=['.pptx'])
register_backend(
'application/pdf',
'fulltext.backends.__pdf',
extensions=['.pdf'])
register_backend(
'application/vnd.oasis.opendocument.text',
'fulltext.backends.__odt',
extensions=['.odt'])
register_backend(
'application/vnd.oasis.opendocument.spreadsheet',
'fulltext.backends.__odt',
extensions=['.ods'])
# images
register_backend(
'image/jpeg',
'fulltext.backends.__ocr',
extensions=['.jpg', '.jpeg'])
register_backend(
'image/bmp',
'fulltext.backends.__ocr',
extensions=['.bmp'])
register_backend(
'image/png',
'fulltext.backends.__ocr',
extensions=['.png'])
register_backend(
'image/gif',
'fulltext.backends.__ocr',
extensions=['.gif'])
register_backend(
'application/x-hwp',
'fulltext.backends.__hwp',
extensions=['.hwp'])
for mt in ('text/html', 'application/html', 'text/xhtml'):
register_backend(
mt,
'fulltext.backends.__html',
extensions=['.htm', '.html', '.xhtml'])
register_backend(
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'fulltext.backends.__docx',
extensions=['.docx'])
register_backend(
'application/msword',
'fulltext.backends.__doc',
extensions=['.doc'])
for mt in ('text/csv', 'text/tsv', 'text/psv'):
register_backend(
mt,
'fulltext.backends.__csv',
extensions=['.csv', '.tsv', '.psv', '.tab'])
for mt in ("application/epub", "application/epub+zip"):
register_backend(
mt,
'fulltext.backends.__epub',
extensions=[".epub"])
register_backend(
'application/postscript',
'fulltext.backends.__ps',
extensions=[".ps", ".eps", ".ai"])
register_backend(
'message/rfc822',
'fulltext.backends.__eml',
extensions=['.eml'])
register_backend(
'application/mbox',
'fulltext.backends.__mbox',
extensions=['.mbox'])
register_backend(
'application/vnd.ms-outlook',
'fulltext.backends.__msg',
extensions=['.msg'])
register_backend(
'application/gzip',
'fulltext.backends.__gz',
extensions=['.gz'])
register_backend(
'application/json',
'fulltext.backends.__json',
extensions=['.json'])
# default backend.
register_backend(
'application/octet-stream',
'fulltext.backends.__bin',
extensions=['.a', '.bin'])
# Extensions which will be treated as pure text.
# We just come up with a custom mime name.
for ext in _TEXT_EXTS:
register_backend(
'[custom-fulltext-mime]/%s' % ext,
'fulltext.backends.__text',
extensions=[ext])
# =====================================================================
# --- utils
# =====================================================================
def handle_path(backend_inst, path, **kwargs):
"""
Handle a path.
Called by `get()` when provided a path. This function will prefer the
backend's `handle_path()` if one is provided Otherwise, it will open the
given path then use `handle_fobj()`.
"""
if callable(getattr(backend_inst, 'handle_path', None)):
# Prefer handle_path() if present.
LOGGER.debug("using handle_path")
return backend_inst.handle_path(path)
elif callable(getattr(backend_inst, 'handle_fobj', None)):
# Fallback to handle_fobj(). No warning here since the performance hit
# is minimal.
LOGGER.debug("using handle_fobj")
with open(path, 'rb') as f:
return backend_inst.handle_fobj(f)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend_inst.__name__)
def handle_fobj(backend, f, **kwargs):
"""
Handle a file-like object.
Called by `get()` when provided a file-like. This function will prefer the
backend's `handle_fobj()` if one is provided. Otherwise, it will write the
data to a temporary file and call `handle_path()`.
"""
if not is_binary(f):
raise AssertionError('File must be opened in binary mode.')
if callable(getattr(backend, 'handle_fobj', None)):
# Prefer handle_fobj() if present.
LOGGER.debug("using handle_fobj")
return backend.handle_fobj(f)
elif callable(getattr(backend, 'handle_path', None)):
# Fallback to handle_path(). Warn user since this is potentially
# expensive.
LOGGER.debug("using handle_path")
LOGGER.warning(
"Using disk, %r backend does not provide `handle_fobj()`", backend)
ext = ''
if 'ext' in kwargs:
ext = '.' + kwargs['ext']
with fobj_to_tempfile(f, suffix=ext) as fname:
return backend.handle_path(fname, **kwargs)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend.__name__)
def import_mod(mod_name):
return __import__(mod_name, fromlist=[' '])
def backend_from_mime(mime):
"""Determine backend module object from a mime string."""
try:
mod_name = MIMETYPE_TO_BACKENDS[mime]
except KeyError:
msg = "No handler for %r, defaulting to %r" % (mime, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
mod = import_mod(mod_name)
return mod
def backend_from_fname(name):
"""Determine backend module object from a file name."""
ext = splitext(name)[1]
try:
mime = EXTS_TO_MIMETYPES[ext]
except KeyError:
try:
f = open(name, 'rb')
except IOError as e:
# The file may not exist, we are being asked to determine it's type
# from it's name. Other errors are unexpected.
if e.errno != errno.ENOENT:
raise
# We will have to fall back upon the default backend.
msg = "No handler for %r, defaulting to %r" % (ext, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
else:
with f:
return backend_from_fobj(f)
else:
mod_name = MIMETYPE_TO_BACKENDS[mime]
mod = import_mod(mod_name)
return mod
def backend_from_fobj(f):
"""Determine backend module object from a file object."""
if magic is None:
warn("magic lib is not installed; assuming mime type %r" % (
DEFAULT_MIME))
return backend_from_mime(DEFAULT_MIME)
else:
offset = f.tell()
try:
f.seek(0)
chunk = f.read(MAGIC_BUFFER_SIZE)
mime = magic.from_buffer(chunk, mime=True)
return backend_from_mime(mime)
finally:
f.seek(offset)
def backend_inst_from_mod(mod, encoding, encoding_errors, kwargs):
"""Given a mod and a set of opts return an instantiated
Backend class.
"""
kw = dict(encoding=encoding, encoding_errors=encoding_errors,
kwargs=kwargs)
try:
klass = getattr(mod, "Backend")
except AttributeError:
raise AttributeError("%r mod does not define any backend class" % mod)
inst = klass(**kw)
try:
inst.check(title=False)
except Exception as err:
bin_mod = "fulltext.backends.__bin"
warn("can't use %r due to %r; use %r backend instead" % (
mod, str(err), bin_mod))
inst = import_mod(bin_mod).Backend(**kw)
inst.check(title=False)
LOGGER.debug("using %r" % inst)
return inst
# =====================================================================
# --- public API
# =====================================================================
def _get(path_or_file, default, mime, name, backend, encoding,
encoding_errors, kwargs, _wtitle):
if encoding is None:
encoding = ENCODING
if encoding_errors is None:
encoding_errors = ENCODING_ERRORS
kwargs = kwargs.copy() if kwargs is not None else {}
kwargs.setdefault("mime", mime)
# Find backend module.
if backend is None:
if mime:
backend_mod = backend_from_mime(mime)
elif name:
backend_mod = backend_from_fname(name)
else:
if is_file_path(path_or_file):
backend_mod = backend_from_fname(path_or_file)
else:
if hasattr(path_or_file, "name"):
backend_mod = backend_from_fname(path_or_file.name)
else:
backend_mod = backend_from_fobj(path_or_file)
else:
if isinstance(backend, string_types):
try:
mime = EXTS_TO_MIMETYPES['.' + backend]
except KeyError:
raise ValueError("invalid backend %r" % backend)
backend_mod = backend_from_mime(mime)
else:
backend_mod = backend
# Get backend class.
inst = backend_inst_from_mod(
backend_mod, encoding, encoding_errors, kwargs)
fun = handle_path if is_file_path(path_or_file) else handle_fobj
# Run handle_ function, handle callbacks.
title = None
inst.setup()
try:
text = fun(inst, path_or_file)
if _wtitle:
try:
title = inst.handle_title(path_or_file)
except Exception:
LOGGER.exception("error while getting title (setting to None)")
finally:
inst.teardown()
assert text is not None, "backend function returned None"
text = STRIP_WHITE.sub(' ', text)
text = text.strip()
return (text, title)
def get(path_or_file, default=SENTINAL, mime=None, name=None, backend=None,
encoding=None, encoding_errors=None, kwargs=None,
_wtitle=False):
"""
Get document full text.
Accepts a path or file-like object.
* If given, `default` is returned instead of an error.
* `backend` is either a module object or a string specifying which
default backend to use (e.g. "doc"); take a look at backends
directory to see a list of default backends.
* `mime` and `name` should be passed if the information
is available to caller, otherwise a best guess is made.
If both are specified `mime` takes precedence.
* `encoding` and `encoding_errors` are used to handle text encoding.
They are taken into consideration mostly only by pure-python
backends which do not rely on CLI tools.
Default to "utf8" and "strict" respectively.
* `kwargs` are passed to the underlying backend.
"""
try:
text, title = _get(
path_or_file, default=default, mime=mime, name=name,
backend=backend, kwargs=kwargs, encoding=encoding,
encoding_errors=encoding_errors, _wtitle=_wtitle)
if _wtitle:
return (text, title)
else:
return text
except Exception as e:
if default is not SENTINAL:
LOGGER.exception(e)
return default
raise
def get_with_title(*args, **kwargs):
"""Like get() but also tries to determine document title.
Returns a (text, title) tuple.
"""
kwargs['_wtitle'] = True
return get(*args, **kwargs)
|
btimby/fulltext | fulltext/__init__.py | handle_path | python | def handle_path(backend_inst, path, **kwargs):
if callable(getattr(backend_inst, 'handle_path', None)):
# Prefer handle_path() if present.
LOGGER.debug("using handle_path")
return backend_inst.handle_path(path)
elif callable(getattr(backend_inst, 'handle_fobj', None)):
# Fallback to handle_fobj(). No warning here since the performance hit
# is minimal.
LOGGER.debug("using handle_fobj")
with open(path, 'rb') as f:
return backend_inst.handle_fobj(f)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend_inst.__name__) | Handle a path.
Called by `get()` when provided a path. This function will prefer the
backend's `handle_path()` if one is provided Otherwise, it will open the
given path then use `handle_fobj()`. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L365-L387 | null | from __future__ import absolute_import
import errno
import re
import logging
import os
import mimetypes
import sys
from os.path import splitext
from six import string_types
from six import PY3
from fulltext.util import warn
from fulltext.util import magic
from fulltext.util import is_file_path
from fulltext.util import fobj_to_tempfile
from fulltext.util import is_windows
__all__ = ["get", "register_backend"]
# --- overridable defaults
ENCODING = sys.getfilesystemencoding()
ENCODING_ERRORS = "strict"
DEFAULT_MIME = 'application/octet-stream'
# --- others
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
STRIP_WHITE = re.compile(r'[ \t\v\f\r\n]+')
SENTINAL = object()
MIMETYPE_TO_BACKENDS = {}
EXTS_TO_MIMETYPES = {}
MAGIC_BUFFER_SIZE = 1024
mimetypes.init()
_MIMETYPES_TO_EXT = dict([(v, k) for k, v in mimetypes.types_map.items()])
# A list of extensions which will be treated as pure text.
# This takes precedence over register_backend().
# https://www.openoffice.org/dev_docs/source/file_extensions.html
_TEXT_EXTS = set((
".asm", # Non-UNIX assembler source file
".asp", # Active Server Page
".awk", # An awk script file
".bat", # MS-DOS batch file
".c", # C language file
".class", # Compiled java source code file
".cmd", # Compiler command file
".cpp", # C++ language file
".cxx", # C++ language file
".def", # Win32 library definition file
".dpc", # Source dependency file containing list of dependencies
".dpj", # Java source dependency file containing list of dependencies
".h", # C header file
".hpp", # Generated C++ header or header plus plus file
".hrc", # An ".src", # include header file
".hxx", # C++ header file
".in",
".inc", # Include file
".ini", # Initialization file
".inl", # Inline header file
".jar", # Java classes archive file
".java", # Java language file
".js", # JavaScript code file
".jsp", # Java Server Page file
".kdelnk", # KDE1 configuration file
".l", # Lex source code file
".ll", # Lex source code file
".lnx", # Linux-specific makefile
".log", # Log file
".lst", # ASCII database file used in solenv
".MacOS",
".md", # Markdown language.
".mk", # A dmake makefile
".mod", # BASIC module file
".par", # Script particles file
".pl", # Perl script
".plc", # Former build script file, now obsolete
".pld", # Former build script file, now obsolete
".pm", # Perl module file
".pmk", # Project makefiles
".pre", # Preprocessor output from scpcomp
".py", # Python
".pyx", # Cython
".r", # Resource file for Macintosh
".rc", # A dmake recursive makefile or a Win32 resource script file
".rdb", # Interface and type description database (type library)
".res", # Resource file
".rst", # Restructured text
".s", # Assembler source file (UNIX)
".sbl", # BASIC file
".scp", # Script source file
".sh", # Shell script
".src", # Source resource string file
".txt", # Language text file
".y", # Yacc source code file
".yaml", # Yaml
".yml", # Yaml
".yxx", # Bison source code file
))
# XXX: dirty hack for pyinstaller so that it includes these modules.
# TODO: find a way to do this in pyinstaller.spec instead.
if is_windows() and hasattr(sys, '_MEIPASS'):
from fulltext.backends import __bin # NOQA
from fulltext.backends import __csv # NOQA
from fulltext.backends import __doc # NOQA
from fulltext.backends import __docx # NOQA
from fulltext.backends import __eml # NOQA
from fulltext.backends import __epub # NOQA
from fulltext.backends import __gz # NOQA
from fulltext.backends import __html # NOQA
from fulltext.backends import __hwp # NOQA
from fulltext.backends import __json # NOQA
from fulltext.backends import __mbox # NOQA
# XXX couldn't find a way to install ExtractMessage lib with
# pyinstaller.
# from fulltext.backends import __msg # NOQA
from fulltext.backends import __ocr # NOQA
from fulltext.backends import __odt # NOQA
from fulltext.backends import __pdf # NOQA
from fulltext.backends import __pptx # NOQA
from fulltext.backends import __ps # NOQA
from fulltext.backends import __rar # NOQA
from fulltext.backends import __rtf # NOQA
from fulltext.backends import __text # NOQA
from fulltext.backends import __xlsx # NOQA
from fulltext.backends import __xml # NOQA
from fulltext.backends import __zip # NOQA
# =====================================================================
# --- backends
# =====================================================================
def register_backend(mimetype, module, extensions=None):
"""Register a backend.
`mimetype`: a mimetype string (e.g. 'text/plain')
`module`: an import string (e.g. path.to.my.module)
`extensions`: a list of extensions (e.g. ['txt', 'text'])
"""
if mimetype in MIMETYPE_TO_BACKENDS:
warn("overwriting %r mimetype which was already set" % mimetype)
MIMETYPE_TO_BACKENDS[mimetype] = module
if extensions is None:
try:
ext = _MIMETYPES_TO_EXT[mimetype]
except KeyError:
raise KeyError(
"mimetypes module has no extension associated "
"with %r mimetype; use 'extensions' arg yourself" % mimetype)
assert ext, ext
EXTS_TO_MIMETYPES[ext] = mimetype
else:
if not isinstance(extensions, (list, tuple, set, frozenset)):
raise TypeError("invalid extensions type (got %r)" % extensions)
for ext in set(extensions):
ext = ext if ext.startswith('.') else '.' + ext
assert ext, ext
EXTS_TO_MIMETYPES[ext] = mimetype
register_backend(
'application/zip',
'fulltext.backends.__zip',
extensions=[".zip"])
register_backend(
'application/x-rar-compressed',
'fulltext.backends.__rar',
extensions=['.rar'])
for mt in ("text/xml", "application/xml", "application/x-xml"):
register_backend(
mt,
'fulltext.backends.__xml',
extensions=[".xml", ".xsd"])
register_backend(
'application/vnd.ms-excel',
'fulltext.backends.__xlsx',
extensions=['.xls', '.xlsx'])
register_backend(
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'fulltext.backends.__xlsx',
extensions=['.xlsx'])
register_backend(
'text/plain',
'fulltext.backends.__text',
extensions=['.txt', '.text'])
register_backend(
'application/rtf',
'fulltext.backends.__rtf',
extensions=['.rtf'])
register_backend(
'application/vnd.openxmlformats-officedocument.presentationml.presentation', # NOQA
'fulltext.backends.__pptx',
extensions=['.pptx'])
register_backend(
'application/pdf',
'fulltext.backends.__pdf',
extensions=['.pdf'])
register_backend(
'application/vnd.oasis.opendocument.text',
'fulltext.backends.__odt',
extensions=['.odt'])
register_backend(
'application/vnd.oasis.opendocument.spreadsheet',
'fulltext.backends.__odt',
extensions=['.ods'])
# images
register_backend(
'image/jpeg',
'fulltext.backends.__ocr',
extensions=['.jpg', '.jpeg'])
register_backend(
'image/bmp',
'fulltext.backends.__ocr',
extensions=['.bmp'])
register_backend(
'image/png',
'fulltext.backends.__ocr',
extensions=['.png'])
register_backend(
'image/gif',
'fulltext.backends.__ocr',
extensions=['.gif'])
register_backend(
'application/x-hwp',
'fulltext.backends.__hwp',
extensions=['.hwp'])
for mt in ('text/html', 'application/html', 'text/xhtml'):
register_backend(
mt,
'fulltext.backends.__html',
extensions=['.htm', '.html', '.xhtml'])
register_backend(
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'fulltext.backends.__docx',
extensions=['.docx'])
register_backend(
'application/msword',
'fulltext.backends.__doc',
extensions=['.doc'])
for mt in ('text/csv', 'text/tsv', 'text/psv'):
register_backend(
mt,
'fulltext.backends.__csv',
extensions=['.csv', '.tsv', '.psv', '.tab'])
for mt in ("application/epub", "application/epub+zip"):
register_backend(
mt,
'fulltext.backends.__epub',
extensions=[".epub"])
register_backend(
'application/postscript',
'fulltext.backends.__ps',
extensions=[".ps", ".eps", ".ai"])
register_backend(
'message/rfc822',
'fulltext.backends.__eml',
extensions=['.eml'])
register_backend(
'application/mbox',
'fulltext.backends.__mbox',
extensions=['.mbox'])
register_backend(
'application/vnd.ms-outlook',
'fulltext.backends.__msg',
extensions=['.msg'])
register_backend(
'application/gzip',
'fulltext.backends.__gz',
extensions=['.gz'])
register_backend(
'application/json',
'fulltext.backends.__json',
extensions=['.json'])
# default backend.
register_backend(
'application/octet-stream',
'fulltext.backends.__bin',
extensions=['.a', '.bin'])
# Extensions which will be treated as pure text.
# We just come up with a custom mime name.
for ext in _TEXT_EXTS:
register_backend(
'[custom-fulltext-mime]/%s' % ext,
'fulltext.backends.__text',
extensions=[ext])
# =====================================================================
# --- utils
# =====================================================================
def is_binary(f):
"""Return True if binary mode."""
# NOTE: order matters here. We don't bail on Python 2 just yet. Both
# codecs.open() and io.open() can open in text mode, both set the encoding
# attribute. We must do that check first.
# If it has a decoding attribute with a value, it is text mode.
if getattr(f, "encoding", None):
return False
# Python 2 makes no further distinction.
if not PY3:
return True
# If the file has a mode, and it contains b, it is binary.
try:
if 'b' in getattr(f, 'mode', ''):
return True
except TypeError:
import gzip
if isinstance(f, gzip.GzipFile):
return True # in gzip mode is an integer
raise
# Can we sniff?
try:
f.seek(0, os.SEEK_CUR)
except (AttributeError, IOError):
return False
# Finally, let's sniff by reading a byte.
byte = f.read(1)
f.seek(-1, os.SEEK_CUR)
return hasattr(byte, 'decode')
def handle_fobj(backend, f, **kwargs):
    """
    Handle a file-like object.

    Called by `get()` when provided a file-like. This function will prefer
    the backend's `handle_fobj()` if one is provided. Otherwise, it will
    write the data to a temporary file and call `handle_path()`.

    Raises AssertionError when *f* is not opened in binary mode or when the
    backend exposes neither handler.
    """
    if not is_binary(f):
        raise AssertionError('File must be opened in binary mode.')
    if callable(getattr(backend, 'handle_fobj', None)):
        # Prefer handle_fobj() if present.
        LOGGER.debug("using handle_fobj")
        return backend.handle_fobj(f)
    elif callable(getattr(backend, 'handle_path', None)):
        # Fallback to handle_path(). Warn user since this is potentially
        # expensive: the whole stream is spooled to a temp file first.
        LOGGER.debug("using handle_path")
        LOGGER.warning(
            "Using disk, %r backend does not provide `handle_fobj()`", backend)
        ext = ''
        if 'ext' in kwargs:
            ext = '.' + kwargs['ext']
        with fobj_to_tempfile(f, suffix=ext) as fname:
            return backend.handle_path(fname, **kwargs)
    else:
        # BUG FIX: `backend` is a Backend *instance* (see _get), which has
        # no `__name__` attribute; the old code raised AttributeError here
        # instead of the intended AssertionError. Use the type's name.
        raise AssertionError(
            'Backend %s has no _get functions' % type(backend).__name__)
def import_mod(mod_name):
    # A non-empty ``fromlist`` makes __import__ return the leaf module of a
    # dotted path (e.g. "fulltext.backends.__bin") instead of the top-level
    # package.
    return __import__(mod_name, fromlist=[' '])
def backend_from_mime(mime):
    """Determine backend module object from a mime string.

    Unknown mime types fall back to the DEFAULT_MIME (binary) backend.
    """
    try:
        mod_name = MIMETYPE_TO_BACKENDS[mime]
    except KeyError:
        msg = "No handler for %r, defaulting to %r" % (mime, DEFAULT_MIME)
        # Under the test suite this should be loud; in production a debug
        # log entry is enough.
        if 'FULLTEXT_TESTING' in os.environ:
            warn(msg)
        else:
            LOGGER.debug(msg)
        mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
    mod = import_mod(mod_name)
    return mod
def backend_from_fname(name):
"""Determine backend module object from a file name."""
ext = splitext(name)[1]
try:
mime = EXTS_TO_MIMETYPES[ext]
except KeyError:
try:
f = open(name, 'rb')
except IOError as e:
# The file may not exist, we are being asked to determine it's type
# from it's name. Other errors are unexpected.
if e.errno != errno.ENOENT:
raise
# We will have to fall back upon the default backend.
msg = "No handler for %r, defaulting to %r" % (ext, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
else:
with f:
return backend_from_fobj(f)
else:
mod_name = MIMETYPE_TO_BACKENDS[mime]
mod = import_mod(mod_name)
return mod
def backend_from_fobj(f):
"""Determine backend module object from a file object."""
if magic is None:
warn("magic lib is not installed; assuming mime type %r" % (
DEFAULT_MIME))
return backend_from_mime(DEFAULT_MIME)
else:
offset = f.tell()
try:
f.seek(0)
chunk = f.read(MAGIC_BUFFER_SIZE)
mime = magic.from_buffer(chunk, mime=True)
return backend_from_mime(mime)
finally:
f.seek(offset)
def backend_inst_from_mod(mod, encoding, encoding_errors, kwargs):
"""Given a mod and a set of opts return an instantiated
Backend class.
"""
kw = dict(encoding=encoding, encoding_errors=encoding_errors,
kwargs=kwargs)
try:
klass = getattr(mod, "Backend")
except AttributeError:
raise AttributeError("%r mod does not define any backend class" % mod)
inst = klass(**kw)
try:
inst.check(title=False)
except Exception as err:
bin_mod = "fulltext.backends.__bin"
warn("can't use %r due to %r; use %r backend instead" % (
mod, str(err), bin_mod))
inst = import_mod(bin_mod).Backend(**kw)
inst.check(title=False)
LOGGER.debug("using %r" % inst)
return inst
# =====================================================================
# --- public API
# =====================================================================
def _get(path_or_file, default, mime, name, backend, encoding,
encoding_errors, kwargs, _wtitle):
if encoding is None:
encoding = ENCODING
if encoding_errors is None:
encoding_errors = ENCODING_ERRORS
kwargs = kwargs.copy() if kwargs is not None else {}
kwargs.setdefault("mime", mime)
# Find backend module.
if backend is None:
if mime:
backend_mod = backend_from_mime(mime)
elif name:
backend_mod = backend_from_fname(name)
else:
if is_file_path(path_or_file):
backend_mod = backend_from_fname(path_or_file)
else:
if hasattr(path_or_file, "name"):
backend_mod = backend_from_fname(path_or_file.name)
else:
backend_mod = backend_from_fobj(path_or_file)
else:
if isinstance(backend, string_types):
try:
mime = EXTS_TO_MIMETYPES['.' + backend]
except KeyError:
raise ValueError("invalid backend %r" % backend)
backend_mod = backend_from_mime(mime)
else:
backend_mod = backend
# Get backend class.
inst = backend_inst_from_mod(
backend_mod, encoding, encoding_errors, kwargs)
fun = handle_path if is_file_path(path_or_file) else handle_fobj
# Run handle_ function, handle callbacks.
title = None
inst.setup()
try:
text = fun(inst, path_or_file)
if _wtitle:
try:
title = inst.handle_title(path_or_file)
except Exception:
LOGGER.exception("error while getting title (setting to None)")
finally:
inst.teardown()
assert text is not None, "backend function returned None"
text = STRIP_WHITE.sub(' ', text)
text = text.strip()
return (text, title)
def get(path_or_file, default=SENTINAL, mime=None, name=None, backend=None,
encoding=None, encoding_errors=None, kwargs=None,
_wtitle=False):
"""
Get document full text.
Accepts a path or file-like object.
* If given, `default` is returned instead of an error.
* `backend` is either a module object or a string specifying which
default backend to use (e.g. "doc"); take a look at backends
directory to see a list of default backends.
* `mime` and `name` should be passed if the information
is available to caller, otherwise a best guess is made.
If both are specified `mime` takes precedence.
* `encoding` and `encoding_errors` are used to handle text encoding.
They are taken into consideration mostly only by pure-python
backends which do not rely on CLI tools.
Default to "utf8" and "strict" respectively.
* `kwargs` are passed to the underlying backend.
"""
try:
text, title = _get(
path_or_file, default=default, mime=mime, name=name,
backend=backend, kwargs=kwargs, encoding=encoding,
encoding_errors=encoding_errors, _wtitle=_wtitle)
if _wtitle:
return (text, title)
else:
return text
except Exception as e:
if default is not SENTINAL:
LOGGER.exception(e)
return default
raise
def get_with_title(*args, **kwargs):
"""Like get() but also tries to determine document title.
Returns a (text, title) tuple.
"""
kwargs['_wtitle'] = True
return get(*args, **kwargs)
|
btimby/fulltext | fulltext/__init__.py | handle_fobj | python | def handle_fobj(backend, f, **kwargs):
if not is_binary(f):
raise AssertionError('File must be opened in binary mode.')
if callable(getattr(backend, 'handle_fobj', None)):
# Prefer handle_fobj() if present.
LOGGER.debug("using handle_fobj")
return backend.handle_fobj(f)
elif callable(getattr(backend, 'handle_path', None)):
# Fallback to handle_path(). Warn user since this is potentially
# expensive.
LOGGER.debug("using handle_path")
LOGGER.warning(
"Using disk, %r backend does not provide `handle_fobj()`", backend)
ext = ''
if 'ext' in kwargs:
ext = '.' + kwargs['ext']
with fobj_to_tempfile(f, suffix=ext) as fname:
return backend.handle_path(fname, **kwargs)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend.__name__) | Handle a file-like object.
Called by `get()` when provided a file-like. This function will prefer the
backend's `handle_fobj()` if one is provided. Otherwise, it will write the
data to a temporary file and call `handle_path()`. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L390-L421 | [
"def is_binary(f):\n \"\"\"Return True if binary mode.\"\"\"\n # NOTE: order matters here. We don't bail on Python 2 just yet. Both\n # codecs.open() and io.open() can open in text mode, both set the encoding\n # attribute. We must do that check first.\n\n # If it has a decoding attribute with a value, it is text mode.\n if getattr(f, \"encoding\", None):\n return False\n\n # Python 2 makes no further distinction.\n if not PY3:\n return True\n\n # If the file has a mode, and it contains b, it is binary.\n try:\n if 'b' in getattr(f, 'mode', ''):\n return True\n except TypeError:\n import gzip\n if isinstance(f, gzip.GzipFile):\n return True # in gzip mode is an integer\n raise\n\n # Can we sniff?\n try:\n f.seek(0, os.SEEK_CUR)\n except (AttributeError, IOError):\n return False\n\n # Finally, let's sniff by reading a byte.\n byte = f.read(1)\n f.seek(-1, os.SEEK_CUR)\n return hasattr(byte, 'decode')\n"
] | from __future__ import absolute_import
import errno
import re
import logging
import os
import mimetypes
import sys
from os.path import splitext
from six import string_types
from six import PY3
from fulltext.util import warn
from fulltext.util import magic
from fulltext.util import is_file_path
from fulltext.util import fobj_to_tempfile
from fulltext.util import is_windows
__all__ = ["get", "register_backend"]
# --- overridable defaults
ENCODING = sys.getfilesystemencoding()
ENCODING_ERRORS = "strict"
DEFAULT_MIME = 'application/octet-stream'
# --- others
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
STRIP_WHITE = re.compile(r'[ \t\v\f\r\n]+')
SENTINAL = object()
MIMETYPE_TO_BACKENDS = {}
EXTS_TO_MIMETYPES = {}
MAGIC_BUFFER_SIZE = 1024
mimetypes.init()
_MIMETYPES_TO_EXT = dict([(v, k) for k, v in mimetypes.types_map.items()])
# A list of extensions which will be treated as pure text.
# This takes precedence over register_backend().
# https://www.openoffice.org/dev_docs/source/file_extensions.html
_TEXT_EXTS = set((
".asm", # Non-UNIX assembler source file
".asp", # Active Server Page
".awk", # An awk script file
".bat", # MS-DOS batch file
".c", # C language file
".class", # Compiled java source code file
".cmd", # Compiler command file
".cpp", # C++ language file
".cxx", # C++ language file
".def", # Win32 library definition file
".dpc", # Source dependency file containing list of dependencies
".dpj", # Java source dependency file containing list of dependencies
".h", # C header file
".hpp", # Generated C++ header or header plus plus file
".hrc", # An ".src", # include header file
".hxx", # C++ header file
".in",
".inc", # Include file
".ini", # Initialization file
".inl", # Inline header file
".jar", # Java classes archive file
".java", # Java language file
".js", # JavaScript code file
".jsp", # Java Server Page file
".kdelnk", # KDE1 configuration file
".l", # Lex source code file
".ll", # Lex source code file
".lnx", # Linux-specific makefile
".log", # Log file
".lst", # ASCII database file used in solenv
".MacOS",
".md", # Markdown language.
".mk", # A dmake makefile
".mod", # BASIC module file
".par", # Script particles file
".pl", # Perl script
".plc", # Former build script file, now obsolete
".pld", # Former build script file, now obsolete
".pm", # Perl module file
".pmk", # Project makefiles
".pre", # Preprocessor output from scpcomp
".py", # Python
".pyx", # Cython
".r", # Resource file for Macintosh
".rc", # A dmake recursive makefile or a Win32 resource script file
".rdb", # Interface and type description database (type library)
".res", # Resource file
".rst", # Restructured text
".s", # Assembler source file (UNIX)
".sbl", # BASIC file
".scp", # Script source file
".sh", # Shell script
".src", # Source resource string file
".txt", # Language text file
".y", # Yacc source code file
".yaml", # Yaml
".yml", # Yaml
".yxx", # Bison source code file
))
# XXX: dirty hack for pyinstaller so that it includes these modules.
# TODO: find a way to do this in pyinstaller.spec instead.
if is_windows() and hasattr(sys, '_MEIPASS'):
from fulltext.backends import __bin # NOQA
from fulltext.backends import __csv # NOQA
from fulltext.backends import __doc # NOQA
from fulltext.backends import __docx # NOQA
from fulltext.backends import __eml # NOQA
from fulltext.backends import __epub # NOQA
from fulltext.backends import __gz # NOQA
from fulltext.backends import __html # NOQA
from fulltext.backends import __hwp # NOQA
from fulltext.backends import __json # NOQA
from fulltext.backends import __mbox # NOQA
# XXX couldn't find a way to install ExtractMessage lib with
# pyinstaller.
# from fulltext.backends import __msg # NOQA
from fulltext.backends import __ocr # NOQA
from fulltext.backends import __odt # NOQA
from fulltext.backends import __pdf # NOQA
from fulltext.backends import __pptx # NOQA
from fulltext.backends import __ps # NOQA
from fulltext.backends import __rar # NOQA
from fulltext.backends import __rtf # NOQA
from fulltext.backends import __text # NOQA
from fulltext.backends import __xlsx # NOQA
from fulltext.backends import __xml # NOQA
from fulltext.backends import __zip # NOQA
# =====================================================================
# --- backends
# =====================================================================
def register_backend(mimetype, module, extensions=None):
    """Register a backend module for a mimetype.

    `mimetype`: a mimetype string (e.g. 'text/plain')
    `module`: an import string (e.g. path.to.my.module)
    `extensions`: a list of extensions (e.g. ['txt', 'text']); when omitted
    the extension is derived from the stdlib `mimetypes` table.

    Populates the module-level MIMETYPE_TO_BACKENDS and EXTS_TO_MIMETYPES
    registries used by the backend_from_* lookup helpers.
    """
    if mimetype in MIMETYPE_TO_BACKENDS:
        warn("overwriting %r mimetype which was already set" % mimetype)
    MIMETYPE_TO_BACKENDS[mimetype] = module
    if extensions is None:
        # No explicit extensions: look one up from the mimetypes module.
        try:
            ext = _MIMETYPES_TO_EXT[mimetype]
        except KeyError:
            raise KeyError(
                "mimetypes module has no extension associated "
                "with %r mimetype; use 'extensions' arg yourself" % mimetype)
        assert ext, ext
        EXTS_TO_MIMETYPES[ext] = mimetype
    else:
        if not isinstance(extensions, (list, tuple, set, frozenset)):
            raise TypeError("invalid extensions type (got %r)" % extensions)
        for ext in set(extensions):
            # Normalize to a leading dot so registry lookups are uniform.
            ext = ext if ext.startswith('.') else '.' + ext
            assert ext, ext
            EXTS_TO_MIMETYPES[ext] = mimetype
register_backend(
'application/zip',
'fulltext.backends.__zip',
extensions=[".zip"])
register_backend(
'application/x-rar-compressed',
'fulltext.backends.__rar',
extensions=['.rar'])
for mt in ("text/xml", "application/xml", "application/x-xml"):
register_backend(
mt,
'fulltext.backends.__xml',
extensions=[".xml", ".xsd"])
register_backend(
'application/vnd.ms-excel',
'fulltext.backends.__xlsx',
extensions=['.xls', '.xlsx'])
register_backend(
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'fulltext.backends.__xlsx',
extensions=['.xlsx'])
register_backend(
'text/plain',
'fulltext.backends.__text',
extensions=['.txt', '.text'])
register_backend(
'application/rtf',
'fulltext.backends.__rtf',
extensions=['.rtf'])
register_backend(
'application/vnd.openxmlformats-officedocument.presentationml.presentation', # NOQA
'fulltext.backends.__pptx',
extensions=['.pptx'])
register_backend(
'application/pdf',
'fulltext.backends.__pdf',
extensions=['.pdf'])
register_backend(
'application/vnd.oasis.opendocument.text',
'fulltext.backends.__odt',
extensions=['.odt'])
register_backend(
'application/vnd.oasis.opendocument.spreadsheet',
'fulltext.backends.__odt',
extensions=['.ods'])
# images
register_backend(
'image/jpeg',
'fulltext.backends.__ocr',
extensions=['.jpg', '.jpeg'])
register_backend(
'image/bmp',
'fulltext.backends.__ocr',
extensions=['.bmp'])
register_backend(
'image/png',
'fulltext.backends.__ocr',
extensions=['.png'])
register_backend(
'image/gif',
'fulltext.backends.__ocr',
extensions=['.gif'])
register_backend(
'application/x-hwp',
'fulltext.backends.__hwp',
extensions=['.hwp'])
for mt in ('text/html', 'application/html', 'text/xhtml'):
register_backend(
mt,
'fulltext.backends.__html',
extensions=['.htm', '.html', '.xhtml'])
register_backend(
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'fulltext.backends.__docx',
extensions=['.docx'])
register_backend(
'application/msword',
'fulltext.backends.__doc',
extensions=['.doc'])
for mt in ('text/csv', 'text/tsv', 'text/psv'):
register_backend(
mt,
'fulltext.backends.__csv',
extensions=['.csv', '.tsv', '.psv', '.tab'])
for mt in ("application/epub", "application/epub+zip"):
register_backend(
mt,
'fulltext.backends.__epub',
extensions=[".epub"])
register_backend(
'application/postscript',
'fulltext.backends.__ps',
extensions=[".ps", ".eps", ".ai"])
register_backend(
'message/rfc822',
'fulltext.backends.__eml',
extensions=['.eml'])
register_backend(
'application/mbox',
'fulltext.backends.__mbox',
extensions=['.mbox'])
register_backend(
'application/vnd.ms-outlook',
'fulltext.backends.__msg',
extensions=['.msg'])
register_backend(
'application/gzip',
'fulltext.backends.__gz',
extensions=['.gz'])
register_backend(
'application/json',
'fulltext.backends.__json',
extensions=['.json'])
# default backend.
register_backend(
'application/octet-stream',
'fulltext.backends.__bin',
extensions=['.a', '.bin'])
# Extensions which will be treated as pure text.
# We just come up with a custom mime name.
for ext in _TEXT_EXTS:
register_backend(
'[custom-fulltext-mime]/%s' % ext,
'fulltext.backends.__text',
extensions=[ext])
# =====================================================================
# --- utils
# =====================================================================
def is_binary(f):
    """Return True when *f* is opened in binary mode, False for text mode."""
    # Text-mode wrappers (codecs.open / io.open) carry a truthy
    # ``encoding`` attribute; that check must come first.
    if getattr(f, "encoding", None):
        return False
    # Python 2 file objects offer no further distinction.
    if not PY3:
        return True
    # A mode string containing 'b' means binary.
    mode = getattr(f, 'mode', '')
    try:
        opened_binary = 'b' in mode
    except TypeError:
        # GzipFile exposes an *integer* mode, which is always binary.
        import gzip
        if isinstance(f, gzip.GzipFile):
            return True
        raise
    else:
        if opened_binary:
            return True
    # Last resort: sniff a byte, but only if the object is seekable.
    try:
        f.seek(0, os.SEEK_CUR)
    except (AttributeError, IOError):
        return False
    probe = f.read(1)
    f.seek(-1, os.SEEK_CUR)
    # bytes objects have .decode(); text chunks do not.
    return hasattr(probe, 'decode')
def handle_path(backend_inst, path, **kwargs):
    """
    Handle a path.

    Called by `get()` when provided a path. This function will prefer the
    backend's `handle_path()` if one is provided. Otherwise, it will open
    the given path then use `handle_fobj()`.

    Raises AssertionError when the backend exposes neither handler.
    NOTE(review): **kwargs are accepted but not forwarded to the backend —
    presumably intentional for path-based handling; confirm against callers.
    """
    if callable(getattr(backend_inst, 'handle_path', None)):
        # Prefer handle_path() if present.
        LOGGER.debug("using handle_path")
        return backend_inst.handle_path(path)
    elif callable(getattr(backend_inst, 'handle_fobj', None)):
        # Fallback to handle_fobj(). No warning here since the performance
        # hit is minimal.
        LOGGER.debug("using handle_fobj")
        with open(path, 'rb') as f:
            return backend_inst.handle_fobj(f)
    else:
        # BUG FIX: `backend_inst` is a Backend *instance* (see _get), which
        # has no `__name__`; the old code raised AttributeError here instead
        # of the intended AssertionError. Use the type's name.
        raise AssertionError(
            'Backend %s has no _get functions' % type(backend_inst).__name__)
def import_mod(mod_name):
    """Import and return the module named *mod_name* (dotted paths ok)."""
    # The non-empty fromlist forces __import__ to hand back the leaf
    # module rather than the top-level package.
    module = __import__(mod_name, fromlist=[' '])
    return module
def backend_from_mime(mime):
    """Determine backend module object from a mime string.

    Unknown mime types fall back to the DEFAULT_MIME (binary) backend.
    """
    mod_name = MIMETYPE_TO_BACKENDS.get(mime)
    if mod_name is None:
        # No registered handler: be loud under the test suite, quiet
        # otherwise, and degrade to the catch-all backend.
        msg = "No handler for %r, defaulting to %r" % (mime, DEFAULT_MIME)
        if 'FULLTEXT_TESTING' in os.environ:
            warn(msg)
        else:
            LOGGER.debug(msg)
        mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
    return import_mod(mod_name)
def backend_from_fname(name):
    """Determine backend module object from a file name (or path).

    Falls back to content sniffing when the extension is unknown, and to
    the default (binary) backend when the file cannot be opened.
    """
    ext = splitext(name)[1]
    try:
        mime = EXTS_TO_MIMETYPES[ext]
    except KeyError:
        # Unknown extension: try sniffing the file content instead.
        try:
            f = open(name, 'rb')
        except IOError as e:
            # The file may not exist; we are being asked to determine its
            # type from its name alone. Other errors are unexpected.
            if e.errno != errno.ENOENT:
                raise
            # We will have to fall back upon the default backend.
            msg = "No handler for %r, defaulting to %r" % (ext, DEFAULT_MIME)
            if 'FULLTEXT_TESTING' in os.environ:
                warn(msg)
            else:
                LOGGER.debug(msg)
            mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
        else:
            # File opened fine: delegate to content-based detection.
            with f:
                return backend_from_fobj(f)
    else:
        mod_name = MIMETYPE_TO_BACKENDS[mime]
    mod = import_mod(mod_name)
    return mod
def backend_from_fobj(f):
    """Determine backend module object from a file object by sniffing its
    leading bytes with libmagic; assumes DEFAULT_MIME when magic is absent.
    """
    if magic is None:
        warn("magic lib is not installed; assuming mime type %r" % (
            DEFAULT_MIME))
        return backend_from_mime(DEFAULT_MIME)
    original_pos = f.tell()
    try:
        f.seek(0)
        head = f.read(MAGIC_BUFFER_SIZE)
        detected = magic.from_buffer(head, mime=True)
        return backend_from_mime(detected)
    finally:
        # Always restore the caller's file position.
        f.seek(original_pos)
def backend_inst_from_mod(mod, encoding, encoding_errors, kwargs):
    """Given a mod and a set of opts return an instantiated
    Backend class; falls back to the binary backend when the requested
    one fails its self-check.
    """
    kw = dict(encoding=encoding, encoding_errors=encoding_errors,
              kwargs=kwargs)
    try:
        klass = getattr(mod, "Backend")
    except AttributeError:
        raise AttributeError("%r mod does not define any backend class" % mod)
    inst = klass(**kw)
    try:
        # check() presumably validates the backend's external dependencies
        # (CLI tools / libs) — confirm against the Backend base class.
        inst.check(title=False)
    except Exception as err:
        # Degrade gracefully to the catch-all binary backend.
        bin_mod = "fulltext.backends.__bin"
        warn("can't use %r due to %r; use %r backend instead" % (
            mod, str(err), bin_mod))
        inst = import_mod(bin_mod).Backend(**kw)
        inst.check(title=False)
    LOGGER.debug("using %r" % inst)
    return inst
# =====================================================================
# --- public API
# =====================================================================
def _get(path_or_file, default, mime, name, backend, encoding,
         encoding_errors, kwargs, _wtitle):
    """Core implementation behind get(): resolve a backend, run it, and
    return a (text, title) tuple; title is None unless _wtitle is set.

    `default` is accepted for signature parity with get() but is unused
    here — the fallback-on-error behavior lives in get().
    """
    if encoding is None:
        encoding = ENCODING
    if encoding_errors is None:
        encoding_errors = ENCODING_ERRORS
    # Copy so the caller's kwargs dict is never mutated.
    kwargs = kwargs.copy() if kwargs is not None else {}
    kwargs.setdefault("mime", mime)
    # Find backend module: explicit backend wins, then mime, then name,
    # then the path/file object itself.
    if backend is None:
        if mime:
            backend_mod = backend_from_mime(mime)
        elif name:
            backend_mod = backend_from_fname(name)
        else:
            if is_file_path(path_or_file):
                backend_mod = backend_from_fname(path_or_file)
            else:
                if hasattr(path_or_file, "name"):
                    backend_mod = backend_from_fname(path_or_file.name)
                else:
                    backend_mod = backend_from_fobj(path_or_file)
    else:
        if isinstance(backend, string_types):
            # A string backend is an extension-like alias (e.g. "doc").
            try:
                mime = EXTS_TO_MIMETYPES['.' + backend]
            except KeyError:
                raise ValueError("invalid backend %r" % backend)
            backend_mod = backend_from_mime(mime)
        else:
            backend_mod = backend
    # Get backend class.
    inst = backend_inst_from_mod(
        backend_mod, encoding, encoding_errors, kwargs)
    fun = handle_path if is_file_path(path_or_file) else handle_fobj
    # Run handle_ function, handle callbacks.
    title = None
    inst.setup()
    try:
        text = fun(inst, path_or_file)
        if _wtitle:
            # Title extraction is best-effort: failures leave title=None.
            try:
                title = inst.handle_title(path_or_file)
            except Exception:
                LOGGER.exception("error while getting title (setting to None)")
    finally:
        # teardown() always runs, even when the backend raised.
        inst.teardown()
    assert text is not None, "backend function returned None"
    # Collapse runs of whitespace and trim the result.
    text = STRIP_WHITE.sub(' ', text)
    text = text.strip()
    return (text, title)
def get(path_or_file, default=SENTINAL, mime=None, name=None, backend=None,
        encoding=None, encoding_errors=None, kwargs=None,
        _wtitle=False):
    """
    Get document full text.

    Accepts a path or file-like object.
    * If given, `default` is returned instead of an error.
    * `backend` is either a module object or a string specifying which
      default backend to use (e.g. "doc"); take a look at backends
      directory to see a list of default backends.
    * `mime` and `name` should be passed if the information
      is available to caller, otherwise a best guess is made.
      If both are specified `mime` takes precedence.
    * `encoding` and `encoding_errors` are used to handle text encoding.
      They are taken into consideration mostly only by pure-python
      backends which do not rely on CLI tools.
      Default to "utf8" and "strict" respectively.
    * `kwargs` are passed to the underlying backend.
    """
    try:
        text, title = _get(
            path_or_file, default=default, mime=mime, name=name,
            backend=backend, kwargs=kwargs, encoding=encoding,
            encoding_errors=encoding_errors, _wtitle=_wtitle)
        if _wtitle:
            # _wtitle is an internal flag set by get_with_title().
            return (text, title)
        else:
            return text
    except Exception as e:
        # SENTINAL (rather than None) marks "no default supplied", so
        # default=None is itself a legitimate fallback value.
        if default is not SENTINAL:
            # Log the full traceback, then swallow the error.
            LOGGER.exception(e)
            return default
        raise
def get_with_title(*args, **kwargs):
    """Extract full text and additionally attempt title detection.

    Accepts the same arguments as get(); returns a (text, title)
    tuple where title may be None if it could not be determined.
    """
    opts = dict(kwargs, _wtitle=True)
    return get(*args, **opts)
|
btimby/fulltext | fulltext/__init__.py | backend_from_mime | python | def backend_from_mime(mime):
try:
mod_name = MIMETYPE_TO_BACKENDS[mime]
except KeyError:
msg = "No handler for %r, defaulting to %r" % (mime, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
mod = import_mod(mod_name)
return mod | Determine backend module object from a mime string. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L428-L442 | [
"def warn(msg):\n warnings.warn(msg, UserWarning, stacklevel=2)\n LOGGER.warning(msg)\n",
"def import_mod(mod_name):\n return __import__(mod_name, fromlist=[' '])\n"
] | from __future__ import absolute_import
import errno
import re
import logging
import os
import mimetypes
import sys
from os.path import splitext
from six import string_types
from six import PY3
from fulltext.util import warn
from fulltext.util import magic
from fulltext.util import is_file_path
from fulltext.util import fobj_to_tempfile
from fulltext.util import is_windows
__all__ = ["get", "register_backend"]
# --- overridable defaults
ENCODING = sys.getfilesystemencoding()
ENCODING_ERRORS = "strict"
DEFAULT_MIME = 'application/octet-stream'
# --- others
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
STRIP_WHITE = re.compile(r'[ \t\v\f\r\n]+')
SENTINAL = object()
MIMETYPE_TO_BACKENDS = {}
EXTS_TO_MIMETYPES = {}
MAGIC_BUFFER_SIZE = 1024
mimetypes.init()
_MIMETYPES_TO_EXT = dict([(v, k) for k, v in mimetypes.types_map.items()])
# A list of extensions which will be treated as pure text.
# This takes precedence over register_backend().
# https://www.openoffice.org/dev_docs/source/file_extensions.html
_TEXT_EXTS = set((
".asm", # Non-UNIX assembler source file
".asp", # Active Server Page
".awk", # An awk script file
".bat", # MS-DOS batch file
".c", # C language file
".class", # Compiled java source code file
".cmd", # Compiler command file
".cpp", # C++ language file
".cxx", # C++ language file
".def", # Win32 library definition file
".dpc", # Source dependency file containing list of dependencies
".dpj", # Java source dependency file containing list of dependencies
".h", # C header file
".hpp", # Generated C++ header or header plus plus file
".hrc", # An ".src", # include header file
".hxx", # C++ header file
".in",
".inc", # Include file
".ini", # Initialization file
".inl", # Inline header file
".jar", # Java classes archive file
".java", # Java language file
".js", # JavaScript code file
".jsp", # Java Server Page file
".kdelnk", # KDE1 configuration file
".l", # Lex source code file
".ll", # Lex source code file
".lnx", # Linux-specific makefile
".log", # Log file
".lst", # ASCII database file used in solenv
".MacOS",
".md", # Markdown language.
".mk", # A dmake makefile
".mod", # BASIC module file
".par", # Script particles file
".pl", # Perl script
".plc", # Former build script file, now obsolete
".pld", # Former build script file, now obsolete
".pm", # Perl module file
".pmk", # Project makefiles
".pre", # Preprocessor output from scpcomp
".py", # Python
".pyx", # Cython
".r", # Resource file for Macintosh
".rc", # A dmake recursive makefile or a Win32 resource script file
".rdb", # Interface and type description database (type library)
".res", # Resource file
".rst", # Restructured text
".s", # Assembler source file (UNIX)
".sbl", # BASIC file
".scp", # Script source file
".sh", # Shell script
".src", # Source resource string file
".txt", # Language text file
".y", # Yacc source code file
".yaml", # Yaml
".yml", # Yaml
".yxx", # Bison source code file
))
# XXX: dirty hack for pyinstaller so that it includes these modules.
# TODO: find a way to do this in pyinstaller.spec instead.
if is_windows() and hasattr(sys, '_MEIPASS'):
from fulltext.backends import __bin # NOQA
from fulltext.backends import __csv # NOQA
from fulltext.backends import __doc # NOQA
from fulltext.backends import __docx # NOQA
from fulltext.backends import __eml # NOQA
from fulltext.backends import __epub # NOQA
from fulltext.backends import __gz # NOQA
from fulltext.backends import __html # NOQA
from fulltext.backends import __hwp # NOQA
from fulltext.backends import __json # NOQA
from fulltext.backends import __mbox # NOQA
# XXX couldn't find a way to install ExtractMessage lib with
# pyinstaller.
# from fulltext.backends import __msg # NOQA
from fulltext.backends import __ocr # NOQA
from fulltext.backends import __odt # NOQA
from fulltext.backends import __pdf # NOQA
from fulltext.backends import __pptx # NOQA
from fulltext.backends import __ps # NOQA
from fulltext.backends import __rar # NOQA
from fulltext.backends import __rtf # NOQA
from fulltext.backends import __text # NOQA
from fulltext.backends import __xlsx # NOQA
from fulltext.backends import __xml # NOQA
from fulltext.backends import __zip # NOQA
# =====================================================================
# --- backends
# =====================================================================
def register_backend(mimetype, module, extensions=None):
    """Register *module* as the handler for *mimetype*.

    `mimetype`: a mimetype string (e.g. 'text/plain')
    `module`: an import string (e.g. path.to.my.module)
    `extensions`: a list of extensions (e.g. ['txt', 'text']); when
    omitted, the extension is looked up via the mimetypes module.
    """
    if mimetype in MIMETYPE_TO_BACKENDS:
        warn("overwriting %r mimetype which was already set" % mimetype)
    MIMETYPE_TO_BACKENDS[mimetype] = module
    if extensions is None:
        # Derive the extension from the stdlib mimetypes table.
        if mimetype not in _MIMETYPES_TO_EXT:
            raise KeyError(
                "mimetypes module has no extension associated "
                "with %r mimetype; use 'extensions' arg yourself" % mimetype)
        ext = _MIMETYPES_TO_EXT[mimetype]
        assert ext, ext
        EXTS_TO_MIMETYPES[ext] = mimetype
        return
    if not isinstance(extensions, (list, tuple, set, frozenset)):
        raise TypeError("invalid extensions type (got %r)" % extensions)
    for ext in set(extensions):
        # Normalize to a leading-dot form before registering.
        if not ext.startswith('.'):
            ext = '.' + ext
        assert ext, ext
        EXTS_TO_MIMETYPES[ext] = mimetype
register_backend(
'application/zip',
'fulltext.backends.__zip',
extensions=[".zip"])
register_backend(
'application/x-rar-compressed',
'fulltext.backends.__rar',
extensions=['.rar'])
for mt in ("text/xml", "application/xml", "application/x-xml"):
register_backend(
mt,
'fulltext.backends.__xml',
extensions=[".xml", ".xsd"])
register_backend(
'application/vnd.ms-excel',
'fulltext.backends.__xlsx',
extensions=['.xls', '.xlsx'])
register_backend(
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'fulltext.backends.__xlsx',
extensions=['.xlsx'])
register_backend(
'text/plain',
'fulltext.backends.__text',
extensions=['.txt', '.text'])
register_backend(
'application/rtf',
'fulltext.backends.__rtf',
extensions=['.rtf'])
register_backend(
'application/vnd.openxmlformats-officedocument.presentationml.presentation', # NOQA
'fulltext.backends.__pptx',
extensions=['.pptx'])
register_backend(
'application/pdf',
'fulltext.backends.__pdf',
extensions=['.pdf'])
register_backend(
'application/vnd.oasis.opendocument.text',
'fulltext.backends.__odt',
extensions=['.odt'])
register_backend(
'application/vnd.oasis.opendocument.spreadsheet',
'fulltext.backends.__odt',
extensions=['.ods'])
# images
register_backend(
'image/jpeg',
'fulltext.backends.__ocr',
extensions=['.jpg', '.jpeg'])
register_backend(
'image/bmp',
'fulltext.backends.__ocr',
extensions=['.bmp'])
register_backend(
'image/png',
'fulltext.backends.__ocr',
extensions=['.png'])
register_backend(
'image/gif',
'fulltext.backends.__ocr',
extensions=['.gif'])
register_backend(
'application/x-hwp',
'fulltext.backends.__hwp',
extensions=['.hwp'])
for mt in ('text/html', 'application/html', 'text/xhtml'):
register_backend(
mt,
'fulltext.backends.__html',
extensions=['.htm', '.html', '.xhtml'])
register_backend(
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'fulltext.backends.__docx',
extensions=['.docx'])
register_backend(
'application/msword',
'fulltext.backends.__doc',
extensions=['.doc'])
for mt in ('text/csv', 'text/tsv', 'text/psv'):
register_backend(
mt,
'fulltext.backends.__csv',
extensions=['.csv', '.tsv', '.psv', '.tab'])
for mt in ("application/epub", "application/epub+zip"):
register_backend(
mt,
'fulltext.backends.__epub',
extensions=[".epub"])
register_backend(
'application/postscript',
'fulltext.backends.__ps',
extensions=[".ps", ".eps", ".ai"])
register_backend(
'message/rfc822',
'fulltext.backends.__eml',
extensions=['.eml'])
register_backend(
'application/mbox',
'fulltext.backends.__mbox',
extensions=['.mbox'])
register_backend(
'application/vnd.ms-outlook',
'fulltext.backends.__msg',
extensions=['.msg'])
register_backend(
'application/gzip',
'fulltext.backends.__gz',
extensions=['.gz'])
register_backend(
'application/json',
'fulltext.backends.__json',
extensions=['.json'])
# default backend.
register_backend(
'application/octet-stream',
'fulltext.backends.__bin',
extensions=['.a', '.bin'])
# Extensions which will be treated as pure text.
# We just come up with a custom mime name.
for ext in _TEXT_EXTS:
register_backend(
'[custom-fulltext-mime]/%s' % ext,
'fulltext.backends.__text',
extensions=[ext])
# =====================================================================
# --- utils
# =====================================================================
def is_binary(f):
    """Return True if file object *f* appears to be opened in binary mode."""
    # NOTE: order matters here. We don't bail on Python 2 just yet. Both
    # codecs.open() and io.open() can open in text mode, both set the encoding
    # attribute. We must do that check first.
    # If it has a decoding attribute with a value, it is text mode.
    if getattr(f, "encoding", None):
        return False
    # Python 2 makes no further distinction.
    if not PY3:
        return True
    # If the file has a mode, and it contains b, it is binary.
    try:
        if 'b' in getattr(f, 'mode', ''):
            return True
    except TypeError:
        # GzipFile.mode is an integer, so the `in` test above raises.
        import gzip
        if isinstance(f, gzip.GzipFile):
            return True  # in gzip mode is an integer
        raise
    # Can we sniff?
    try:
        f.seek(0, os.SEEK_CUR)
    except (AttributeError, IOError):
        return False
    # Finally, let's sniff by reading a byte: bytes objects have a
    # .decode method while str does not, so a decodable read => binary.
    byte = f.read(1)
    f.seek(-1, os.SEEK_CUR)
    return hasattr(byte, 'decode')
def handle_path(backend_inst, path, **kwargs):
    """
    Handle a path.

    Called by `get()` when provided a path. This function will prefer the
    backend's `handle_path()` if one is provided. Otherwise, it will open
    the given path then use `handle_fobj()`.
    """
    if callable(getattr(backend_inst, 'handle_path', None)):
        # Prefer handle_path() if present.
        LOGGER.debug("using handle_path")
        return backend_inst.handle_path(path)
    elif callable(getattr(backend_inst, 'handle_fobj', None)):
        # Fallback to handle_fobj(). No warning here since the performance hit
        # is minimal.
        LOGGER.debug("using handle_fobj")
        with open(path, 'rb') as f:
            return backend_inst.handle_fobj(f)
    else:
        # BUG FIX: backend_inst is an instance, not a class; instances have
        # no __name__, so this error path previously raised AttributeError
        # instead of the intended AssertionError.
        raise AssertionError(
            'Backend %s has no _get functions' % type(backend_inst).__name__)
def handle_fobj(backend, f, **kwargs):
    """
    Handle a file-like object.

    Called by `get()` when provided a file-like. This function will prefer
    the backend's `handle_fobj()` if one is provided. Otherwise, it will
    write the data to a temporary file and call `handle_path()`.
    """
    if not is_binary(f):
        raise AssertionError('File must be opened in binary mode.')
    if callable(getattr(backend, 'handle_fobj', None)):
        # Prefer handle_fobj() if present.
        LOGGER.debug("using handle_fobj")
        return backend.handle_fobj(f)
    elif callable(getattr(backend, 'handle_path', None)):
        # Fallback to handle_path(). Warn user since this is potentially
        # expensive (the stream is spooled to a temp file first).
        LOGGER.debug("using handle_path")
        LOGGER.warning(
            "Using disk, %r backend does not provide `handle_fobj()`", backend)
        ext = ''
        if 'ext' in kwargs:
            ext = '.' + kwargs['ext']
        with fobj_to_tempfile(f, suffix=ext) as fname:
            return backend.handle_path(fname, **kwargs)
    else:
        # BUG FIX: `backend` is an instance here; instances have no
        # __name__, so the original raised AttributeError instead of the
        # intended AssertionError.
        raise AssertionError(
            'Backend %s has no _get functions' % type(backend).__name__)
def import_mod(mod_name):
    """Import and return the module named *mod_name* (a dotted path).

    The non-empty ``fromlist`` makes ``__import__`` return the leaf
    submodule rather than the top-level package.
    """
    module = __import__(mod_name, fromlist=[' '])
    return module
def backend_from_fname(name):
    """Determine backend module object from a file name."""
    # BUG FIX: all registered extensions are lowercase, so the lookup
    # must be case-insensitive or "FILE.TXT" silently misses the table
    # and falls through to content sniffing / the default backend.
    ext = splitext(name)[1].lower()
    try:
        mime = EXTS_TO_MIMETYPES[ext]
    except KeyError:
        try:
            f = open(name, 'rb')
        except IOError as e:
            # The file may not exist; we are being asked to determine its
            # type from its name alone. Other errors are unexpected.
            if e.errno != errno.ENOENT:
                raise
            # We will have to fall back upon the default backend.
            msg = "No handler for %r, defaulting to %r" % (ext, DEFAULT_MIME)
            if 'FULLTEXT_TESTING' in os.environ:
                warn(msg)
            else:
                LOGGER.debug(msg)
            mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
        else:
            # The file exists: sniff its content with libmagic instead.
            with f:
                return backend_from_fobj(f)
    else:
        mod_name = MIMETYPE_TO_BACKENDS[mime]
    mod = import_mod(mod_name)
    return mod
def backend_from_fobj(f):
    """Determine backend module object from a file object."""
    if magic is None:
        # Without libmagic we cannot sniff content; assume the default.
        warn("magic lib is not installed; assuming mime type %r" % (
            DEFAULT_MIME))
        return backend_from_mime(DEFAULT_MIME)
    saved_pos = f.tell()
    try:
        f.seek(0)
        header = f.read(MAGIC_BUFFER_SIZE)
        detected = magic.from_buffer(header, mime=True)
        return backend_from_mime(detected)
    finally:
        # Always restore the caller's file position.
        f.seek(saved_pos)
def backend_inst_from_mod(mod, encoding, encoding_errors, kwargs):
    """Instantiate and return the Backend class defined by module *mod*.

    Falls back to the binary backend if the chosen backend fails its
    self-check (e.g. a required CLI tool is missing).
    """
    opts = {"encoding": encoding, "encoding_errors": encoding_errors,
            "kwargs": kwargs}
    if not hasattr(mod, "Backend"):
        raise AttributeError("%r mod does not define any backend class" % mod)
    inst = mod.Backend(**opts)
    try:
        inst.check(title=False)
    except Exception as err:
        # Backend unusable; degrade to the binary backend, which is
        # expected to always pass its check.
        bin_mod = "fulltext.backends.__bin"
        warn("can't use %r due to %r; use %r backend instead" % (
            mod, str(err), bin_mod))
        inst = import_mod(bin_mod).Backend(**opts)
        inst.check(title=False)
    LOGGER.debug("using %r" % inst)
    return inst
# =====================================================================
# --- public API
# =====================================================================
def _get(path_or_file, default, mime, name, backend, encoding,
         encoding_errors, kwargs, _wtitle):
    """Internal worker for get(): resolve a backend and extract text.

    Returns a (text, title) tuple; title is None unless *_wtitle* is true
    and the backend could determine one.

    NOTE(review): the `default` argument is accepted but unused here;
    fallback to `default` on error is implemented by the public get()
    wrapper, which catches whatever this function raises.
    """
    # Fill in module-level defaults for text-encoding handling.
    if encoding is None:
        encoding = ENCODING
    if encoding_errors is None:
        encoding_errors = ENCODING_ERRORS
    # Copy so the caller's kwargs dict is never mutated.
    kwargs = kwargs.copy() if kwargs is not None else {}
    kwargs.setdefault("mime", mime)
    # Find backend module. Resolution priority: explicit `backend` arg,
    # then `mime`, then `name`, then the path's extension, then (for file
    # objects) the object's .name attribute, finally content sniffing.
    if backend is None:
        if mime:
            backend_mod = backend_from_mime(mime)
        elif name:
            backend_mod = backend_from_fname(name)
        else:
            if is_file_path(path_or_file):
                backend_mod = backend_from_fname(path_or_file)
            else:
                if hasattr(path_or_file, "name"):
                    backend_mod = backend_from_fname(path_or_file.name)
                else:
                    backend_mod = backend_from_fobj(path_or_file)
    else:
        if isinstance(backend, string_types):
            # A string backend names an extension (e.g. "doc").
            try:
                mime = EXTS_TO_MIMETYPES['.' + backend]
            except KeyError:
                raise ValueError("invalid backend %r" % backend)
            backend_mod = backend_from_mime(mime)
        else:
            # Otherwise assume a backend module object was passed directly.
            backend_mod = backend
    # Get backend class (may fall back to the binary backend if the
    # chosen one fails its self-check).
    inst = backend_inst_from_mod(
        backend_mod, encoding, encoding_errors, kwargs)
    fun = handle_path if is_file_path(path_or_file) else handle_fobj
    # Run handle_ function, handle callbacks.
    title = None
    inst.setup()
    try:
        text = fun(inst, path_or_file)
        if _wtitle:
            # Title extraction is best-effort; any failure leaves
            # title as None rather than failing the whole call.
            try:
                title = inst.handle_title(path_or_file)
            except Exception:
                LOGGER.exception("error while getting title (setting to None)")
    finally:
        # Always give the backend a chance to clean up.
        inst.teardown()
    assert text is not None, "backend function returned None"
    # Collapse every run of whitespace to a single space and trim ends.
    text = STRIP_WHITE.sub(' ', text)
    text = text.strip()
    return (text, title)
def get(path_or_file, default=SENTINAL, mime=None, name=None, backend=None,
        encoding=None, encoding_errors=None, kwargs=None,
        _wtitle=False):
    """
    Get document full text.

    Accepts a path or file-like object.
    * If given, `default` is returned instead of an error.
    * `backend` is either a module object or a string specifying which
      default backend to use (e.g. "doc"); take a look at backends
      directory to see a list of default backends.
    * `mime` and `name` should be passed if the information
      is available to caller, otherwise a best guess is made.
      If both are specified `mime` takes precedence.
    * `encoding` and `encoding_errors` are used to handle text encoding.
      They are taken into consideration mostly only by pure-python
      backends which do not rely on CLI tools.
      Default to "utf8" and "strict" respectively.
    * `kwargs` are passed to the underlying backend.
    """
    try:
        result = _get(
            path_or_file, default=default, mime=mime, name=name,
            backend=backend, kwargs=kwargs, encoding=encoding,
            encoding_errors=encoding_errors, _wtitle=_wtitle)
    except Exception as e:
        # SENTINAL marks "no default supplied"; default=None is a
        # legitimate fallback value, hence the identity check.
        if default is SENTINAL:
            raise
        LOGGER.exception(e)
        return default
    text, title = result
    return (text, title) if _wtitle else text
def get_with_title(*args, **kwargs):
    """Like get() but also tries to determine document title.
    Returns a (text, title) tuple.
    """
    # Force the internal _wtitle flag; every other argument passes
    # through to get() unchanged.
    kwargs['_wtitle'] = True
    return get(*args, **kwargs)
|
btimby/fulltext | fulltext/__init__.py | backend_from_fname | python | def backend_from_fname(name):
ext = splitext(name)[1]
try:
mime = EXTS_TO_MIMETYPES[ext]
except KeyError:
try:
f = open(name, 'rb')
except IOError as e:
# The file may not exist, we are being asked to determine it's type
# from it's name. Other errors are unexpected.
if e.errno != errno.ENOENT:
raise
# We will have to fall back upon the default backend.
msg = "No handler for %r, defaulting to %r" % (ext, DEFAULT_MIME)
if 'FULLTEXT_TESTING' in os.environ:
warn(msg)
else:
LOGGER.debug(msg)
mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
else:
with f:
return backend_from_fobj(f)
else:
mod_name = MIMETYPE_TO_BACKENDS[mime]
mod = import_mod(mod_name)
return mod | Determine backend module object from a file name. | train | https://github.com/btimby/fulltext/blob/9234cc1e2099209430e20317649549026de283ce/fulltext/__init__.py#L445-L479 | [
"def backend_from_fobj(f):\n \"\"\"Determine backend module object from a file object.\"\"\"\n if magic is None:\n warn(\"magic lib is not installed; assuming mime type %r\" % (\n DEFAULT_MIME))\n return backend_from_mime(DEFAULT_MIME)\n else:\n offset = f.tell()\n try:\n f.seek(0)\n chunk = f.read(MAGIC_BUFFER_SIZE)\n mime = magic.from_buffer(chunk, mime=True)\n return backend_from_mime(mime)\n finally:\n f.seek(offset)\n",
"def import_mod(mod_name):\n return __import__(mod_name, fromlist=[' '])\n"
] | from __future__ import absolute_import
import errno
import re
import logging
import os
import mimetypes
import sys
from os.path import splitext
from six import string_types
from six import PY3
from fulltext.util import warn
from fulltext.util import magic
from fulltext.util import is_file_path
from fulltext.util import fobj_to_tempfile
from fulltext.util import is_windows
__all__ = ["get", "register_backend"]
# --- overridable defaults
ENCODING = sys.getfilesystemencoding()
ENCODING_ERRORS = "strict"
DEFAULT_MIME = 'application/octet-stream'
# --- others
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
STRIP_WHITE = re.compile(r'[ \t\v\f\r\n]+')
SENTINAL = object()
MIMETYPE_TO_BACKENDS = {}
EXTS_TO_MIMETYPES = {}
MAGIC_BUFFER_SIZE = 1024
mimetypes.init()
_MIMETYPES_TO_EXT = dict([(v, k) for k, v in mimetypes.types_map.items()])
# A list of extensions which will be treated as pure text.
# This takes precedence over register_backend().
# https://www.openoffice.org/dev_docs/source/file_extensions.html
_TEXT_EXTS = set((
".asm", # Non-UNIX assembler source file
".asp", # Active Server Page
".awk", # An awk script file
".bat", # MS-DOS batch file
".c", # C language file
".class", # Compiled java source code file
".cmd", # Compiler command file
".cpp", # C++ language file
".cxx", # C++ language file
".def", # Win32 library definition file
".dpc", # Source dependency file containing list of dependencies
".dpj", # Java source dependency file containing list of dependencies
".h", # C header file
".hpp", # Generated C++ header or header plus plus file
".hrc", # An ".src", # include header file
".hxx", # C++ header file
".in",
".inc", # Include file
".ini", # Initialization file
".inl", # Inline header file
".jar", # Java classes archive file
".java", # Java language file
".js", # JavaScript code file
".jsp", # Java Server Page file
".kdelnk", # KDE1 configuration file
".l", # Lex source code file
".ll", # Lex source code file
".lnx", # Linux-specific makefile
".log", # Log file
".lst", # ASCII database file used in solenv
".MacOS",
".md", # Markdown language.
".mk", # A dmake makefile
".mod", # BASIC module file
".par", # Script particles file
".pl", # Perl script
".plc", # Former build script file, now obsolete
".pld", # Former build script file, now obsolete
".pm", # Perl module file
".pmk", # Project makefiles
".pre", # Preprocessor output from scpcomp
".py", # Python
".pyx", # Cython
".r", # Resource file for Macintosh
".rc", # A dmake recursive makefile or a Win32 resource script file
".rdb", # Interface and type description database (type library)
".res", # Resource file
".rst", # Restructured text
".s", # Assembler source file (UNIX)
".sbl", # BASIC file
".scp", # Script source file
".sh", # Shell script
".src", # Source resource string file
".txt", # Language text file
".y", # Yacc source code file
".yaml", # Yaml
".yml", # Yaml
".yxx", # Bison source code file
))
# XXX: dirty hack for pyinstaller so that it includes these modules.
# TODO: find a way to do this in pyinstaller.spec instead.
if is_windows() and hasattr(sys, '_MEIPASS'):
from fulltext.backends import __bin # NOQA
from fulltext.backends import __csv # NOQA
from fulltext.backends import __doc # NOQA
from fulltext.backends import __docx # NOQA
from fulltext.backends import __eml # NOQA
from fulltext.backends import __epub # NOQA
from fulltext.backends import __gz # NOQA
from fulltext.backends import __html # NOQA
from fulltext.backends import __hwp # NOQA
from fulltext.backends import __json # NOQA
from fulltext.backends import __mbox # NOQA
# XXX couldn't find a way to install ExtractMessage lib with
# pyinstaller.
# from fulltext.backends import __msg # NOQA
from fulltext.backends import __ocr # NOQA
from fulltext.backends import __odt # NOQA
from fulltext.backends import __pdf # NOQA
from fulltext.backends import __pptx # NOQA
from fulltext.backends import __ps # NOQA
from fulltext.backends import __rar # NOQA
from fulltext.backends import __rtf # NOQA
from fulltext.backends import __text # NOQA
from fulltext.backends import __xlsx # NOQA
from fulltext.backends import __xml # NOQA
from fulltext.backends import __zip # NOQA
# =====================================================================
# --- backends
# =====================================================================
def register_backend(mimetype, module, extensions=None):
"""Register a backend.
`mimetype`: a mimetype string (e.g. 'text/plain')
`module`: an import string (e.g. path.to.my.module)
`extensions`: a list of extensions (e.g. ['txt', 'text'])
"""
if mimetype in MIMETYPE_TO_BACKENDS:
warn("overwriting %r mimetype which was already set" % mimetype)
MIMETYPE_TO_BACKENDS[mimetype] = module
if extensions is None:
try:
ext = _MIMETYPES_TO_EXT[mimetype]
except KeyError:
raise KeyError(
"mimetypes module has no extension associated "
"with %r mimetype; use 'extensions' arg yourself" % mimetype)
assert ext, ext
EXTS_TO_MIMETYPES[ext] = mimetype
else:
if not isinstance(extensions, (list, tuple, set, frozenset)):
raise TypeError("invalid extensions type (got %r)" % extensions)
for ext in set(extensions):
ext = ext if ext.startswith('.') else '.' + ext
assert ext, ext
EXTS_TO_MIMETYPES[ext] = mimetype
register_backend(
'application/zip',
'fulltext.backends.__zip',
extensions=[".zip"])
register_backend(
'application/x-rar-compressed',
'fulltext.backends.__rar',
extensions=['.rar'])
for mt in ("text/xml", "application/xml", "application/x-xml"):
register_backend(
mt,
'fulltext.backends.__xml',
extensions=[".xml", ".xsd"])
register_backend(
'application/vnd.ms-excel',
'fulltext.backends.__xlsx',
extensions=['.xls', '.xlsx'])
register_backend(
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'fulltext.backends.__xlsx',
extensions=['.xlsx'])
register_backend(
'text/plain',
'fulltext.backends.__text',
extensions=['.txt', '.text'])
register_backend(
'application/rtf',
'fulltext.backends.__rtf',
extensions=['.rtf'])
register_backend(
'application/vnd.openxmlformats-officedocument.presentationml.presentation', # NOQA
'fulltext.backends.__pptx',
extensions=['.pptx'])
register_backend(
'application/pdf',
'fulltext.backends.__pdf',
extensions=['.pdf'])
register_backend(
'application/vnd.oasis.opendocument.text',
'fulltext.backends.__odt',
extensions=['.odt'])
register_backend(
'application/vnd.oasis.opendocument.spreadsheet',
'fulltext.backends.__odt',
extensions=['.ods'])
# images
register_backend(
'image/jpeg',
'fulltext.backends.__ocr',
extensions=['.jpg', '.jpeg'])
register_backend(
'image/bmp',
'fulltext.backends.__ocr',
extensions=['.bmp'])
register_backend(
'image/png',
'fulltext.backends.__ocr',
extensions=['.png'])
register_backend(
'image/gif',
'fulltext.backends.__ocr',
extensions=['.gif'])
register_backend(
'application/x-hwp',
'fulltext.backends.__hwp',
extensions=['.hwp'])
for mt in ('text/html', 'application/html', 'text/xhtml'):
register_backend(
mt,
'fulltext.backends.__html',
extensions=['.htm', '.html', '.xhtml'])
register_backend(
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'fulltext.backends.__docx',
extensions=['.docx'])
register_backend(
'application/msword',
'fulltext.backends.__doc',
extensions=['.doc'])
for mt in ('text/csv', 'text/tsv', 'text/psv'):
register_backend(
mt,
'fulltext.backends.__csv',
extensions=['.csv', '.tsv', '.psv', '.tab'])
for mt in ("application/epub", "application/epub+zip"):
register_backend(
mt,
'fulltext.backends.__epub',
extensions=[".epub"])
register_backend(
'application/postscript',
'fulltext.backends.__ps',
extensions=[".ps", ".eps", ".ai"])
register_backend(
'message/rfc822',
'fulltext.backends.__eml',
extensions=['.eml'])
register_backend(
'application/mbox',
'fulltext.backends.__mbox',
extensions=['.mbox'])
register_backend(
'application/vnd.ms-outlook',
'fulltext.backends.__msg',
extensions=['.msg'])
register_backend(
'application/gzip',
'fulltext.backends.__gz',
extensions=['.gz'])
register_backend(
'application/json',
'fulltext.backends.__json',
extensions=['.json'])
# default backend.
register_backend(
'application/octet-stream',
'fulltext.backends.__bin',
extensions=['.a', '.bin'])
# Extensions which will be treated as pure text.
# We just come up with a custom mime name.
for ext in _TEXT_EXTS:
register_backend(
'[custom-fulltext-mime]/%s' % ext,
'fulltext.backends.__text',
extensions=[ext])
# =====================================================================
# --- utils
# =====================================================================
def is_binary(f):
"""Return True if binary mode."""
# NOTE: order matters here. We don't bail on Python 2 just yet. Both
# codecs.open() and io.open() can open in text mode, both set the encoding
# attribute. We must do that check first.
# If it has a decoding attribute with a value, it is text mode.
if getattr(f, "encoding", None):
return False
# Python 2 makes no further distinction.
if not PY3:
return True
# If the file has a mode, and it contains b, it is binary.
try:
if 'b' in getattr(f, 'mode', ''):
return True
except TypeError:
import gzip
if isinstance(f, gzip.GzipFile):
return True # in gzip mode is an integer
raise
# Can we sniff?
try:
f.seek(0, os.SEEK_CUR)
except (AttributeError, IOError):
return False
# Finally, let's sniff by reading a byte.
byte = f.read(1)
f.seek(-1, os.SEEK_CUR)
return hasattr(byte, 'decode')
def handle_path(backend_inst, path, **kwargs):
"""
Handle a path.
Called by `get()` when provided a path. This function will prefer the
backend's `handle_path()` if one is provided Otherwise, it will open the
given path then use `handle_fobj()`.
"""
if callable(getattr(backend_inst, 'handle_path', None)):
# Prefer handle_path() if present.
LOGGER.debug("using handle_path")
return backend_inst.handle_path(path)
elif callable(getattr(backend_inst, 'handle_fobj', None)):
# Fallback to handle_fobj(). No warning here since the performance hit
# is minimal.
LOGGER.debug("using handle_fobj")
with open(path, 'rb') as f:
return backend_inst.handle_fobj(f)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend_inst.__name__)
def handle_fobj(backend, f, **kwargs):
"""
Handle a file-like object.
Called by `get()` when provided a file-like. This function will prefer the
backend's `handle_fobj()` if one is provided. Otherwise, it will write the
data to a temporary file and call `handle_path()`.
"""
if not is_binary(f):
raise AssertionError('File must be opened in binary mode.')
if callable(getattr(backend, 'handle_fobj', None)):
# Prefer handle_fobj() if present.
LOGGER.debug("using handle_fobj")
return backend.handle_fobj(f)
elif callable(getattr(backend, 'handle_path', None)):
# Fallback to handle_path(). Warn user since this is potentially
# expensive.
LOGGER.debug("using handle_path")
LOGGER.warning(
"Using disk, %r backend does not provide `handle_fobj()`", backend)
ext = ''
if 'ext' in kwargs:
ext = '.' + kwargs['ext']
with fobj_to_tempfile(f, suffix=ext) as fname:
return backend.handle_path(fname, **kwargs)
else:
raise AssertionError(
'Backend %s has no _get functions' % backend.__name__)
def import_mod(mod_name):
return __import__(mod_name, fromlist=[' '])
def backend_from_mime(mime):
    """Determine backend module object from a mime string."""
    mod_name = MIMETYPE_TO_BACKENDS.get(mime)
    if mod_name is None:
        # Unknown mime: fall back to the default (binary) handler.
        msg = "No handler for %r, defaulting to %r" % (mime, DEFAULT_MIME)
        if 'FULLTEXT_TESTING' in os.environ:
            warn(msg)
        else:
            LOGGER.debug(msg)
        mod_name = MIMETYPE_TO_BACKENDS[DEFAULT_MIME]
    return import_mod(mod_name)
def backend_from_fobj(f):
"""Determine backend module object from a file object."""
if magic is None:
warn("magic lib is not installed; assuming mime type %r" % (
DEFAULT_MIME))
return backend_from_mime(DEFAULT_MIME)
else:
offset = f.tell()
try:
f.seek(0)
chunk = f.read(MAGIC_BUFFER_SIZE)
mime = magic.from_buffer(chunk, mime=True)
return backend_from_mime(mime)
finally:
f.seek(offset)
def backend_inst_from_mod(mod, encoding, encoding_errors, kwargs):
"""Given a mod and a set of opts return an instantiated
Backend class.
"""
kw = dict(encoding=encoding, encoding_errors=encoding_errors,
kwargs=kwargs)
try:
klass = getattr(mod, "Backend")
except AttributeError:
raise AttributeError("%r mod does not define any backend class" % mod)
inst = klass(**kw)
try:
inst.check(title=False)
except Exception as err:
bin_mod = "fulltext.backends.__bin"
warn("can't use %r due to %r; use %r backend instead" % (
mod, str(err), bin_mod))
inst = import_mod(bin_mod).Backend(**kw)
inst.check(title=False)
LOGGER.debug("using %r" % inst)
return inst
# =====================================================================
# --- public API
# =====================================================================
def _get(path_or_file, default, mime, name, backend, encoding,
         encoding_errors, kwargs, _wtitle):
    """Resolve a backend, run it and return a ``(text, title)`` tuple.

    Internal workhorse behind :func:`get` / :func:`get_with_title`.

    * ``default`` is accepted for signature symmetry but unused here;
      error-to-default handling lives in the ``get()`` wrapper.
    * ``mime`` takes precedence over ``name`` when picking a backend.
    * ``title`` is only computed when ``_wtitle`` is true, and is
      ``None`` when the backend cannot provide one.
    """
    if encoding is None:
        encoding = ENCODING
    if encoding_errors is None:
        encoding_errors = ENCODING_ERRORS
    # Copy so the caller's dict is never mutated by setdefault().
    kwargs = kwargs.copy() if kwargs is not None else {}
    kwargs.setdefault("mime", mime)
    # Find backend module.
    backend_mod = _resolve_backend_mod(path_or_file, mime, name, backend)
    # Get backend class.
    inst = backend_inst_from_mod(
        backend_mod, encoding, encoding_errors, kwargs)
    fun = handle_path if is_file_path(path_or_file) else handle_fobj
    # Run handle_ function, handle callbacks.
    title = None
    inst.setup()
    try:
        text = fun(inst, path_or_file)
        if _wtitle:
            try:
                title = inst.handle_title(path_or_file)
            except Exception:
                LOGGER.exception("error while getting title (setting to None)")
    finally:
        inst.teardown()
    # Explicit raise instead of a bare assert: asserts are stripped
    # under ``python -O``, which would let a None result through.
    if text is None:
        raise AssertionError("backend function returned None")
    text = STRIP_WHITE.sub(' ', text)
    text = text.strip()
    return (text, title)


def _resolve_backend_mod(path_or_file, mime, name, backend):
    """Pick the backend module for this input.

    Precedence: explicit ``backend`` (module object or extension
    string) wins, then ``mime``, then ``name``, then sniffing the
    path / file object itself.
    """
    if backend is not None:
        if isinstance(backend, string_types):
            try:
                mime = EXTS_TO_MIMETYPES['.' + backend]
            except KeyError:
                raise ValueError("invalid backend %r" % backend)
            return backend_from_mime(mime)
        return backend
    if mime:
        return backend_from_mime(mime)
    if name:
        return backend_from_fname(name)
    if is_file_path(path_or_file):
        return backend_from_fname(path_or_file)
    if hasattr(path_or_file, "name"):
        return backend_from_fname(path_or_file.name)
    return backend_from_fobj(path_or_file)
def get(path_or_file, default=SENTINAL, mime=None, name=None, backend=None,
        encoding=None, encoding_errors=None, kwargs=None,
        _wtitle=False):
    """Extract and return a document's full text.

    ``path_or_file`` may be a filesystem path or a file-like object.

    * ``default``: when given, it is returned on error instead of
      raising (the error is logged).
    * ``backend``: a module object, or a string naming one of the
      default backends (e.g. "doc"); see the backends directory.
    * ``mime`` / ``name``: pass them when already known to skip the
      guessing step; ``mime`` takes precedence if both are given.
    * ``encoding`` / ``encoding_errors``: text decoding options,
      honoured mostly by the pure-python backends that don't shell
      out to CLI tools (defaults: "utf8" and "strict").
    * ``kwargs``: passed through to the underlying backend.
    """
    try:
        text, title = _get(
            path_or_file, default=default, mime=mime, name=name,
            backend=backend, kwargs=kwargs, encoding=encoding,
            encoding_errors=encoding_errors, _wtitle=_wtitle)
    except Exception as e:
        if default is SENTINAL:
            raise
        LOGGER.exception(e)
        return default
    return (text, title) if _wtitle else text
def get_with_title(*args, **kwargs):
    """Same as :func:`get`, but additionally tries to determine the
    document title.  Returns a ``(text, title)`` tuple.
    """
    return get(*args, **dict(kwargs, _wtitle=True))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.