repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
GiulioRossetti/ndlib | ndlib/models/ModelConfig.py | Configuration.add_edge_configuration | python | def add_edge_configuration(self, param_name, edge, param_value):
if param_name not in self.config['edges']:
self.config['edges'][param_name] = {edge: param_value}
else:
self.config['edges'][param_name][edge] = param_value | Set a parameter for a given edge
:param param_name: parameter identifier (as specified by the chosen model)
:param edge: edge identifier
:param param_value: parameter value | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/ModelConfig.py#L95-L106 | null | class Configuration(object):
"""
Configuration Object
"""
def __init__(self):
self.config = {
'nodes': {},
'edges': {},
'model': {},
'status': {}
}
def get_nodes_configuration(self):
"""
Nodes configurations
:return: dictionary that link each node to its attributes
"""
return self.config['nodes']
def get_edges_configuration(self):
"""
Edges configurations
:return: dictionary that link each edge to its attributes
"""
return self.config['edges']
def get_model_parameters(self):
"""
Model parameters
:return: dictionary describes the specified model parameters
"""
return self.config['model']
def get_model_configuration(self):
"""
Initial configuration
:return: initial nodes status (if specified)
"""
return self.config['status']
def add_model_parameter(self, param_name, param_value):
"""
Set a Model Parameter
:param param_name: parameter identifier (as specified by the chosen model)
:param param_value: parameter value
"""
self.config['model'][param_name] = param_value
def add_model_initial_configuration(self, status_name, nodes):
"""
Set initial status for a set of nodes
:param status_name: status to be set (as specified by the chosen model)
:param nodes: list of affected nodes
"""
self.config['status'][status_name] = nodes
def add_node_configuration(self, param_name, node_id, param_value):
"""
Set a parameter for a given node
:param param_name: parameter identifier (as specified by the chosen model)
:param node_id: node identifier
:param param_value: parameter value
"""
if param_name not in self.config['nodes']:
self.config['nodes'][param_name] = {node_id: param_value}
else:
self.config['nodes'][param_name][node_id] = param_value
def add_node_set_configuration(self, param_name, node_to_value):
"""
Set Nodes parameter
:param param_name: parameter identifier (as specified by the chosen model)
:param node_to_value: dictionary mapping each node a parameter value
"""
for nid, val in future.utils.iteritems(node_to_value):
self.add_node_configuration(param_name, nid, val)
def add_edge_set_configuration(self, param_name, edge_to_value):
"""
Set Edges parameter
:param param_name: parameter identifier (as specified by the chosen model)
:param edge_to_value: dictionary mapping each edge a parameter value
"""
for edge, val in future.utils.iteritems(edge_to_value):
self.add_edge_configuration(param_name, edge, val)
|
GiulioRossetti/ndlib | ndlib/models/ModelConfig.py | Configuration.add_edge_set_configuration | python | def add_edge_set_configuration(self, param_name, edge_to_value):
for edge, val in future.utils.iteritems(edge_to_value):
self.add_edge_configuration(param_name, edge, val) | Set Edges parameter
:param param_name: parameter identifier (as specified by the chosen model)
:param edge_to_value: dictionary mapping each edge a parameter value | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/ModelConfig.py#L108-L116 | [
"def add_edge_configuration(self, param_name, edge, param_value):\n \"\"\"\n Set a parameter for a given edge\n\n :param param_name: parameter identifier (as specified by the chosen model)\n :param edge: edge identifier\n :param param_value: parameter value\n \"\"\"\n if param_name not in self.config['edges']:\n self.config['edges'][param_name] = {edge: param_value}\n else:\n self.config['edges'][param_name][edge] = param_value\n"
] | class Configuration(object):
"""
Configuration Object
"""
def __init__(self):
self.config = {
'nodes': {},
'edges': {},
'model': {},
'status': {}
}
def get_nodes_configuration(self):
"""
Nodes configurations
:return: dictionary that link each node to its attributes
"""
return self.config['nodes']
def get_edges_configuration(self):
"""
Edges configurations
:return: dictionary that link each edge to its attributes
"""
return self.config['edges']
def get_model_parameters(self):
"""
Model parameters
:return: dictionary describes the specified model parameters
"""
return self.config['model']
def get_model_configuration(self):
"""
Initial configuration
:return: initial nodes status (if specified)
"""
return self.config['status']
def add_model_parameter(self, param_name, param_value):
"""
Set a Model Parameter
:param param_name: parameter identifier (as specified by the chosen model)
:param param_value: parameter value
"""
self.config['model'][param_name] = param_value
def add_model_initial_configuration(self, status_name, nodes):
"""
Set initial status for a set of nodes
:param status_name: status to be set (as specified by the chosen model)
:param nodes: list of affected nodes
"""
self.config['status'][status_name] = nodes
def add_node_configuration(self, param_name, node_id, param_value):
"""
Set a parameter for a given node
:param param_name: parameter identifier (as specified by the chosen model)
:param node_id: node identifier
:param param_value: parameter value
"""
if param_name not in self.config['nodes']:
self.config['nodes'][param_name] = {node_id: param_value}
else:
self.config['nodes'][param_name][node_id] = param_value
def add_node_set_configuration(self, param_name, node_to_value):
"""
Set Nodes parameter
:param param_name: parameter identifier (as specified by the chosen model)
:param node_to_value: dictionary mapping each node a parameter value
"""
for nid, val in future.utils.iteritems(node_to_value):
self.add_node_configuration(param_name, nid, val)
def add_edge_configuration(self, param_name, edge, param_value):
"""
Set a parameter for a given edge
:param param_name: parameter identifier (as specified by the chosen model)
:param edge: edge identifier
:param param_value: parameter value
"""
if param_name not in self.config['edges']:
self.config['edges'][param_name] = {edge: param_value}
else:
self.config['edges'][param_name][edge] = param_value
|
GiulioRossetti/ndlib | ndlib/models/opinions/SznajdModel.py | SznajdModel.iteration | python | def iteration(self, node_status=True):
# One iteration changes the opinion of several voters using the following procedure:
# - select randomly one voter (speaker 1)
# - select randomly one of its neighbours (speaker 2)
# - if the two voters agree, their neighbours take their opinion
self.clean_initial_status(self.available_statuses.values())
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(self.status)
if node_status:
return {"iteration": 0, "status": self.status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
delta = {}
status_delta = {st: 0 for st in self.available_statuses.values()}
# select a random node
speaker1 = list(self.graph.nodes())[np.random.randint(0, self.graph.number_of_nodes())]
# select a random neighbour
neighbours = list(self.graph.neighbors(speaker1))
if isinstance(self.graph, nx.DiGraph):
# add also the predecessors
neighbours += list(self.graph.predecessors(speaker1))
speaker2 = neighbours[np.random.randint(0, len(neighbours))]
if self.status[speaker1] == self.status[speaker2]:
# select listeners (all neighbours of two speakers)
neighbours = list(self.graph.neighbors(speaker1)) + list(self.graph.neighbors(speaker2))
if isinstance(self.graph, nx.DiGraph):
# assumed if a->b then b can be influenced by a
# but not the other way around - the link between the speakers doesn't matter
neighbours = list(self.graph.successors(speaker1)) + list(self.graph.successors(speaker2))
# update status of listeners
for listener in neighbours:
if self.status[speaker1] != self.status[listener]:
delta[listener] = self.status[speaker1]
status_delta[self.status[listener]] += 1
for x in self.available_statuses.values():
if x != self.status[listener]:
status_delta[x] -= 1
self.status[listener] = self.status[speaker1]
node_count = {st: len([n for n in self.status if self.status[n] == st])
for st in self.available_statuses.values()}
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()} | Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status) | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/opinions/SznajdModel.py#L28-L95 | [
"def clean_initial_status(self, valid_status=None):\n \"\"\"\n Check the consistency of initial status\n :param valid_status: valid node configurations\n \"\"\"\n for n, s in future.utils.iteritems(self.status):\n if s not in valid_status:\n self.status[n] = 0\n",
"def status_delta(self, actual_status):\n \"\"\"\n Compute the point-to-point variations for each status w.r.t. the previous system configuration\n\n :param actual_status: the actual simulation status\n :return: node that have changed their statuses (dictionary status->nodes),\n count of actual nodes per status (dictionary status->node count),\n delta of nodes per status w.r.t the previous configuration (dictionary status->delta)\n \"\"\"\n actual_status_count = {}\n old_status_count = {}\n delta = {}\n for n, v in future.utils.iteritems(self.status):\n if v != actual_status[n]:\n delta[n] = actual_status[n]\n\n for st in self.available_statuses.values():\n actual_status_count[st] = len([x for x in actual_status if actual_status[x] == st])\n old_status_count[st] = len([x for x in self.status if self.status[x] == st])\n\n status_delta = {st: actual_status_count[st] - old_status_count[st] for st in actual_status_count}\n\n return delta, actual_status_count, status_delta\n"
] | class SznajdModel(DiffusionModel):
"""
"""
def __init__(self, graph):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph)
self.available_statuses = {
"Susceptible": 0,
"Infected": 1,
}
self.name = "Sznajd"
|
GiulioRossetti/ndlib | ndlib/utils.py | multi_runs | python | def multi_runs(model, execution_number=1, iteration_number=50, infection_sets=None,
nprocesses=multiprocessing.cpu_count()):
if nprocesses > multiprocessing.cpu_count():
nprocesses = multiprocessing.cpu_count()
executions = []
if infection_sets is not None:
if len(infection_sets) != execution_number:
raise InitializationException(
{"message": "Number of infection sets provided does not match the number of executions required"})
for x in past.builtins.xrange(0, execution_number, nprocesses):
with closing(multiprocessing.Pool(processes=nprocesses, maxtasksperchild=10)) as pool:
tasks = [copy.copy(model).reset(infection_sets[i]) for i in
past.builtins.xrange(x, min(x + nprocesses, execution_number))]
results = [pool.apply_async(__execute, (t, iteration_number)) for t in tasks]
for result in results:
executions.append(result.get())
else:
for x in past.builtins.xrange(0, execution_number, nprocesses):
with closing(multiprocessing.Pool(processes=nprocesses, maxtasksperchild=10)) as pool:
tasks = [copy.deepcopy(model).reset() for _ in
past.builtins.xrange(x, min(x + nprocesses, execution_number))]
results = [pool.apply_async(__execute, (t, iteration_number)) for t in tasks]
for result in results:
executions.append(result.get())
return executions | Multiple executions of a given model varying the initial set of infected nodes
:param model: a configured diffusion model
:param execution_number: number of instantiations
:param iteration_number: number of iterations per execution
:param infection_sets: predefined set of infected nodes sets
:param nprocesses: number of processes. Default values cpu number.
:return: resulting trends for all the executions | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/utils.py#L15-L58 | null | import multiprocessing
from contextlib import closing
import copy
import past
__author__ = 'Giulio Rossetti'
__license__ = "BSD-2-Clause"
__email__ = "giulio.rossetti@gmail.com"
class InitializationException(Exception):
"""Initialization Exception"""
def __execute(model, iteration_number):
"""
Execute a simulation model
:param model: a configured diffusion model
:param iteration_number: number of iterations
:return: computed trends
"""
iterations = model.iteration_bunch(iteration_number, False)
trends = model.build_trends(iterations)[0]
del iterations
del model
return trends
|
GiulioRossetti/ndlib | ndlib/utils.py | __execute | python | def __execute(model, iteration_number):
iterations = model.iteration_bunch(iteration_number, False)
trends = model.build_trends(iterations)[0]
del iterations
del model
return trends | Execute a simulation model
:param model: a configured diffusion model
:param iteration_number: number of iterations
:return: computed trends | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/utils.py#L61-L73 | null | import multiprocessing
from contextlib import closing
import copy
import past
__author__ = 'Giulio Rossetti'
__license__ = "BSD-2-Clause"
__email__ = "giulio.rossetti@gmail.com"
class InitializationException(Exception):
"""Initialization Exception"""
def multi_runs(model, execution_number=1, iteration_number=50, infection_sets=None,
nprocesses=multiprocessing.cpu_count()):
"""
Multiple executions of a given model varying the initial set of infected nodes
:param model: a configured diffusion model
:param execution_number: number of instantiations
:param iteration_number: number of iterations per execution
:param infection_sets: predefined set of infected nodes sets
:param nprocesses: number of processes. Default values cpu number.
:return: resulting trends for all the executions
"""
if nprocesses > multiprocessing.cpu_count():
nprocesses = multiprocessing.cpu_count()
executions = []
if infection_sets is not None:
if len(infection_sets) != execution_number:
raise InitializationException(
{"message": "Number of infection sets provided does not match the number of executions required"})
for x in past.builtins.xrange(0, execution_number, nprocesses):
with closing(multiprocessing.Pool(processes=nprocesses, maxtasksperchild=10)) as pool:
tasks = [copy.copy(model).reset(infection_sets[i]) for i in
past.builtins.xrange(x, min(x + nprocesses, execution_number))]
results = [pool.apply_async(__execute, (t, iteration_number)) for t in tasks]
for result in results:
executions.append(result.get())
else:
for x in past.builtins.xrange(0, execution_number, nprocesses):
with closing(multiprocessing.Pool(processes=nprocesses, maxtasksperchild=10)) as pool:
tasks = [copy.deepcopy(model).reset() for _ in
past.builtins.xrange(x, min(x + nprocesses, execution_number))]
results = [pool.apply_async(__execute, (t, iteration_number)) for t in tasks]
for result in results:
executions.append(result.get())
return executions
|
GiulioRossetti/ndlib | ndlib/models/DynamicCompostiteModel.py | DynamicCompositeModel.iteration | python | def iteration(self, node_status=True):
self.clean_initial_status(self.available_statuses.values())
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(actual_status)
if node_status:
return {"iteration": 0, "status": actual_status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
for u in self.graph.nodes():
u_status = self.status[u]
for i in range(0, self.compartment_progressive):
if u_status == self.available_statuses[self.compartment[i][0]]:
rule = self.compartment[i][2]
test = rule.execute(node=u, graph=self.graph, status=self.status,
status_map=self.available_statuses, params=self.params)
if test:
actual_status[u] = self.available_statuses[self.compartment[i][1]]
break
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()} | Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status) | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/DynamicCompostiteModel.py#L30-L70 | [
"def clean_initial_status(self, valid_status=None):\n \"\"\"\n Check the consistency of initial status\n :param valid_status: valid node configurations\n \"\"\"\n for n, s in future.utils.iteritems(self.status):\n if s not in valid_status:\n self.status[n] = 0\n",
"def status_delta(self, actual_status):\n \"\"\"\n Compute the point-to-point variations for each status w.r.t. the previous system configuration\n\n :param actual_status: the actual simulation status\n :return: node that have changed their statuses (dictionary status->nodes),\n count of actual nodes per status (dictionary status->node count),\n delta of nodes per status w.r.t the previous configuration (dictionary status->delta)\n \"\"\"\n actual_status_count = {}\n old_status_count = {}\n delta = {}\n for n, v in future.utils.iteritems(self.status):\n if v != actual_status[n]:\n delta[n] = actual_status[n]\n\n for st in self.available_statuses.values():\n actual_status_count[st] = len([x for x in actual_status if actual_status[x] == st])\n old_status_count[st] = len([x for x in self.status if self.status[x] == st])\n\n status_delta = {st: actual_status_count[st] - old_status_count[st] for st in actual_status_count}\n\n return delta, actual_status_count, status_delta\n"
] | class DynamicCompositeModel(DynamicDiffusionModel):
def __init__(self, graph):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph)
self.available_statuses = {}
self.compartment = {}
self.compartment_progressive = 0
self.status_progressive = 0
def add_status(self, status_name):
self.available_statuses[status_name] = self.status_progressive
self.status_progressive += 1
def add_rule(self, status_from, status_to, rule):
self.compartment[self.compartment_progressive] = (status_from, status_to, rule)
self.compartment_progressive += 1
|
GiulioRossetti/ndlib | ndlib/viz/bokeh/MultiPlot.py | MultiPlot.plot | python | def plot(self, ncols=2):
grid = gridplot(self.plots, ncols=ncols)
return grid | :param ncols: Number of grid columns
:return: a bokeh figure image | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/viz/bokeh/MultiPlot.py#L19-L25 | null | class MultiPlot(object):
def __init__(self):
self.plots = []
def add_plot(self, plot):
"""
:param plot: The bokeh plot to add to the grid
"""
self.plots.append(plot)
|
GiulioRossetti/ndlib | ndlib/models/opinions/MajorityRuleModel.py | MajorityRuleModel.iteration | python | def iteration(self, node_status=True):
# One iteration changes the opinion of at most q voters using the following procedure:
# - select randomly q voters
# - compute majority opinion
# - if tie all agents take opinion +1
# - if not tie, all agents take majority opinion
self.clean_initial_status(self.available_statuses.values())
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(self.status)
if node_status:
return {"iteration": 0, "status": self.status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
# select q random nodes
discussion_group = [list(self.graph.nodes())[i]
for i in np.random.randint(0, self.graph.number_of_nodes(), self.params['model']['q'])]
# compute majority
majority_vote = 1
vote_sum = sum([self.status[node] for node in discussion_group])
if vote_sum < (self.params["model"]["q"] / 2.0):
majority_vote = 0 # in case of tie, majority_vote remains 1
# update status of nodes in discussion group
delta = {}
status_delta = {st: 0 for st in self.available_statuses.values()}
for listener in discussion_group:
if majority_vote != self.status[listener]:
delta[listener] = majority_vote
status_delta[self.status[listener]] += 1
for x in self.available_statuses.values():
if x != self.status[listener]:
status_delta[x] -= 1
self.status[listener] = majority_vote
# fix
node_count = {st: len([n for n in self.status if self.status[n] == st])
for st in self.available_statuses.values()}
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()} | Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status) | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/opinions/MajorityRuleModel.py#L38-L99 | [
"def clean_initial_status(self, valid_status=None):\n \"\"\"\n Check the consistency of initial status\n :param valid_status: valid node configurations\n \"\"\"\n for n, s in future.utils.iteritems(self.status):\n if s not in valid_status:\n self.status[n] = 0\n",
"def status_delta(self, actual_status):\n \"\"\"\n Compute the point-to-point variations for each status w.r.t. the previous system configuration\n\n :param actual_status: the actual simulation status\n :return: node that have changed their statuses (dictionary status->nodes),\n count of actual nodes per status (dictionary status->node count),\n delta of nodes per status w.r.t the previous configuration (dictionary status->delta)\n \"\"\"\n actual_status_count = {}\n old_status_count = {}\n delta = {}\n for n, v in future.utils.iteritems(self.status):\n if v != actual_status[n]:\n delta[n] = actual_status[n]\n\n for st in self.available_statuses.values():\n actual_status_count[st] = len([x for x in actual_status if actual_status[x] == st])\n old_status_count[st] = len([x for x in self.status if self.status[x] == st])\n\n status_delta = {st: actual_status_count[st] - old_status_count[st] for st in actual_status_count}\n\n return delta, actual_status_count, status_delta\n"
] | class MajorityRuleModel(DiffusionModel):
"""
"""
def __init__(self, graph):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph)
self.available_statuses = {
"Susceptible": 0,
"Infected": 1,
}
self.parameters = {"model": {
"q": {
"descr": "Number of randomly chosen voters",
"range": [0, len(self.graph.nodes())],
"optional": False
}
},
"nodes": {},
"edges": {}
}
self.name = "Majority Rule"
|
GiulioRossetti/ndlib | ndlib/models/dynamic/DynSIModel.py | DynSIModel.iteration | python | def iteration(self, node_status=True):
self.clean_initial_status(self.available_statuses.values())
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
# streaming
if self.stream_execution:
u, v = list(self.graph.edges())[0]
u_status = self.status[u]
v_status = self.status[v]
if u_status == 1 and v_status == 0:
p = np.random.random_sample()
if p < self.params['model']['beta']:
actual_status[v] = 1
if v_status == 1 and u_status == 0:
p = np.random.random_sample()
if p < self.params['model']['beta']:
actual_status[u] = 1
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
# snapshot
else:
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(actual_status)
if node_status:
return {"iteration": 0, "status": actual_status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
for u in self.graph.nodes():
u_status = self.status[u]
eventp = np.random.random_sample()
neighbors = self.graph.neighbors(u)
if isinstance(self.graph, nx.DiGraph):
neighbors = self.graph.predecessors(u)
if u_status == 0:
infected_neighbors = len([v for v in neighbors if self.status[v] == 1])
if eventp < self.params['model']['beta'] * infected_neighbors:
actual_status[u] = 1
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()} | Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status) | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/dynamic/DynSIModel.py#L43-L113 | [
"def clean_initial_status(self, valid_status=None):\n \"\"\"\n Check the consistency of initial status\n :param valid_status: valid node configurations\n \"\"\"\n for n, s in future.utils.iteritems(self.status):\n if s not in valid_status:\n self.status[n] = 0\n",
"def status_delta(self, actual_status):\n \"\"\"\n Compute the point-to-point variations for each status w.r.t. the previous system configuration\n\n :param actual_status: the actual simulation status\n :return: node that have changed their statuses (dictionary status->nodes),\n count of actual nodes per status (dictionary status->node count),\n delta of nodes per status w.r.t the previous configuration (dictionary status->delta)\n \"\"\"\n actual_status_count = {}\n old_status_count = {}\n delta = {}\n for n, v in future.utils.iteritems(self.status):\n if v != actual_status[n]:\n delta[n] = actual_status[n]\n\n for st in self.available_statuses.values():\n actual_status_count[st] = len([x for x in actual_status if actual_status[x] == st])\n old_status_count[st] = len([x for x in self.status if self.status[x] == st])\n\n status_delta = {st: actual_status_count[st] - old_status_count[st] for st in actual_status_count}\n\n return delta, actual_status_count, status_delta\n"
] | class DynSIModel(DynamicDiffusionModel):
"""
Model Parameters to be specified via ModelConfig
:param beta: The infection rate (float value in [0,1])
"""
def __init__(self, graph):
"""
Model Constructor
:param graph: A dynetx graph object
"""
super(self.__class__, self).__init__(graph)
self.available_statuses = {
"Susceptible": 0,
"Infected": 1
}
self.parameters = {
"model": {
"beta": {
"descr": "Infection rate",
"range": "[0,1]",
"optional": False}
},
"nodes": {},
"edges": {},
}
self.name = "SI"
|
GiulioRossetti/ndlib | ndlib/models/epidemics/IndependentCascadesModel.py | IndependentCascadesModel.iteration | python | def iteration(self, node_status=True):
self.clean_initial_status(self.available_statuses.values())
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(actual_status)
if node_status:
return {"iteration": 0, "status": actual_status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
for u in self.graph.nodes():
if self.status[u] != 1:
continue
neighbors = list(self.graph.neighbors(u)) # neighbors and successors (in DiGraph) produce the same result
# Standard threshold
if len(neighbors) > 0:
threshold = 1.0/len(neighbors)
for v in neighbors:
if actual_status[v] == 0:
key = (u, v)
# Individual specified thresholds
if 'threshold' in self.params['edges']:
if key in self.params['edges']['threshold']:
threshold = self.params['edges']['threshold'][key]
elif (v, u) in self.params['edges']['threshold'] and not nx.is_directed(self.graph):
threshold = self.params['edges']['threshold'][(v, u)]
flip = np.random.random_sample()
if flip <= threshold:
actual_status[v] = 1
actual_status[u] = 2
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()} | Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status) | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/epidemics/IndependentCascadesModel.py#L46-L101 | [
"def clean_initial_status(self, valid_status=None):\n \"\"\"\n Check the consistency of initial status\n :param valid_status: valid node configurations\n \"\"\"\n for n, s in future.utils.iteritems(self.status):\n if s not in valid_status:\n self.status[n] = 0\n",
"def status_delta(self, actual_status):\n \"\"\"\n Compute the point-to-point variations for each status w.r.t. the previous system configuration\n\n :param actual_status: the actual simulation status\n :return: node that have changed their statuses (dictionary status->nodes),\n count of actual nodes per status (dictionary status->node count),\n delta of nodes per status w.r.t the previous configuration (dictionary status->delta)\n \"\"\"\n actual_status_count = {}\n old_status_count = {}\n delta = {}\n for n, v in future.utils.iteritems(self.status):\n if v != actual_status[n]:\n delta[n] = actual_status[n]\n\n for st in self.available_statuses.values():\n actual_status_count[st] = len([x for x in actual_status if actual_status[x] == st])\n old_status_count[st] = len([x for x in self.status if self.status[x] == st])\n\n status_delta = {st: actual_status_count[st] - old_status_count[st] for st in actual_status_count}\n\n return delta, actual_status_count, status_delta\n"
] | class IndependentCascadesModel(DiffusionModel):
"""
Edge Parameters to be specified via ModelConfig
:param threshold: The edge threshold. As default a value of 0.1 is assumed for all edges.
"""
def __init__(self, graph):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph)
self.available_statuses = {
"Susceptible": 0,
"Infected": 1,
"Removed": 2
}
self.parameters = {
"model": {},
"nodes": {},
"edges": {
"threshold": {
"descr": "Edge threshold",
"range": [0, 1],
"optional": True,
"default": 0.1
}
},
}
self.name = "Independent Cascades"
|
GiulioRossetti/ndlib | ndlib/models/epidemics/KerteszThresholdModel.py | KerteszThresholdModel.iteration | python | def iteration(self, node_status=True):
self.clean_initial_status(self.available_statuses.values())
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
if self.actual_iteration == 0:
if min(actual_status.values()) == 0:
number_node_blocked = int(float(self.graph.number_of_nodes()) *
float(self.params['model']['percentage_blocked']))
i = 0
while i < number_node_blocked:
# select a random node
node = list(self.graph.nodes())[np.random.randint(0, self.graph.number_of_nodes())]
# node not infected
if actual_status[node] == 0:
# node blocked
actual_status[node] = -1
self.status[node] = -1
i += 1
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(actual_status)
if node_status:
return {"iteration": 0, "status": actual_status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
for node in self.graph.nodes():
if self.status[node] == 0:
if self.params['model']['adopter_rate'] > 0:
xk = (0, 1)
pk = (1-self.params['model']['adopter_rate'], self.params['model']['adopter_rate'])
probability = stats.rv_discrete(name='probability', values=(xk, pk))
number_probability = probability.rvs()
if number_probability == 1:
actual_status[node] = 1
continue
neighbors = list(self.graph.neighbors(node))
if len(neighbors) == 0:
continue
if isinstance(self.graph, nx.DiGraph):
neighbors = self.graph.predecessors(node)
infected = 0
for v in neighbors:
if self.status[v] != -1:
infected += self.status[v]
infected_ratio = float(infected)/len(neighbors)
if infected_ratio >= self.params['nodes']['threshold'][node]:
actual_status[node] = 1
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()} | Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status) | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/epidemics/KerteszThresholdModel.py#L62-L136 | [
"def clean_initial_status(self, valid_status=None):\n \"\"\"\n Check the consistency of initial status\n :param valid_status: valid node configurations\n \"\"\"\n for n, s in future.utils.iteritems(self.status):\n if s not in valid_status:\n self.status[n] = 0\n",
"def status_delta(self, actual_status):\n \"\"\"\n Compute the point-to-point variations for each status w.r.t. the previous system configuration\n\n :param actual_status: the actual simulation status\n :return: node that have changed their statuses (dictionary status->nodes),\n count of actual nodes per status (dictionary status->node count),\n delta of nodes per status w.r.t the previous configuration (dictionary status->delta)\n \"\"\"\n actual_status_count = {}\n old_status_count = {}\n delta = {}\n for n, v in future.utils.iteritems(self.status):\n if v != actual_status[n]:\n delta[n] = actual_status[n]\n\n for st in self.available_statuses.values():\n actual_status_count[st] = len([x for x in actual_status if actual_status[x] == st])\n old_status_count[st] = len([x for x in self.status if self.status[x] == st])\n\n status_delta = {st: actual_status_count[st] - old_status_count[st] for st in actual_status_count}\n\n return delta, actual_status_count, status_delta\n"
] | class KerteszThresholdModel(DiffusionModel):
"""
Node/Model Parameters to be specified via ModelConfig
:param threshold: The node threshold. As default a value of 0.1 is assumed for all nodes.
:param adopter_rate: The probability of spontaneous adoptions. Defaults value 0.
:param percentage_infected: The percentage of blocked nodes. Default value 0.1.
"""
def __init__(self, graph):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph)
self.available_statuses = {
"Susceptible": 0,
"Infected": 1,
"Blocked": -1
}
self.parameters = {
"model": {
"adopter_rate": {
"descr": "Exogenous adoption rate",
"range": [0, 1],
"optional": True,
"default": 0
},
"percentage_blocked": {
"descr": "Percentage of blocked nodes",
"range": [0, 1],
"optional": True,
"default": 0.1
}
},
"nodes": {
"threshold": {
"descr": "Node threshold",
"range": [0, 1],
"optional": True,
"default": 0.1
}
},
"edges": {},
}
self.name = "Kertesz Threhold"
|
GiulioRossetti/ndlib | ndlib/models/dynamic/DynProfileThresholdModel.py | DynProfileThresholdModel.iteration | python | def iteration(self, node_status=True):
self.clean_initial_status(self.available_statuses.values())
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
# streaming
if self.stream_execution:
raise ValueError("Streaming network not allowed.")
# snapshot
else:
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(actual_status)
if node_status:
return {"iteration": 0, "status": actual_status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
for u in self.graph.nodes():
if actual_status[u] != 0:
continue
if self.params['model']['adopter_rate'] > 0:
xk = (0, 1)
pk = (1 - self.params['model']['adopter_rate'], self.params['model']['adopter_rate'])
probability = stats.rv_discrete(name='probability', values=(xk, pk))
number_probability = probability.rvs()
if number_probability == 1:
actual_status[u] = 1
continue
neighbors = list(self.graph.neighbors(u))
if isinstance(self.graph, nx.DiGraph):
neighbors = list(self.graph.predecessors(u))
infected = 0
for v in neighbors:
infected += self.status[v]
if infected > 0 and actual_status[u] == 0:
infected_ratio = float(infected) / len(neighbors)
if infected_ratio >= self.params['nodes']['threshold'][u]:
eventp = np.random.random_sample()
if eventp >= self.params['nodes']['profile'][u]:
actual_status[u] = 1
else:
if self.params['model']['blocked'] != 0:
blip = np.random.random_sample()
if blip > self.params['model']['blocked']:
actual_status[u] = -1
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()} | Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status) | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/dynamic/DynProfileThresholdModel.py#L67-L135 | [
"def clean_initial_status(self, valid_status=None):\n \"\"\"\n Check the consistency of initial status\n :param valid_status: valid node configurations\n \"\"\"\n for n, s in future.utils.iteritems(self.status):\n if s not in valid_status:\n self.status[n] = 0\n",
"def status_delta(self, actual_status):\n \"\"\"\n Compute the point-to-point variations for each status w.r.t. the previous system configuration\n\n :param actual_status: the actual simulation status\n :return: node that have changed their statuses (dictionary status->nodes),\n count of actual nodes per status (dictionary status->node count),\n delta of nodes per status w.r.t the previous configuration (dictionary status->delta)\n \"\"\"\n actual_status_count = {}\n old_status_count = {}\n delta = {}\n for n, v in future.utils.iteritems(self.status):\n if v != actual_status[n]:\n delta[n] = actual_status[n]\n\n for st in self.available_statuses.values():\n actual_status_count[st] = len([x for x in actual_status if actual_status[x] == st])\n old_status_count[st] = len([x for x in self.status if self.status[x] == st])\n\n status_delta = {st: actual_status_count[st] - old_status_count[st] for st in actual_status_count}\n\n return delta, actual_status_count, status_delta\n"
] | class DynProfileThresholdModel(DynamicDiffusionModel):
"""
Node Parameters to be specified via ModelConfig
:param profile: The node profile. As default a value of 0.1 is assumed for all nodes.
:param threshold: The node threshold. As default a value of 0.1 is assumed for all nodes.
"""
def __init__(self, graph):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph)
self.available_statuses = {
"Susceptible": 0,
"Infected": 1,
"Blocked": -1
}
self.parameters = {
"model": {
"blocked": {
"descr": "Presence of blocked nodes",
"range": [0, 1],
"optional": True,
"default": 0
},
"adopter_rate": {
"descr": "Exogenous adoption rate",
"range": [0, 1],
"optional": True,
"default": 0
}
},
"nodes": {
"threshold": {
"descr": "Node threshold",
"range": [0, 1],
"optional": True,
"default": 0.1
},
"profile": {
"descr": "Node profile",
"range": [0, 1],
"optional": True,
"default": 0.1
}
},
"edges": {},
}
self.name = "Profile-Threshold"
|
GiulioRossetti/ndlib | ndlib/models/opinions/AlgorithmicBiasModel.py | AlgorithmicBiasModel.set_initial_status | python | def set_initial_status(self, configuration=None):
super(AlgorithmicBiasModel, self).set_initial_status(configuration)
# set node status
for node in self.status:
self.status[node] = np.random.random_sample()
self.initial_status = self.status.copy() | Override behaviour of methods in class DiffusionModel.
Overwrites initial status using random real values. | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/opinions/AlgorithmicBiasModel.py#L54-L64 | [
"def set_initial_status(self, configuration):\n \"\"\"\n Set the initial model configuration\n\n :param configuration: a ```ndlib.models.ModelConfig.Configuration``` object\n \"\"\"\n\n self.__validate_configuration(configuration)\n\n nodes_cfg = configuration.get_nodes_configuration()\n # Set additional node information\n\n for param, node_to_value in future.utils.iteritems(nodes_cfg):\n if len(node_to_value) < len(self.graph.nodes()):\n raise ConfigurationException({\"message\": \"Not all nodes have a configuration specified\"})\n\n self.params['nodes'][param] = node_to_value\n\n edges_cfg = configuration.get_edges_configuration()\n # Set additional edges information\n for param, edge_to_values in future.utils.iteritems(edges_cfg):\n if len(edge_to_values) == len(self.graph.edges()):\n self.params['edges'][param] = {}\n for e in edge_to_values:\n self.params['edges'][param][e] = edge_to_values[e]\n\n # Set initial status\n model_status = configuration.get_model_configuration()\n\n for param, nodes in future.utils.iteritems(model_status):\n self.params['status'][param] = nodes\n for node in nodes:\n self.status[node] = self.available_statuses[param]\n\n # Set model additional information\n model_params = configuration.get_model_parameters()\n for param, val in future.utils.iteritems(model_params):\n self.params['model'][param] = val\n\n # Handle initial infection\n if 'Infected' not in self.params['status']:\n if 'percentage_infected' in self.params['model']:\n number_of_initial_infected = len(self.graph.nodes()) * float(self.params['model']['percentage_infected'])\n if number_of_initial_infected < 1:\n warnings.warn('Graph with less than 100 nodes: a single node will be set as infected')\n number_of_initial_infected = 1\n\n available_nodes = [n for n in self.status if self.status[n] == 0]\n sampled_nodes = np.random.choice(available_nodes, int(number_of_initial_infected), replace=False)\n for k in sampled_nodes:\n self.status[k] = 
self.available_statuses['Infected']\n\n self.initial_status = self.status\n"
] | class AlgorithmicBiasModel(DiffusionModel):
"""
Model Parameters to be specified via ModelConfig
:param epsilon: bounded confidence threshold from the Deffuant model, in [0,1]
:param gamma: strength of the algorithmic bias, positive, real
Node states are continuous values in [0,1].
The initial state is generated randomly uniformly from the domain [0,1].
"""
def __init__(self, graph):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph)
self.discrete_state = False
self.available_statuses = {
"Infected": 0
}
self.parameters = {
"model": {
"epsilon": {
"descr": "Bounded confidence threshold",
"range": [0, 1],
"optional": False
},
"gamma": {
"descr": "Algorithmic bias",
"range": [0, 100],
"optional": False
}
},
"nodes": {},
"edges": {}
}
self.name = "Agorithmic Bias"
def clean_initial_status(self, valid_status=None):
for n, s in future.utils.iteritems(self.status):
if s > 1 or s < 0:
self.status[n] = 0
@staticmethod
def prob(distance, gamma, min_dist):
if distance < min_dist:
distance = min_dist
return np.power(distance, -gamma)
def iteration(self, node_status=True):
"""
Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status)
"""
# One iteration changes the opinion of N agent pairs using the following procedure:
# - first one agent is selected
# - then a second agent is selected based on a probability that decreases with the distance to the first agent
# - if the two agents have a distance smaller than epsilon, then they change their status to the average of
# their previous statuses
self.clean_initial_status(None)
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(self.status)
if node_status:
return {"iteration": 0, "status": self.status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
# interact with peers
for i in range(0, self.graph.number_of_nodes()):
# select a random node
n1 = list(self.graph.nodes())[np.random.randint(0, self.graph.number_of_nodes())]
# select all of the node's neighbours (no digraph possible)
neighbours = list(self.graph.neighbors(n1))
if len(neighbours) == 0:
continue
# compute probabilities to select a second node among the neighbours
selection_prob = np.array([self.prob(np.abs(actual_status[neighbours[i]]-actual_status[n1]),
self.params['model']['gamma'],0.00001) for i in range(len(neighbours))])
selection_prob = selection_prob/np.sum(selection_prob)
cumulative_selection_probability = np.cumsum(selection_prob)
# select second nodebased on selection probabilities above
r = np.random.random_sample()
n2 = 0
while cumulative_selection_probability[n2] < r:
n2 = n2+1
n2 = neighbours[n2]
# update status of n1 and n2
diff = np.abs(actual_status[n1]-actual_status[n2])
if diff < self.params['model']['epsilon']:
avg = (actual_status[n1]+actual_status[n2])/2.0
actual_status[n1] = avg
actual_status[n2] = avg
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
|
GiulioRossetti/ndlib | ndlib/models/opinions/AlgorithmicBiasModel.py | AlgorithmicBiasModel.iteration | python | def iteration(self, node_status=True):
# One iteration changes the opinion of N agent pairs using the following procedure:
# - first one agent is selected
# - then a second agent is selected based on a probability that decreases with the distance to the first agent
# - if the two agents have a distance smaller than epsilon, then they change their status to the average of
# their previous statuses
self.clean_initial_status(None)
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(self.status)
if node_status:
return {"iteration": 0, "status": self.status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
# interact with peers
for i in range(0, self.graph.number_of_nodes()):
# select a random node
n1 = list(self.graph.nodes())[np.random.randint(0, self.graph.number_of_nodes())]
# select all of the node's neighbours (no digraph possible)
neighbours = list(self.graph.neighbors(n1))
if len(neighbours) == 0:
continue
# compute probabilities to select a second node among the neighbours
selection_prob = np.array([self.prob(np.abs(actual_status[neighbours[i]]-actual_status[n1]),
self.params['model']['gamma'],0.00001) for i in range(len(neighbours))])
selection_prob = selection_prob/np.sum(selection_prob)
cumulative_selection_probability = np.cumsum(selection_prob)
# select second nodebased on selection probabilities above
r = np.random.random_sample()
n2 = 0
while cumulative_selection_probability[n2] < r:
n2 = n2+1
n2 = neighbours[n2]
# update status of n1 and n2
diff = np.abs(actual_status[n1]-actual_status[n2])
if diff < self.params['model']['epsilon']:
avg = (actual_status[n1]+actual_status[n2])/2.0
actual_status[n1] = avg
actual_status[n2] = avg
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()} | Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status) | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/opinions/AlgorithmicBiasModel.py#L77-L141 | [
"def clean_initial_status(self, valid_status=None):\n for n, s in future.utils.iteritems(self.status):\n if s > 1 or s < 0:\n self.status[n] = 0\n",
"def status_delta(self, actual_status):\n \"\"\"\n Compute the point-to-point variations for each status w.r.t. the previous system configuration\n\n :param actual_status: the actual simulation status\n :return: node that have changed their statuses (dictionary status->nodes),\n count of actual nodes per status (dictionary status->node count),\n delta of nodes per status w.r.t the previous configuration (dictionary status->delta)\n \"\"\"\n actual_status_count = {}\n old_status_count = {}\n delta = {}\n for n, v in future.utils.iteritems(self.status):\n if v != actual_status[n]:\n delta[n] = actual_status[n]\n\n for st in self.available_statuses.values():\n actual_status_count[st] = len([x for x in actual_status if actual_status[x] == st])\n old_status_count[st] = len([x for x in self.status if self.status[x] == st])\n\n status_delta = {st: actual_status_count[st] - old_status_count[st] for st in actual_status_count}\n\n return delta, actual_status_count, status_delta\n"
] | class AlgorithmicBiasModel(DiffusionModel):
"""
Model Parameters to be specified via ModelConfig
:param epsilon: bounded confidence threshold from the Deffuant model, in [0,1]
:param gamma: strength of the algorithmic bias, positive, real
Node states are continuous values in [0,1].
The initial state is generated randomly uniformly from the domain [0,1].
"""
def __init__(self, graph):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph)
self.discrete_state = False
self.available_statuses = {
"Infected": 0
}
self.parameters = {
"model": {
"epsilon": {
"descr": "Bounded confidence threshold",
"range": [0, 1],
"optional": False
},
"gamma": {
"descr": "Algorithmic bias",
"range": [0, 100],
"optional": False
}
},
"nodes": {},
"edges": {}
}
self.name = "Agorithmic Bias"
def set_initial_status(self, configuration=None):
"""
Override behaviour of methods in class DiffusionModel.
Overwrites initial status using random real values.
"""
super(AlgorithmicBiasModel, self).set_initial_status(configuration)
# set node status
for node in self.status:
self.status[node] = np.random.random_sample()
self.initial_status = self.status.copy()
def clean_initial_status(self, valid_status=None):
for n, s in future.utils.iteritems(self.status):
if s > 1 or s < 0:
self.status[n] = 0
@staticmethod
def prob(distance, gamma, min_dist):
if distance < min_dist:
distance = min_dist
return np.power(distance, -gamma)
def iteration(self, node_status=True):
"""
Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status)
"""
# One iteration changes the opinion of N agent pairs using the following procedure:
# - first one agent is selected
# - then a second agent is selected based on a probability that decreases with the distance to the first agent
# - if the two agents have a distance smaller than epsilon, then they change their status to the average of
# their previous statuses
self.clean_initial_status(None)
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(self.status)
if node_status:
return {"iteration": 0, "status": self.status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
# interact with peers
for i in range(0, self.graph.number_of_nodes()):
# select a random node
n1 = list(self.graph.nodes())[np.random.randint(0, self.graph.number_of_nodes())]
# select all of the node's neighbours (no digraph possible)
neighbours = list(self.graph.neighbors(n1))
if len(neighbours) == 0:
continue
# compute probabilities to select a second node among the neighbours
selection_prob = np.array([self.prob(np.abs(actual_status[neighbours[i]]-actual_status[n1]),
self.params['model']['gamma'],0.00001) for i in range(len(neighbours))])
selection_prob = selection_prob/np.sum(selection_prob)
cumulative_selection_probability = np.cumsum(selection_prob)
# select second nodebased on selection probabilities above
r = np.random.random_sample()
n2 = 0
while cumulative_selection_probability[n2] < r:
n2 = n2+1
n2 = neighbours[n2]
# update status of n1 and n2
diff = np.abs(actual_status[n1]-actual_status[n2])
if diff < self.params['model']['epsilon']:
avg = (actual_status[n1]+actual_status[n2])/2.0
actual_status[n1] = avg
actual_status[n2] = avg
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
|
GiulioRossetti/ndlib | ndlib/models/epidemics/ThresholdModel.py | ThresholdModel.iteration | python | def iteration(self, node_status=True):
self.clean_initial_status(self.available_statuses.values())
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(actual_status)
if node_status:
return {"iteration": 0, "status": actual_status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
for u in self.graph.nodes():
if actual_status[u] == 1:
continue
neighbors = list(self.graph.neighbors(u))
if isinstance(self.graph, nx.DiGraph):
neighbors = list(self.graph.predecessors(u))
infected = 0
for v in neighbors:
infected += self.status[v]
if len(neighbors) > 0:
infected_ratio = float(infected)/len(neighbors)
if infected_ratio >= self.params['nodes']['threshold'][u]:
actual_status[u] = 1
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()} | Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status) | train | https://github.com/GiulioRossetti/ndlib/blob/23ecf50c0f76ff2714471071ab9ecb600f4a9832/ndlib/models/epidemics/ThresholdModel.py#L44-L90 | [
"def clean_initial_status(self, valid_status=None):\n \"\"\"\n Check the consistency of initial status\n :param valid_status: valid node configurations\n \"\"\"\n for n, s in future.utils.iteritems(self.status):\n if s not in valid_status:\n self.status[n] = 0\n",
"def status_delta(self, actual_status):\n \"\"\"\n Compute the point-to-point variations for each status w.r.t. the previous system configuration\n\n :param actual_status: the actual simulation status\n :return: node that have changed their statuses (dictionary status->nodes),\n count of actual nodes per status (dictionary status->node count),\n delta of nodes per status w.r.t the previous configuration (dictionary status->delta)\n \"\"\"\n actual_status_count = {}\n old_status_count = {}\n delta = {}\n for n, v in future.utils.iteritems(self.status):\n if v != actual_status[n]:\n delta[n] = actual_status[n]\n\n for st in self.available_statuses.values():\n actual_status_count[st] = len([x for x in actual_status if actual_status[x] == st])\n old_status_count[st] = len([x for x in self.status if self.status[x] == st])\n\n status_delta = {st: actual_status_count[st] - old_status_count[st] for st in actual_status_count}\n\n return delta, actual_status_count, status_delta\n"
] | class ThresholdModel(DiffusionModel):
"""
Node Parameters to be specified via ModelConfig
:param threshold: The node threshold. If not specified otherwise a value of 0.1 is assumed for all nodes.
"""
def __init__(self, graph):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph)
self.available_statuses = {
"Susceptible": 0,
"Infected": 1
}
self.parameters = {
"model": {},
"nodes": {
"threshold": {
"descr": "Node threshold",
"range": [0, 1],
"optional": True,
"default": 0.1
}
},
"edges": {},
}
self.name = "Threshold"
|
Cadene/pretrained-models.pytorch | pretrainedmodels/models/bninception.py | bninception | python | def bninception(num_classes=1000, pretrained='imagenet'):
r"""BNInception model architecture from <https://arxiv.org/pdf/1502.03167.pdf>`_ paper.
"""
model = BNInception(num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['bninception'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model | r"""BNInception model architecture from <https://arxiv.org/pdf/1502.03167.pdf>`_ paper. | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/bninception.py#L497-L511 | null | from __future__ import print_function, division, absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
import os
import sys
__all__ = ['BNInception', 'bninception']
pretrained_settings = {
'bninception': {
'imagenet': {
# Was ported using python2 (may trigger warning)
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/bn_inception-52deb4733.pth',
# 'url': 'http://yjxiong.me/others/bn_inception-9f5701afb96c8044.pth',
'input_space': 'BGR',
'input_size': [3, 224, 224],
'input_range': [0, 255],
'mean': [104, 117, 128],
'std': [1, 1, 1],
'num_classes': 1000
}
}
}
class BNInception(nn.Module):
    def __init__(self, num_classes=1000):
        """Build the BN-Inception network layer by layer.

        Every block follows the pattern Conv -> BatchNorm -> ReLU; each
        inception module has four parallel branches (1x1, 3x3, double-3x3,
        pool+proj) that `features` concatenates along the channel axis.

        NOTE(review): attribute names are the keys of the pretrained
        checkpoint loaded via ``load_state_dict`` in ``bninception`` —
        do not rename any layer.

        :param num_classes: number of outputs of the final ``last_linear``
            classifier (default 1000, ImageNet).
        """
        super(BNInception, self).__init__()
        inplace = True  # all ReLUs run in-place to save activation memory
        # --- Stem: 7x7/2 conv + pool, then 1x1 reduce + 3x3 conv + pool ---
        self.conv1_7x7_s2 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3))
        self.conv1_7x7_s2_bn = nn.BatchNorm2d(64, affine=True)
        self.conv1_relu_7x7 = nn.ReLU (inplace)
        self.pool1_3x3_s2 = nn.MaxPool2d ((3, 3), stride=(2, 2), dilation=(1, 1), ceil_mode=True)
        self.conv2_3x3_reduce = nn.Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))
        self.conv2_3x3_reduce_bn = nn.BatchNorm2d(64, affine=True)
        self.conv2_relu_3x3_reduce = nn.ReLU (inplace)
        self.conv2_3x3 = nn.Conv2d(64, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.conv2_3x3_bn = nn.BatchNorm2d(192, affine=True)
        self.conv2_relu_3x3 = nn.ReLU (inplace)
        self.pool2_3x3_s2 = nn.MaxPool2d ((3, 3), stride=(2, 2), dilation=(1, 1), ceil_mode=True)
        # --- Inception 3a: in 192 -> out 64+64+96+32 = 256 ---
        self.inception_3a_1x1 = nn.Conv2d(192, 64, kernel_size=(1, 1), stride=(1, 1))
        self.inception_3a_1x1_bn = nn.BatchNorm2d(64, affine=True)
        self.inception_3a_relu_1x1 = nn.ReLU (inplace)
        self.inception_3a_3x3_reduce = nn.Conv2d(192, 64, kernel_size=(1, 1), stride=(1, 1))
        self.inception_3a_3x3_reduce_bn = nn.BatchNorm2d(64, affine=True)
        self.inception_3a_relu_3x3_reduce = nn.ReLU (inplace)
        self.inception_3a_3x3 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_3a_3x3_bn = nn.BatchNorm2d(64, affine=True)
        self.inception_3a_relu_3x3 = nn.ReLU (inplace)
        self.inception_3a_double_3x3_reduce = nn.Conv2d(192, 64, kernel_size=(1, 1), stride=(1, 1))
        self.inception_3a_double_3x3_reduce_bn = nn.BatchNorm2d(64, affine=True)
        self.inception_3a_relu_double_3x3_reduce = nn.ReLU (inplace)
        self.inception_3a_double_3x3_1 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_3a_double_3x3_1_bn = nn.BatchNorm2d(96, affine=True)
        self.inception_3a_relu_double_3x3_1 = nn.ReLU (inplace)
        self.inception_3a_double_3x3_2 = nn.Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_3a_double_3x3_2_bn = nn.BatchNorm2d(96, affine=True)
        self.inception_3a_relu_double_3x3_2 = nn.ReLU (inplace)
        self.inception_3a_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)
        self.inception_3a_pool_proj = nn.Conv2d(192, 32, kernel_size=(1, 1), stride=(1, 1))
        self.inception_3a_pool_proj_bn = nn.BatchNorm2d(32, affine=True)
        self.inception_3a_relu_pool_proj = nn.ReLU (inplace)
        # --- Inception 3b: in 256 -> out 64+96+96+64 = 320 ---
        self.inception_3b_1x1 = nn.Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))
        self.inception_3b_1x1_bn = nn.BatchNorm2d(64, affine=True)
        self.inception_3b_relu_1x1 = nn.ReLU (inplace)
        self.inception_3b_3x3_reduce = nn.Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))
        self.inception_3b_3x3_reduce_bn = nn.BatchNorm2d(64, affine=True)
        self.inception_3b_relu_3x3_reduce = nn.ReLU (inplace)
        self.inception_3b_3x3 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_3b_3x3_bn = nn.BatchNorm2d(96, affine=True)
        self.inception_3b_relu_3x3 = nn.ReLU (inplace)
        self.inception_3b_double_3x3_reduce = nn.Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))
        self.inception_3b_double_3x3_reduce_bn = nn.BatchNorm2d(64, affine=True)
        self.inception_3b_relu_double_3x3_reduce = nn.ReLU (inplace)
        self.inception_3b_double_3x3_1 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_3b_double_3x3_1_bn = nn.BatchNorm2d(96, affine=True)
        self.inception_3b_relu_double_3x3_1 = nn.ReLU (inplace)
        self.inception_3b_double_3x3_2 = nn.Conv2d(96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_3b_double_3x3_2_bn = nn.BatchNorm2d(96, affine=True)
        self.inception_3b_relu_double_3x3_2 = nn.ReLU (inplace)
        self.inception_3b_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)
        self.inception_3b_pool_proj = nn.Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))
        self.inception_3b_pool_proj_bn = nn.BatchNorm2d(64, affine=True)
        self.inception_3b_relu_pool_proj = nn.ReLU (inplace)
        # --- Inception 3c (stride-2 reduction, no 1x1 branch):
        #     in 320 -> out 160+96+320(pool passthrough) = 576 ---
        self.inception_3c_3x3_reduce = nn.Conv2d(320, 128, kernel_size=(1, 1), stride=(1, 1))
        self.inception_3c_3x3_reduce_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_3c_relu_3x3_reduce = nn.ReLU (inplace)
        self.inception_3c_3x3 = nn.Conv2d(128, 160, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        self.inception_3c_3x3_bn = nn.BatchNorm2d(160, affine=True)
        self.inception_3c_relu_3x3 = nn.ReLU (inplace)
        self.inception_3c_double_3x3_reduce = nn.Conv2d(320, 64, kernel_size=(1, 1), stride=(1, 1))
        self.inception_3c_double_3x3_reduce_bn = nn.BatchNorm2d(64, affine=True)
        self.inception_3c_relu_double_3x3_reduce = nn.ReLU (inplace)
        self.inception_3c_double_3x3_1 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_3c_double_3x3_1_bn = nn.BatchNorm2d(96, affine=True)
        self.inception_3c_relu_double_3x3_1 = nn.ReLU (inplace)
        self.inception_3c_double_3x3_2 = nn.Conv2d(96, 96, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        self.inception_3c_double_3x3_2_bn = nn.BatchNorm2d(96, affine=True)
        self.inception_3c_relu_double_3x3_2 = nn.ReLU (inplace)
        self.inception_3c_pool = nn.MaxPool2d ((3, 3), stride=(2, 2), dilation=(1, 1), ceil_mode=True)
        # --- Inception 4a: in 576 -> out 224+96+128+128 = 576 ---
        self.inception_4a_1x1 = nn.Conv2d(576, 224, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4a_1x1_bn = nn.BatchNorm2d(224, affine=True)
        self.inception_4a_relu_1x1 = nn.ReLU (inplace)
        self.inception_4a_3x3_reduce = nn.Conv2d(576, 64, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4a_3x3_reduce_bn = nn.BatchNorm2d(64, affine=True)
        self.inception_4a_relu_3x3_reduce = nn.ReLU (inplace)
        self.inception_4a_3x3 = nn.Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_4a_3x3_bn = nn.BatchNorm2d(96, affine=True)
        self.inception_4a_relu_3x3 = nn.ReLU (inplace)
        self.inception_4a_double_3x3_reduce = nn.Conv2d(576, 96, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4a_double_3x3_reduce_bn = nn.BatchNorm2d(96, affine=True)
        self.inception_4a_relu_double_3x3_reduce = nn.ReLU (inplace)
        self.inception_4a_double_3x3_1 = nn.Conv2d(96, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_4a_double_3x3_1_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_4a_relu_double_3x3_1 = nn.ReLU (inplace)
        self.inception_4a_double_3x3_2 = nn.Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_4a_double_3x3_2_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_4a_relu_double_3x3_2 = nn.ReLU (inplace)
        self.inception_4a_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)
        self.inception_4a_pool_proj = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4a_pool_proj_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_4a_relu_pool_proj = nn.ReLU (inplace)
        # --- Inception 4b: in 576 -> out 192+128+128+128 = 576 ---
        self.inception_4b_1x1 = nn.Conv2d(576, 192, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4b_1x1_bn = nn.BatchNorm2d(192, affine=True)
        self.inception_4b_relu_1x1 = nn.ReLU (inplace)
        self.inception_4b_3x3_reduce = nn.Conv2d(576, 96, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4b_3x3_reduce_bn = nn.BatchNorm2d(96, affine=True)
        self.inception_4b_relu_3x3_reduce = nn.ReLU (inplace)
        self.inception_4b_3x3 = nn.Conv2d(96, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_4b_3x3_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_4b_relu_3x3 = nn.ReLU (inplace)
        self.inception_4b_double_3x3_reduce = nn.Conv2d(576, 96, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4b_double_3x3_reduce_bn = nn.BatchNorm2d(96, affine=True)
        self.inception_4b_relu_double_3x3_reduce = nn.ReLU (inplace)
        self.inception_4b_double_3x3_1 = nn.Conv2d(96, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_4b_double_3x3_1_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_4b_relu_double_3x3_1 = nn.ReLU (inplace)
        self.inception_4b_double_3x3_2 = nn.Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_4b_double_3x3_2_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_4b_relu_double_3x3_2 = nn.ReLU (inplace)
        self.inception_4b_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)
        self.inception_4b_pool_proj = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4b_pool_proj_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_4b_relu_pool_proj = nn.ReLU (inplace)
        # --- Inception 4c: in 576 -> out 160+160+160+128 = 608 ---
        self.inception_4c_1x1 = nn.Conv2d(576, 160, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4c_1x1_bn = nn.BatchNorm2d(160, affine=True)
        self.inception_4c_relu_1x1 = nn.ReLU (inplace)
        self.inception_4c_3x3_reduce = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4c_3x3_reduce_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_4c_relu_3x3_reduce = nn.ReLU (inplace)
        self.inception_4c_3x3 = nn.Conv2d(128, 160, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_4c_3x3_bn = nn.BatchNorm2d(160, affine=True)
        self.inception_4c_relu_3x3 = nn.ReLU (inplace)
        self.inception_4c_double_3x3_reduce = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4c_double_3x3_reduce_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_4c_relu_double_3x3_reduce = nn.ReLU (inplace)
        self.inception_4c_double_3x3_1 = nn.Conv2d(128, 160, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_4c_double_3x3_1_bn = nn.BatchNorm2d(160, affine=True)
        self.inception_4c_relu_double_3x3_1 = nn.ReLU (inplace)
        self.inception_4c_double_3x3_2 = nn.Conv2d(160, 160, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_4c_double_3x3_2_bn = nn.BatchNorm2d(160, affine=True)
        self.inception_4c_relu_double_3x3_2 = nn.ReLU (inplace)
        self.inception_4c_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)
        self.inception_4c_pool_proj = nn.Conv2d(576, 128, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4c_pool_proj_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_4c_relu_pool_proj = nn.ReLU (inplace)
        # --- Inception 4d: in 608 -> out 96+192+192+128 = 608 ---
        self.inception_4d_1x1 = nn.Conv2d(608, 96, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4d_1x1_bn = nn.BatchNorm2d(96, affine=True)
        self.inception_4d_relu_1x1 = nn.ReLU (inplace)
        self.inception_4d_3x3_reduce = nn.Conv2d(608, 128, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4d_3x3_reduce_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_4d_relu_3x3_reduce = nn.ReLU (inplace)
        self.inception_4d_3x3 = nn.Conv2d(128, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_4d_3x3_bn = nn.BatchNorm2d(192, affine=True)
        self.inception_4d_relu_3x3 = nn.ReLU (inplace)
        self.inception_4d_double_3x3_reduce = nn.Conv2d(608, 160, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4d_double_3x3_reduce_bn = nn.BatchNorm2d(160, affine=True)
        self.inception_4d_relu_double_3x3_reduce = nn.ReLU (inplace)
        self.inception_4d_double_3x3_1 = nn.Conv2d(160, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_4d_double_3x3_1_bn = nn.BatchNorm2d(192, affine=True)
        self.inception_4d_relu_double_3x3_1 = nn.ReLU (inplace)
        self.inception_4d_double_3x3_2 = nn.Conv2d(192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_4d_double_3x3_2_bn = nn.BatchNorm2d(192, affine=True)
        self.inception_4d_relu_double_3x3_2 = nn.ReLU (inplace)
        self.inception_4d_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)
        self.inception_4d_pool_proj = nn.Conv2d(608, 128, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4d_pool_proj_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_4d_relu_pool_proj = nn.ReLU (inplace)
        # --- Inception 4e (stride-2 reduction, no 1x1 branch):
        #     in 608 -> out 192+256+608(pool passthrough) = 1056 ---
        self.inception_4e_3x3_reduce = nn.Conv2d(608, 128, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4e_3x3_reduce_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_4e_relu_3x3_reduce = nn.ReLU (inplace)
        self.inception_4e_3x3 = nn.Conv2d(128, 192, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        self.inception_4e_3x3_bn = nn.BatchNorm2d(192, affine=True)
        self.inception_4e_relu_3x3 = nn.ReLU (inplace)
        self.inception_4e_double_3x3_reduce = nn.Conv2d(608, 192, kernel_size=(1, 1), stride=(1, 1))
        self.inception_4e_double_3x3_reduce_bn = nn.BatchNorm2d(192, affine=True)
        self.inception_4e_relu_double_3x3_reduce = nn.ReLU (inplace)
        self.inception_4e_double_3x3_1 = nn.Conv2d(192, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_4e_double_3x3_1_bn = nn.BatchNorm2d(256, affine=True)
        self.inception_4e_relu_double_3x3_1 = nn.ReLU (inplace)
        self.inception_4e_double_3x3_2 = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        self.inception_4e_double_3x3_2_bn = nn.BatchNorm2d(256, affine=True)
        self.inception_4e_relu_double_3x3_2 = nn.ReLU (inplace)
        self.inception_4e_pool = nn.MaxPool2d ((3, 3), stride=(2, 2), dilation=(1, 1), ceil_mode=True)
        # --- Inception 5a: in 1056 -> out 352+320+224+128 = 1024 ---
        self.inception_5a_1x1 = nn.Conv2d(1056, 352, kernel_size=(1, 1), stride=(1, 1))
        self.inception_5a_1x1_bn = nn.BatchNorm2d(352, affine=True)
        self.inception_5a_relu_1x1 = nn.ReLU (inplace)
        self.inception_5a_3x3_reduce = nn.Conv2d(1056, 192, kernel_size=(1, 1), stride=(1, 1))
        self.inception_5a_3x3_reduce_bn = nn.BatchNorm2d(192, affine=True)
        self.inception_5a_relu_3x3_reduce = nn.ReLU (inplace)
        self.inception_5a_3x3 = nn.Conv2d(192, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_5a_3x3_bn = nn.BatchNorm2d(320, affine=True)
        self.inception_5a_relu_3x3 = nn.ReLU (inplace)
        self.inception_5a_double_3x3_reduce = nn.Conv2d(1056, 160, kernel_size=(1, 1), stride=(1, 1))
        self.inception_5a_double_3x3_reduce_bn = nn.BatchNorm2d(160, affine=True)
        self.inception_5a_relu_double_3x3_reduce = nn.ReLU (inplace)
        self.inception_5a_double_3x3_1 = nn.Conv2d(160, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_5a_double_3x3_1_bn = nn.BatchNorm2d(224, affine=True)
        self.inception_5a_relu_double_3x3_1 = nn.ReLU (inplace)
        self.inception_5a_double_3x3_2 = nn.Conv2d(224, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_5a_double_3x3_2_bn = nn.BatchNorm2d(224, affine=True)
        self.inception_5a_relu_double_3x3_2 = nn.ReLU (inplace)
        self.inception_5a_pool = nn.AvgPool2d (3, stride=1, padding=1, ceil_mode=True, count_include_pad=True)
        self.inception_5a_pool_proj = nn.Conv2d(1056, 128, kernel_size=(1, 1), stride=(1, 1))
        self.inception_5a_pool_proj_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_5a_relu_pool_proj = nn.ReLU (inplace)
        # --- Inception 5b (max-pool branch): in 1024 -> out 352+320+224+128 = 1024 ---
        self.inception_5b_1x1 = nn.Conv2d(1024, 352, kernel_size=(1, 1), stride=(1, 1))
        self.inception_5b_1x1_bn = nn.BatchNorm2d(352, affine=True)
        self.inception_5b_relu_1x1 = nn.ReLU (inplace)
        self.inception_5b_3x3_reduce = nn.Conv2d(1024, 192, kernel_size=(1, 1), stride=(1, 1))
        self.inception_5b_3x3_reduce_bn = nn.BatchNorm2d(192, affine=True)
        self.inception_5b_relu_3x3_reduce = nn.ReLU (inplace)
        self.inception_5b_3x3 = nn.Conv2d(192, 320, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_5b_3x3_bn = nn.BatchNorm2d(320, affine=True)
        self.inception_5b_relu_3x3 = nn.ReLU (inplace)
        self.inception_5b_double_3x3_reduce = nn.Conv2d(1024, 192, kernel_size=(1, 1), stride=(1, 1))
        self.inception_5b_double_3x3_reduce_bn = nn.BatchNorm2d(192, affine=True)
        self.inception_5b_relu_double_3x3_reduce = nn.ReLU (inplace)
        self.inception_5b_double_3x3_1 = nn.Conv2d(192, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_5b_double_3x3_1_bn = nn.BatchNorm2d(224, affine=True)
        self.inception_5b_relu_double_3x3_1 = nn.ReLU (inplace)
        self.inception_5b_double_3x3_2 = nn.Conv2d(224, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.inception_5b_double_3x3_2_bn = nn.BatchNorm2d(224, affine=True)
        self.inception_5b_relu_double_3x3_2 = nn.ReLU (inplace)
        self.inception_5b_pool = nn.MaxPool2d ((3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), ceil_mode=True)
        self.inception_5b_pool_proj = nn.Conv2d(1024, 128, kernel_size=(1, 1), stride=(1, 1))
        self.inception_5b_pool_proj_bn = nn.BatchNorm2d(128, affine=True)
        self.inception_5b_relu_pool_proj = nn.ReLU (inplace)
        # --- Classifier head (applied after pooling, outside `features`) ---
        self.last_linear = nn.Linear (1024, num_classes)
def features(self, input):
conv1_7x7_s2_out = self.conv1_7x7_s2(input)
conv1_7x7_s2_bn_out = self.conv1_7x7_s2_bn(conv1_7x7_s2_out)
conv1_relu_7x7_out = self.conv1_relu_7x7(conv1_7x7_s2_bn_out)
pool1_3x3_s2_out = self.pool1_3x3_s2(conv1_relu_7x7_out)
conv2_3x3_reduce_out = self.conv2_3x3_reduce(pool1_3x3_s2_out)
conv2_3x3_reduce_bn_out = self.conv2_3x3_reduce_bn(conv2_3x3_reduce_out)
conv2_relu_3x3_reduce_out = self.conv2_relu_3x3_reduce(conv2_3x3_reduce_bn_out)
conv2_3x3_out = self.conv2_3x3(conv2_relu_3x3_reduce_out)
conv2_3x3_bn_out = self.conv2_3x3_bn(conv2_3x3_out)
conv2_relu_3x3_out = self.conv2_relu_3x3(conv2_3x3_bn_out)
pool2_3x3_s2_out = self.pool2_3x3_s2(conv2_relu_3x3_out)
inception_3a_1x1_out = self.inception_3a_1x1(pool2_3x3_s2_out)
inception_3a_1x1_bn_out = self.inception_3a_1x1_bn(inception_3a_1x1_out)
inception_3a_relu_1x1_out = self.inception_3a_relu_1x1(inception_3a_1x1_bn_out)
inception_3a_3x3_reduce_out = self.inception_3a_3x3_reduce(pool2_3x3_s2_out)
inception_3a_3x3_reduce_bn_out = self.inception_3a_3x3_reduce_bn(inception_3a_3x3_reduce_out)
inception_3a_relu_3x3_reduce_out = self.inception_3a_relu_3x3_reduce(inception_3a_3x3_reduce_bn_out)
inception_3a_3x3_out = self.inception_3a_3x3(inception_3a_relu_3x3_reduce_out)
inception_3a_3x3_bn_out = self.inception_3a_3x3_bn(inception_3a_3x3_out)
inception_3a_relu_3x3_out = self.inception_3a_relu_3x3(inception_3a_3x3_bn_out)
inception_3a_double_3x3_reduce_out = self.inception_3a_double_3x3_reduce(pool2_3x3_s2_out)
inception_3a_double_3x3_reduce_bn_out = self.inception_3a_double_3x3_reduce_bn(inception_3a_double_3x3_reduce_out)
inception_3a_relu_double_3x3_reduce_out = self.inception_3a_relu_double_3x3_reduce(inception_3a_double_3x3_reduce_bn_out)
inception_3a_double_3x3_1_out = self.inception_3a_double_3x3_1(inception_3a_relu_double_3x3_reduce_out)
inception_3a_double_3x3_1_bn_out = self.inception_3a_double_3x3_1_bn(inception_3a_double_3x3_1_out)
inception_3a_relu_double_3x3_1_out = self.inception_3a_relu_double_3x3_1(inception_3a_double_3x3_1_bn_out)
inception_3a_double_3x3_2_out = self.inception_3a_double_3x3_2(inception_3a_relu_double_3x3_1_out)
inception_3a_double_3x3_2_bn_out = self.inception_3a_double_3x3_2_bn(inception_3a_double_3x3_2_out)
inception_3a_relu_double_3x3_2_out = self.inception_3a_relu_double_3x3_2(inception_3a_double_3x3_2_bn_out)
inception_3a_pool_out = self.inception_3a_pool(pool2_3x3_s2_out)
inception_3a_pool_proj_out = self.inception_3a_pool_proj(inception_3a_pool_out)
inception_3a_pool_proj_bn_out = self.inception_3a_pool_proj_bn(inception_3a_pool_proj_out)
inception_3a_relu_pool_proj_out = self.inception_3a_relu_pool_proj(inception_3a_pool_proj_bn_out)
inception_3a_output_out = torch.cat([inception_3a_relu_1x1_out,inception_3a_relu_3x3_out,inception_3a_relu_double_3x3_2_out ,inception_3a_relu_pool_proj_out], 1)
inception_3b_1x1_out = self.inception_3b_1x1(inception_3a_output_out)
inception_3b_1x1_bn_out = self.inception_3b_1x1_bn(inception_3b_1x1_out)
inception_3b_relu_1x1_out = self.inception_3b_relu_1x1(inception_3b_1x1_bn_out)
inception_3b_3x3_reduce_out = self.inception_3b_3x3_reduce(inception_3a_output_out)
inception_3b_3x3_reduce_bn_out = self.inception_3b_3x3_reduce_bn(inception_3b_3x3_reduce_out)
inception_3b_relu_3x3_reduce_out = self.inception_3b_relu_3x3_reduce(inception_3b_3x3_reduce_bn_out)
inception_3b_3x3_out = self.inception_3b_3x3(inception_3b_relu_3x3_reduce_out)
inception_3b_3x3_bn_out = self.inception_3b_3x3_bn(inception_3b_3x3_out)
inception_3b_relu_3x3_out = self.inception_3b_relu_3x3(inception_3b_3x3_bn_out)
inception_3b_double_3x3_reduce_out = self.inception_3b_double_3x3_reduce(inception_3a_output_out)
inception_3b_double_3x3_reduce_bn_out = self.inception_3b_double_3x3_reduce_bn(inception_3b_double_3x3_reduce_out)
inception_3b_relu_double_3x3_reduce_out = self.inception_3b_relu_double_3x3_reduce(inception_3b_double_3x3_reduce_bn_out)
inception_3b_double_3x3_1_out = self.inception_3b_double_3x3_1(inception_3b_relu_double_3x3_reduce_out)
inception_3b_double_3x3_1_bn_out = self.inception_3b_double_3x3_1_bn(inception_3b_double_3x3_1_out)
inception_3b_relu_double_3x3_1_out = self.inception_3b_relu_double_3x3_1(inception_3b_double_3x3_1_bn_out)
inception_3b_double_3x3_2_out = self.inception_3b_double_3x3_2(inception_3b_relu_double_3x3_1_out)
inception_3b_double_3x3_2_bn_out = self.inception_3b_double_3x3_2_bn(inception_3b_double_3x3_2_out)
inception_3b_relu_double_3x3_2_out = self.inception_3b_relu_double_3x3_2(inception_3b_double_3x3_2_bn_out)
inception_3b_pool_out = self.inception_3b_pool(inception_3a_output_out)
inception_3b_pool_proj_out = self.inception_3b_pool_proj(inception_3b_pool_out)
inception_3b_pool_proj_bn_out = self.inception_3b_pool_proj_bn(inception_3b_pool_proj_out)
inception_3b_relu_pool_proj_out = self.inception_3b_relu_pool_proj(inception_3b_pool_proj_bn_out)
inception_3b_output_out = torch.cat([inception_3b_relu_1x1_out,inception_3b_relu_3x3_out,inception_3b_relu_double_3x3_2_out,inception_3b_relu_pool_proj_out], 1)
inception_3c_3x3_reduce_out = self.inception_3c_3x3_reduce(inception_3b_output_out)
inception_3c_3x3_reduce_bn_out = self.inception_3c_3x3_reduce_bn(inception_3c_3x3_reduce_out)
inception_3c_relu_3x3_reduce_out = self.inception_3c_relu_3x3_reduce(inception_3c_3x3_reduce_bn_out)
inception_3c_3x3_out = self.inception_3c_3x3(inception_3c_relu_3x3_reduce_out)
inception_3c_3x3_bn_out = self.inception_3c_3x3_bn(inception_3c_3x3_out)
inception_3c_relu_3x3_out = self.inception_3c_relu_3x3(inception_3c_3x3_bn_out)
inception_3c_double_3x3_reduce_out = self.inception_3c_double_3x3_reduce(inception_3b_output_out)
inception_3c_double_3x3_reduce_bn_out = self.inception_3c_double_3x3_reduce_bn(inception_3c_double_3x3_reduce_out)
inception_3c_relu_double_3x3_reduce_out = self.inception_3c_relu_double_3x3_reduce(inception_3c_double_3x3_reduce_bn_out)
inception_3c_double_3x3_1_out = self.inception_3c_double_3x3_1(inception_3c_relu_double_3x3_reduce_out)
inception_3c_double_3x3_1_bn_out = self.inception_3c_double_3x3_1_bn(inception_3c_double_3x3_1_out)
inception_3c_relu_double_3x3_1_out = self.inception_3c_relu_double_3x3_1(inception_3c_double_3x3_1_bn_out)
inception_3c_double_3x3_2_out = self.inception_3c_double_3x3_2(inception_3c_relu_double_3x3_1_out)
inception_3c_double_3x3_2_bn_out = self.inception_3c_double_3x3_2_bn(inception_3c_double_3x3_2_out)
inception_3c_relu_double_3x3_2_out = self.inception_3c_relu_double_3x3_2(inception_3c_double_3x3_2_bn_out)
inception_3c_pool_out = self.inception_3c_pool(inception_3b_output_out)
inception_3c_output_out = torch.cat([inception_3c_relu_3x3_out,inception_3c_relu_double_3x3_2_out,inception_3c_pool_out], 1)
inception_4a_1x1_out = self.inception_4a_1x1(inception_3c_output_out)
inception_4a_1x1_bn_out = self.inception_4a_1x1_bn(inception_4a_1x1_out)
inception_4a_relu_1x1_out = self.inception_4a_relu_1x1(inception_4a_1x1_bn_out)
inception_4a_3x3_reduce_out = self.inception_4a_3x3_reduce(inception_3c_output_out)
inception_4a_3x3_reduce_bn_out = self.inception_4a_3x3_reduce_bn(inception_4a_3x3_reduce_out)
inception_4a_relu_3x3_reduce_out = self.inception_4a_relu_3x3_reduce(inception_4a_3x3_reduce_bn_out)
inception_4a_3x3_out = self.inception_4a_3x3(inception_4a_relu_3x3_reduce_out)
inception_4a_3x3_bn_out = self.inception_4a_3x3_bn(inception_4a_3x3_out)
inception_4a_relu_3x3_out = self.inception_4a_relu_3x3(inception_4a_3x3_bn_out)
inception_4a_double_3x3_reduce_out = self.inception_4a_double_3x3_reduce(inception_3c_output_out)
inception_4a_double_3x3_reduce_bn_out = self.inception_4a_double_3x3_reduce_bn(inception_4a_double_3x3_reduce_out)
inception_4a_relu_double_3x3_reduce_out = self.inception_4a_relu_double_3x3_reduce(inception_4a_double_3x3_reduce_bn_out)
inception_4a_double_3x3_1_out = self.inception_4a_double_3x3_1(inception_4a_relu_double_3x3_reduce_out)
inception_4a_double_3x3_1_bn_out = self.inception_4a_double_3x3_1_bn(inception_4a_double_3x3_1_out)
inception_4a_relu_double_3x3_1_out = self.inception_4a_relu_double_3x3_1(inception_4a_double_3x3_1_bn_out)
inception_4a_double_3x3_2_out = self.inception_4a_double_3x3_2(inception_4a_relu_double_3x3_1_out)
inception_4a_double_3x3_2_bn_out = self.inception_4a_double_3x3_2_bn(inception_4a_double_3x3_2_out)
inception_4a_relu_double_3x3_2_out = self.inception_4a_relu_double_3x3_2(inception_4a_double_3x3_2_bn_out)
inception_4a_pool_out = self.inception_4a_pool(inception_3c_output_out)
inception_4a_pool_proj_out = self.inception_4a_pool_proj(inception_4a_pool_out)
inception_4a_pool_proj_bn_out = self.inception_4a_pool_proj_bn(inception_4a_pool_proj_out)
inception_4a_relu_pool_proj_out = self.inception_4a_relu_pool_proj(inception_4a_pool_proj_bn_out)
inception_4a_output_out = torch.cat([inception_4a_relu_1x1_out,inception_4a_relu_3x3_out,inception_4a_relu_double_3x3_2_out,inception_4a_relu_pool_proj_out], 1)
inception_4b_1x1_out = self.inception_4b_1x1(inception_4a_output_out)
inception_4b_1x1_bn_out = self.inception_4b_1x1_bn(inception_4b_1x1_out)
inception_4b_relu_1x1_out = self.inception_4b_relu_1x1(inception_4b_1x1_bn_out)
inception_4b_3x3_reduce_out = self.inception_4b_3x3_reduce(inception_4a_output_out)
inception_4b_3x3_reduce_bn_out = self.inception_4b_3x3_reduce_bn(inception_4b_3x3_reduce_out)
inception_4b_relu_3x3_reduce_out = self.inception_4b_relu_3x3_reduce(inception_4b_3x3_reduce_bn_out)
inception_4b_3x3_out = self.inception_4b_3x3(inception_4b_relu_3x3_reduce_out)
inception_4b_3x3_bn_out = self.inception_4b_3x3_bn(inception_4b_3x3_out)
inception_4b_relu_3x3_out = self.inception_4b_relu_3x3(inception_4b_3x3_bn_out)
inception_4b_double_3x3_reduce_out = self.inception_4b_double_3x3_reduce(inception_4a_output_out)
inception_4b_double_3x3_reduce_bn_out = self.inception_4b_double_3x3_reduce_bn(inception_4b_double_3x3_reduce_out)
inception_4b_relu_double_3x3_reduce_out = self.inception_4b_relu_double_3x3_reduce(inception_4b_double_3x3_reduce_bn_out)
inception_4b_double_3x3_1_out = self.inception_4b_double_3x3_1(inception_4b_relu_double_3x3_reduce_out)
inception_4b_double_3x3_1_bn_out = self.inception_4b_double_3x3_1_bn(inception_4b_double_3x3_1_out)
inception_4b_relu_double_3x3_1_out = self.inception_4b_relu_double_3x3_1(inception_4b_double_3x3_1_bn_out)
inception_4b_double_3x3_2_out = self.inception_4b_double_3x3_2(inception_4b_relu_double_3x3_1_out)
inception_4b_double_3x3_2_bn_out = self.inception_4b_double_3x3_2_bn(inception_4b_double_3x3_2_out)
inception_4b_relu_double_3x3_2_out = self.inception_4b_relu_double_3x3_2(inception_4b_double_3x3_2_bn_out)
inception_4b_pool_out = self.inception_4b_pool(inception_4a_output_out)
inception_4b_pool_proj_out = self.inception_4b_pool_proj(inception_4b_pool_out)
inception_4b_pool_proj_bn_out = self.inception_4b_pool_proj_bn(inception_4b_pool_proj_out)
inception_4b_relu_pool_proj_out = self.inception_4b_relu_pool_proj(inception_4b_pool_proj_bn_out)
inception_4b_output_out = torch.cat([inception_4b_relu_1x1_out,inception_4b_relu_3x3_out,inception_4b_relu_double_3x3_2_out,inception_4b_relu_pool_proj_out], 1)
inception_4c_1x1_out = self.inception_4c_1x1(inception_4b_output_out)
inception_4c_1x1_bn_out = self.inception_4c_1x1_bn(inception_4c_1x1_out)
inception_4c_relu_1x1_out = self.inception_4c_relu_1x1(inception_4c_1x1_bn_out)
inception_4c_3x3_reduce_out = self.inception_4c_3x3_reduce(inception_4b_output_out)
inception_4c_3x3_reduce_bn_out = self.inception_4c_3x3_reduce_bn(inception_4c_3x3_reduce_out)
inception_4c_relu_3x3_reduce_out = self.inception_4c_relu_3x3_reduce(inception_4c_3x3_reduce_bn_out)
inception_4c_3x3_out = self.inception_4c_3x3(inception_4c_relu_3x3_reduce_out)
inception_4c_3x3_bn_out = self.inception_4c_3x3_bn(inception_4c_3x3_out)
inception_4c_relu_3x3_out = self.inception_4c_relu_3x3(inception_4c_3x3_bn_out)
inception_4c_double_3x3_reduce_out = self.inception_4c_double_3x3_reduce(inception_4b_output_out)
inception_4c_double_3x3_reduce_bn_out = self.inception_4c_double_3x3_reduce_bn(inception_4c_double_3x3_reduce_out)
inception_4c_relu_double_3x3_reduce_out = self.inception_4c_relu_double_3x3_reduce(inception_4c_double_3x3_reduce_bn_out)
inception_4c_double_3x3_1_out = self.inception_4c_double_3x3_1(inception_4c_relu_double_3x3_reduce_out)
inception_4c_double_3x3_1_bn_out = self.inception_4c_double_3x3_1_bn(inception_4c_double_3x3_1_out)
inception_4c_relu_double_3x3_1_out = self.inception_4c_relu_double_3x3_1(inception_4c_double_3x3_1_bn_out)
inception_4c_double_3x3_2_out = self.inception_4c_double_3x3_2(inception_4c_relu_double_3x3_1_out)
inception_4c_double_3x3_2_bn_out = self.inception_4c_double_3x3_2_bn(inception_4c_double_3x3_2_out)
inception_4c_relu_double_3x3_2_out = self.inception_4c_relu_double_3x3_2(inception_4c_double_3x3_2_bn_out)
inception_4c_pool_out = self.inception_4c_pool(inception_4b_output_out)
inception_4c_pool_proj_out = self.inception_4c_pool_proj(inception_4c_pool_out)
inception_4c_pool_proj_bn_out = self.inception_4c_pool_proj_bn(inception_4c_pool_proj_out)
inception_4c_relu_pool_proj_out = self.inception_4c_relu_pool_proj(inception_4c_pool_proj_bn_out)
inception_4c_output_out = torch.cat([inception_4c_relu_1x1_out,inception_4c_relu_3x3_out,inception_4c_relu_double_3x3_2_out,inception_4c_relu_pool_proj_out], 1)
inception_4d_1x1_out = self.inception_4d_1x1(inception_4c_output_out)
inception_4d_1x1_bn_out = self.inception_4d_1x1_bn(inception_4d_1x1_out)
inception_4d_relu_1x1_out = self.inception_4d_relu_1x1(inception_4d_1x1_bn_out)
inception_4d_3x3_reduce_out = self.inception_4d_3x3_reduce(inception_4c_output_out)
inception_4d_3x3_reduce_bn_out = self.inception_4d_3x3_reduce_bn(inception_4d_3x3_reduce_out)
inception_4d_relu_3x3_reduce_out = self.inception_4d_relu_3x3_reduce(inception_4d_3x3_reduce_bn_out)
inception_4d_3x3_out = self.inception_4d_3x3(inception_4d_relu_3x3_reduce_out)
inception_4d_3x3_bn_out = self.inception_4d_3x3_bn(inception_4d_3x3_out)
inception_4d_relu_3x3_out = self.inception_4d_relu_3x3(inception_4d_3x3_bn_out)
inception_4d_double_3x3_reduce_out = self.inception_4d_double_3x3_reduce(inception_4c_output_out)
inception_4d_double_3x3_reduce_bn_out = self.inception_4d_double_3x3_reduce_bn(inception_4d_double_3x3_reduce_out)
inception_4d_relu_double_3x3_reduce_out = self.inception_4d_relu_double_3x3_reduce(inception_4d_double_3x3_reduce_bn_out)
inception_4d_double_3x3_1_out = self.inception_4d_double_3x3_1(inception_4d_relu_double_3x3_reduce_out)
inception_4d_double_3x3_1_bn_out = self.inception_4d_double_3x3_1_bn(inception_4d_double_3x3_1_out)
inception_4d_relu_double_3x3_1_out = self.inception_4d_relu_double_3x3_1(inception_4d_double_3x3_1_bn_out)
inception_4d_double_3x3_2_out = self.inception_4d_double_3x3_2(inception_4d_relu_double_3x3_1_out)
inception_4d_double_3x3_2_bn_out = self.inception_4d_double_3x3_2_bn(inception_4d_double_3x3_2_out)
inception_4d_relu_double_3x3_2_out = self.inception_4d_relu_double_3x3_2(inception_4d_double_3x3_2_bn_out)
inception_4d_pool_out = self.inception_4d_pool(inception_4c_output_out)
inception_4d_pool_proj_out = self.inception_4d_pool_proj(inception_4d_pool_out)
inception_4d_pool_proj_bn_out = self.inception_4d_pool_proj_bn(inception_4d_pool_proj_out)
inception_4d_relu_pool_proj_out = self.inception_4d_relu_pool_proj(inception_4d_pool_proj_bn_out)
inception_4d_output_out = torch.cat([inception_4d_relu_1x1_out,inception_4d_relu_3x3_out,inception_4d_relu_double_3x3_2_out,inception_4d_relu_pool_proj_out], 1)
inception_4e_3x3_reduce_out = self.inception_4e_3x3_reduce(inception_4d_output_out)
inception_4e_3x3_reduce_bn_out = self.inception_4e_3x3_reduce_bn(inception_4e_3x3_reduce_out)
inception_4e_relu_3x3_reduce_out = self.inception_4e_relu_3x3_reduce(inception_4e_3x3_reduce_bn_out)
inception_4e_3x3_out = self.inception_4e_3x3(inception_4e_relu_3x3_reduce_out)
inception_4e_3x3_bn_out = self.inception_4e_3x3_bn(inception_4e_3x3_out)
inception_4e_relu_3x3_out = self.inception_4e_relu_3x3(inception_4e_3x3_bn_out)
inception_4e_double_3x3_reduce_out = self.inception_4e_double_3x3_reduce(inception_4d_output_out)
inception_4e_double_3x3_reduce_bn_out = self.inception_4e_double_3x3_reduce_bn(inception_4e_double_3x3_reduce_out)
inception_4e_relu_double_3x3_reduce_out = self.inception_4e_relu_double_3x3_reduce(inception_4e_double_3x3_reduce_bn_out)
inception_4e_double_3x3_1_out = self.inception_4e_double_3x3_1(inception_4e_relu_double_3x3_reduce_out)
inception_4e_double_3x3_1_bn_out = self.inception_4e_double_3x3_1_bn(inception_4e_double_3x3_1_out)
inception_4e_relu_double_3x3_1_out = self.inception_4e_relu_double_3x3_1(inception_4e_double_3x3_1_bn_out)
inception_4e_double_3x3_2_out = self.inception_4e_double_3x3_2(inception_4e_relu_double_3x3_1_out)
inception_4e_double_3x3_2_bn_out = self.inception_4e_double_3x3_2_bn(inception_4e_double_3x3_2_out)
inception_4e_relu_double_3x3_2_out = self.inception_4e_relu_double_3x3_2(inception_4e_double_3x3_2_bn_out)
inception_4e_pool_out = self.inception_4e_pool(inception_4d_output_out)
inception_4e_output_out = torch.cat([inception_4e_relu_3x3_out,inception_4e_relu_double_3x3_2_out,inception_4e_pool_out], 1)
inception_5a_1x1_out = self.inception_5a_1x1(inception_4e_output_out)
inception_5a_1x1_bn_out = self.inception_5a_1x1_bn(inception_5a_1x1_out)
inception_5a_relu_1x1_out = self.inception_5a_relu_1x1(inception_5a_1x1_bn_out)
inception_5a_3x3_reduce_out = self.inception_5a_3x3_reduce(inception_4e_output_out)
inception_5a_3x3_reduce_bn_out = self.inception_5a_3x3_reduce_bn(inception_5a_3x3_reduce_out)
inception_5a_relu_3x3_reduce_out = self.inception_5a_relu_3x3_reduce(inception_5a_3x3_reduce_bn_out)
inception_5a_3x3_out = self.inception_5a_3x3(inception_5a_relu_3x3_reduce_out)
inception_5a_3x3_bn_out = self.inception_5a_3x3_bn(inception_5a_3x3_out)
inception_5a_relu_3x3_out = self.inception_5a_relu_3x3(inception_5a_3x3_bn_out)
inception_5a_double_3x3_reduce_out = self.inception_5a_double_3x3_reduce(inception_4e_output_out)
inception_5a_double_3x3_reduce_bn_out = self.inception_5a_double_3x3_reduce_bn(inception_5a_double_3x3_reduce_out)
inception_5a_relu_double_3x3_reduce_out = self.inception_5a_relu_double_3x3_reduce(inception_5a_double_3x3_reduce_bn_out)
inception_5a_double_3x3_1_out = self.inception_5a_double_3x3_1(inception_5a_relu_double_3x3_reduce_out)
inception_5a_double_3x3_1_bn_out = self.inception_5a_double_3x3_1_bn(inception_5a_double_3x3_1_out)
inception_5a_relu_double_3x3_1_out = self.inception_5a_relu_double_3x3_1(inception_5a_double_3x3_1_bn_out)
inception_5a_double_3x3_2_out = self.inception_5a_double_3x3_2(inception_5a_relu_double_3x3_1_out)
inception_5a_double_3x3_2_bn_out = self.inception_5a_double_3x3_2_bn(inception_5a_double_3x3_2_out)
inception_5a_relu_double_3x3_2_out = self.inception_5a_relu_double_3x3_2(inception_5a_double_3x3_2_bn_out)
inception_5a_pool_out = self.inception_5a_pool(inception_4e_output_out)
inception_5a_pool_proj_out = self.inception_5a_pool_proj(inception_5a_pool_out)
inception_5a_pool_proj_bn_out = self.inception_5a_pool_proj_bn(inception_5a_pool_proj_out)
inception_5a_relu_pool_proj_out = self.inception_5a_relu_pool_proj(inception_5a_pool_proj_bn_out)
inception_5a_output_out = torch.cat([inception_5a_relu_1x1_out,inception_5a_relu_3x3_out,inception_5a_relu_double_3x3_2_out,inception_5a_relu_pool_proj_out], 1)
inception_5b_1x1_out = self.inception_5b_1x1(inception_5a_output_out)
inception_5b_1x1_bn_out = self.inception_5b_1x1_bn(inception_5b_1x1_out)
inception_5b_relu_1x1_out = self.inception_5b_relu_1x1(inception_5b_1x1_bn_out)
inception_5b_3x3_reduce_out = self.inception_5b_3x3_reduce(inception_5a_output_out)
inception_5b_3x3_reduce_bn_out = self.inception_5b_3x3_reduce_bn(inception_5b_3x3_reduce_out)
inception_5b_relu_3x3_reduce_out = self.inception_5b_relu_3x3_reduce(inception_5b_3x3_reduce_bn_out)
inception_5b_3x3_out = self.inception_5b_3x3(inception_5b_relu_3x3_reduce_out)
inception_5b_3x3_bn_out = self.inception_5b_3x3_bn(inception_5b_3x3_out)
inception_5b_relu_3x3_out = self.inception_5b_relu_3x3(inception_5b_3x3_bn_out)
inception_5b_double_3x3_reduce_out = self.inception_5b_double_3x3_reduce(inception_5a_output_out)
inception_5b_double_3x3_reduce_bn_out = self.inception_5b_double_3x3_reduce_bn(inception_5b_double_3x3_reduce_out)
inception_5b_relu_double_3x3_reduce_out = self.inception_5b_relu_double_3x3_reduce(inception_5b_double_3x3_reduce_bn_out)
inception_5b_double_3x3_1_out = self.inception_5b_double_3x3_1(inception_5b_relu_double_3x3_reduce_out)
inception_5b_double_3x3_1_bn_out = self.inception_5b_double_3x3_1_bn(inception_5b_double_3x3_1_out)
inception_5b_relu_double_3x3_1_out = self.inception_5b_relu_double_3x3_1(inception_5b_double_3x3_1_bn_out)
inception_5b_double_3x3_2_out = self.inception_5b_double_3x3_2(inception_5b_relu_double_3x3_1_out)
inception_5b_double_3x3_2_bn_out = self.inception_5b_double_3x3_2_bn(inception_5b_double_3x3_2_out)
inception_5b_relu_double_3x3_2_out = self.inception_5b_relu_double_3x3_2(inception_5b_double_3x3_2_bn_out)
inception_5b_pool_out = self.inception_5b_pool(inception_5a_output_out)
inception_5b_pool_proj_out = self.inception_5b_pool_proj(inception_5b_pool_out)
inception_5b_pool_proj_bn_out = self.inception_5b_pool_proj_bn(inception_5b_pool_proj_out)
inception_5b_relu_pool_proj_out = self.inception_5b_relu_pool_proj(inception_5b_pool_proj_bn_out)
inception_5b_output_out = torch.cat([inception_5b_relu_1x1_out,inception_5b_relu_3x3_out,inception_5b_relu_double_3x3_2_out,inception_5b_relu_pool_proj_out], 1)
return inception_5b_output_out
def logits(self, features):
    """Global-average-pool the feature map, flatten, and apply the classifier.

    Pools over the full spatial extent of ``features`` (assumes square maps,
    since only ``shape[2]`` is used as the kernel size).
    """
    pool_size = features.shape[2]
    pooled = F.avg_pool2d(features, kernel_size=pool_size)
    flat = pooled.view(pooled.size(0), -1)
    return self.last_linear(flat)
def forward(self, input):
    """Run the convolutional backbone, then the classification head."""
    feats = self.features(input)
    return self.logits(feats)
# Smoke test: build a (randomly initialised) BN-Inception model when this
# module is executed directly.  No forward pass is run.
if __name__ == '__main__':
    model = bninception()
|
Cadene/pretrained-models.pytorch | pretrainedmodels/models/fbresnet/resnet152_load.py | conv3x3 | python | def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=True) | 3x3 convolution with padding | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/fbresnet/resnet152_load.py#L20-L23 | null | from __future__ import print_function, division, absolute_import
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=True)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=True)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
from torch.legacy import nn as nnl
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=True)
#self.conv1 = nnl.SpatialConvolution(3, 64, 7, 7, 2, 2, 3, 3)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=True),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
self.conv1_input = x.clone()
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model (BasicBlock x [2, 2, 2, 2]).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet18'])
        net.load_state_dict(weights)
    return net
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
import torchfile
from torch.utils.serialization import load_lua
import torch
netparams = torchfile.load('data/resnet152/netparams.t7')
#netparams2 = load_lua('data/resnet152/netparams.t7')
#import ipdb; ipdb.set_trace()
netoutputs = []
for i in range(1, 12):
path = 'data/resnet152/output{}.t7'.format(i)
out = load_lua(path)
#print(out.size())
if out.dim()==4:
pass#out.transpose_(2, 3)
netoutputs.append(out)
net = resnet152()
state_dict = net.state_dict()
import collections
s = collections.OrderedDict()
i=0
for key in state_dict.keys():
new = torch.from_numpy(netparams[i])
s[key] = new
if s[key].dim() == 4:
pass#s[key].transpose_(2,3)
i += 1
net.load_state_dict(s)
net.conv1.register_forward_hook(lambda self, input, output: \
print('conv1', torch.dist(output.data, netoutputs[0])))
net.bn1.register_forward_hook(lambda self, input, output: \
print('bn1', torch.dist(output.data, netoutputs[1])))
net.relu.register_forward_hook(lambda self, input, output: \
print('relu', torch.dist(output.data, netoutputs[2])))
net.maxpool.register_forward_hook(lambda self, input, output: \
print('maxpool', torch.dist(output.data, netoutputs[3])))
net.layer1.register_forward_hook(lambda self, input, output: \
print('layer1', torch.dist(output.data, netoutputs[4])))
net.layer2.register_forward_hook(lambda self, input, output: \
print('layer2', torch.dist(output.data, netoutputs[5])))
net.layer3.register_forward_hook(lambda self, input, output: \
print('layer3', torch.dist(output.data, netoutputs[6])))
net.layer4.register_forward_hook(lambda self, input, output: \
print('layer4', torch.dist(output.data, netoutputs[7])))
net.avgpool.register_forward_hook(lambda self, input, output: \
print('avgpool', torch.dist(output.data, netoutputs[8])))
net.fc.register_forward_hook(lambda self, input, output: \
print('fc', torch.dist(output.data, netoutputs[10])))
net.eval()
input_data = torch.ones(1,3,224,224)
input_data[0][0][0][0] = -1
from PIL import Image
import torchvision.transforms as transforms
input_data[0] = transforms.ToTensor()(Image.open('data/cat_224.png'))
print('cat sum', input_data.sum())
input = torch.autograd.Variable(input_data)
output = net.forward(input)
torch.save(s, 'data/resnet152.pth')
|
Cadene/pretrained-models.pytorch | pretrainedmodels/models/fbresnet/resnet152_load.py | resnet18 | python | def resnet18(pretrained=False, **kwargs):
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model | Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/fbresnet/resnet152_load.py#L160-L169 | null | from __future__ import print_function, division, absolute_import
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 (fbresnet variant: keeps the conv bias)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=True,
    )
class BasicBlock(nn.Module):
    """Residual unit of two 3x3 convolutions with an identity/projected shortcut."""

    expansion = 1  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the shortcut only when a downsample module was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual unit (output = planes * 4 channels)."""

    expansion = 4  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # fbresnet keeps the convolution biases (bias=True), unlike torchvision.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=True)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=True)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
from torch.legacy import nn as nnl
class ResNet(nn.Module):
    """Generic ResNet backbone (fbresnet variant: convolutions keep their bias).

    Args:
        block: residual-unit class exposing an ``expansion`` class attribute
            and accepting ``(inplanes, planes, stride, downsample)``.
        layers: number of units in each of the four stages.
        num_classes: size of the final linear classifier.
    """

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7/2 conv -> BN -> ReLU -> 3x3/2 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=True)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style initialisation for convs; unit scale / zero shift for BN.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual units; only the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection so the shortcut matches shape and channel count.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=True),
                nn.BatchNorm2d(planes * block.expansion),
            )
        units = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        units.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*units)

    def forward(self, x):
        x = self.conv1(x)
        # Kept for parity with the original: cache the stem conv output.
        self.conv1_input = x.clone()
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return self.fc(x)
def resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model (BasicBlock x [3, 4, 6, 3]).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet34'])
        net.load_state_dict(weights)
    return net
def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model (Bottleneck x [3, 4, 6, 3]).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    depths = [3, 4, 6, 3]
    net = ResNet(Bottleneck, depths, **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return net
def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model (Bottleneck x [3, 4, 23, 3]).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return net
def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model (Bottleneck x [3, 8, 36, 3]).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet152'])
        net.load_state_dict(weights)
    return net
# ---------------------------------------------------------------------------
# One-off conversion/verification script: loads Torch7 (.t7) ResNet-152
# parameters, copies them into the PyTorch model defined above, compares
# intermediate activations against reference outputs dumped from Lua, and
# serialises the converted state dict.
# NOTE(review): torch.utils.serialization.load_lua and torch.autograd.Variable
# are long-deprecated; this script only runs on old PyTorch versions.
# ---------------------------------------------------------------------------
import torchfile
from torch.utils.serialization import load_lua
import torch

# Flat, ordered list of parameter arrays exported from the Lua model.
netparams = torchfile.load('data/resnet152/netparams.t7')
#netparams2 = load_lua('data/resnet152/netparams.t7')
#import ipdb; ipdb.set_trace()

# Reference activations output1.t7 .. output11.t7, one per Lua layer.
netoutputs = []
for i in range(1, 12):
    path = 'data/resnet152/output{}.t7'.format(i)
    out = load_lua(path)
    #print(out.size())
    if out.dim()==4:
        pass#out.transpose_(2, 3)
    netoutputs.append(out)

net = resnet152()
state_dict = net.state_dict()

import collections
# Rebuild the state dict by position: assumes netparams is ordered exactly
# like net.state_dict() keys -- TODO confirm against the Lua export order.
s = collections.OrderedDict()
i=0
for key in state_dict.keys():
    new = torch.from_numpy(netparams[i])
    s[key] = new
    if s[key].dim() == 4:
        pass#s[key].transpose_(2,3)
    i += 1
net.load_state_dict(s)

# Compare each stage's activation with the Lua reference (a distance near 0
# means the conversion is faithful).  netoutputs[9] is intentionally unused;
# presumably it is the flatten/view output -- verify against the Lua dump.
net.conv1.register_forward_hook(lambda self, input, output: \
    print('conv1', torch.dist(output.data, netoutputs[0])))
net.bn1.register_forward_hook(lambda self, input, output: \
    print('bn1', torch.dist(output.data, netoutputs[1])))
net.relu.register_forward_hook(lambda self, input, output: \
    print('relu', torch.dist(output.data, netoutputs[2])))
net.maxpool.register_forward_hook(lambda self, input, output: \
    print('maxpool', torch.dist(output.data, netoutputs[3])))
net.layer1.register_forward_hook(lambda self, input, output: \
    print('layer1', torch.dist(output.data, netoutputs[4])))
net.layer2.register_forward_hook(lambda self, input, output: \
    print('layer2', torch.dist(output.data, netoutputs[5])))
net.layer3.register_forward_hook(lambda self, input, output: \
    print('layer3', torch.dist(output.data, netoutputs[6])))
net.layer4.register_forward_hook(lambda self, input, output: \
    print('layer4', torch.dist(output.data, netoutputs[7])))
net.avgpool.register_forward_hook(lambda self, input, output: \
    print('avgpool', torch.dist(output.data, netoutputs[8])))
net.fc.register_forward_hook(lambda self, input, output: \
    print('fc', torch.dist(output.data, netoutputs[10])))
net.eval()

# Probe input: ones with one flipped pixel, then overwritten by the test image.
input_data = torch.ones(1,3,224,224)
input_data[0][0][0][0] = -1
from PIL import Image
import torchvision.transforms as transforms
input_data[0] = transforms.ToTensor()(Image.open('data/cat_224.png'))
print('cat sum', input_data.sum())
input = torch.autograd.Variable(input_data)  # NOTE: shadows the builtin `input`
output = net.forward(input)

# Persist the converted weights for later reuse.
torch.save(s, 'data/resnet152.pth')
|
Cadene/pretrained-models.pytorch | pretrainedmodels/models/fbresnet/resnet152_load.py | resnet50 | python | def resnet50(pretrained=False, **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model | Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/fbresnet/resnet152_load.py#L184-L193 | null | from __future__ import print_function, division, absolute_import
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=True)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=True)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=True)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
from torch.legacy import nn as nnl
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=True)
#self.conv1 = nnl.SpatialConvolution(3, 64, 7, 7, 2, 2, 3, 3)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=True),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
self.conv1_input = x.clone()
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
import torchfile
from torch.utils.serialization import load_lua
import torch
netparams = torchfile.load('data/resnet152/netparams.t7')
#netparams2 = load_lua('data/resnet152/netparams.t7')
#import ipdb; ipdb.set_trace()
netoutputs = []
for i in range(1, 12):
path = 'data/resnet152/output{}.t7'.format(i)
out = load_lua(path)
#print(out.size())
if out.dim()==4:
pass#out.transpose_(2, 3)
netoutputs.append(out)
net = resnet152()
state_dict = net.state_dict()
import collections
s = collections.OrderedDict()
i=0
for key in state_dict.keys():
new = torch.from_numpy(netparams[i])
s[key] = new
if s[key].dim() == 4:
pass#s[key].transpose_(2,3)
i += 1
net.load_state_dict(s)
net.conv1.register_forward_hook(lambda self, input, output: \
print('conv1', torch.dist(output.data, netoutputs[0])))
net.bn1.register_forward_hook(lambda self, input, output: \
print('bn1', torch.dist(output.data, netoutputs[1])))
net.relu.register_forward_hook(lambda self, input, output: \
print('relu', torch.dist(output.data, netoutputs[2])))
net.maxpool.register_forward_hook(lambda self, input, output: \
print('maxpool', torch.dist(output.data, netoutputs[3])))
net.layer1.register_forward_hook(lambda self, input, output: \
print('layer1', torch.dist(output.data, netoutputs[4])))
net.layer2.register_forward_hook(lambda self, input, output: \
print('layer2', torch.dist(output.data, netoutputs[5])))
net.layer3.register_forward_hook(lambda self, input, output: \
print('layer3', torch.dist(output.data, netoutputs[6])))
net.layer4.register_forward_hook(lambda self, input, output: \
print('layer4', torch.dist(output.data, netoutputs[7])))
net.avgpool.register_forward_hook(lambda self, input, output: \
print('avgpool', torch.dist(output.data, netoutputs[8])))
net.fc.register_forward_hook(lambda self, input, output: \
print('fc', torch.dist(output.data, netoutputs[10])))
net.eval()
input_data = torch.ones(1,3,224,224)
input_data[0][0][0][0] = -1
from PIL import Image
import torchvision.transforms as transforms
input_data[0] = transforms.ToTensor()(Image.open('data/cat_224.png'))
print('cat sum', input_data.sum())
input = torch.autograd.Variable(input_data)
output = net.forward(input)
torch.save(s, 'data/resnet152.pth')
|
Cadene/pretrained-models.pytorch | pretrainedmodels/models/nasnet_mobile.py | nasnetamobile | python | def nasnetamobile(num_classes=1000, pretrained='imagenet'):
r"""NASNetALarge model architecture from the
`"NASNet" <https://arxiv.org/abs/1707.07012>`_ paper.
"""
if pretrained:
settings = pretrained_settings['nasnetamobile'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
# both 'imagenet'&'imagenet+background' are loaded from same parameters
model = NASNetAMobile(num_classes=num_classes)
model.load_state_dict(model_zoo.load_url(settings['url'], map_location=None))
# if pretrained == 'imagenet':
# new_last_linear = nn.Linear(model.last_linear.in_features, 1000)
# new_last_linear.weight.data = model.last_linear.weight.data[1:]
# new_last_linear.bias.data = model.last_linear.bias.data[1:]
# model.last_linear = new_last_linear
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
else:
settings = pretrained_settings['nasnetamobile']['imagenet']
model = NASNetAMobile(num_classes=num_classes)
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model | r"""NASNetALarge model architecture from the
`"NASNet" <https://arxiv.org/abs/1707.07012>`_ paper. | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/nasnet_mobile.py#L618-L652 | null | """
NASNet Mobile
Thanks to Anastasiia (https://github.com/DagnyT) for the great help, support and motivation!
------------------------------------------------------------------------------------
Architecture | Top-1 Acc | Top-5 Acc | Multiply-Adds | Params (M)
------------------------------------------------------------------------------------
| NASNet-A (4 @ 1056) | 74.08% | 91.74% | 564 M | 5.3 |
------------------------------------------------------------------------------------
# References:
- [Learning Transferable Architectures for Scalable Image Recognition]
(https://arxiv.org/abs/1707.07012)
"""
from __future__ import print_function, division, absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
import numpy as np
pretrained_settings = {
'nasnetamobile': {
'imagenet': {
#'url': 'https://github.com/veronikayurchuk/pretrained-models.pytorch/releases/download/v1.0/nasnetmobile-7e03cead.pth.tar',
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/nasnetamobile-7e03cead.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224], # resize 256
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1000
},
# 'imagenet+background': {
# # 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth',
# 'input_space': 'RGB',
# 'input_size': [3, 224, 224], # resize 256
# 'input_range': [0, 1],
# 'mean': [0.5, 0.5, 0.5],
# 'std': [0.5, 0.5, 0.5],
# 'num_classes': 1001
# }
}
}
class MaxPoolPad(nn.Module):
def __init__(self):
super(MaxPoolPad, self).__init__()
self.pad = nn.ZeroPad2d((1, 0, 1, 0))
self.pool = nn.MaxPool2d(3, stride=2, padding=1)
def forward(self, x):
x = self.pad(x)
x = self.pool(x)
x = x[:, :, 1:, 1:].contiguous()
return x
class AvgPoolPad(nn.Module):
def __init__(self, stride=2, padding=1):
super(AvgPoolPad, self).__init__()
self.pad = nn.ZeroPad2d((1, 0, 1, 0))
self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False)
def forward(self, x):
x = self.pad(x)
x = self.pool(x)
x = x[:, :, 1:, 1:].contiguous()
return x
class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution.

    A per-channel spatial (depthwise) convolution followed by a 1x1
    pointwise convolution that mixes channels.
    """

    def __init__(self, in_channels, out_channels, dw_kernel, dw_stride, dw_padding, bias=False):
        super(SeparableConv2d, self).__init__()
        # groups=in_channels makes the first conv depthwise: one filter per channel.
        self.depthwise_conv2d = nn.Conv2d(in_channels, in_channels, dw_kernel,
                                          stride=dw_stride,
                                          padding=dw_padding,
                                          bias=bias,
                                          groups=in_channels)
        self.pointwise_conv2d = nn.Conv2d(in_channels, out_channels, 1, stride=1, bias=bias)

    def forward(self, x):
        return self.pointwise_conv2d(self.depthwise_conv2d(x))
class BranchSeparables(nn.Module):
    """ReLU -> separable conv (stride) -> BN -> ReLU -> separable conv (stride 1) -> BN.

    When constructed with name='specific', the input is zero-padded on the
    top-left before the strided separable conv and the extra row/column is
    cropped afterwards, emulating TensorFlow's asymmetric 'same' padding
    for stride-2 convolutions.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, name=None, bias=False):
        super(BranchSeparables, self).__init__()
        self.relu = nn.ReLU()
        self.separable_1 = SeparableConv2d(in_channels, in_channels, kernel_size, stride, padding, bias=bias)
        self.bn_sep_1 = nn.BatchNorm2d(in_channels, eps=0.001, momentum=0.1, affine=True)
        self.relu1 = nn.ReLU()
        self.separable_2 = SeparableConv2d(in_channels, out_channels, kernel_size, 1, padding, bias=bias)
        self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)
        # 'specific' switches on the TF-style pad/crop path in forward().
        self.name = name
    def forward(self, x):
        x = self.relu(x)
        if self.name == 'specific':
            # Pad one pixel on the top-left before the strided conv ...
            x = nn.ZeroPad2d((1, 0, 1, 0))(x)
        x = self.separable_1(x)
        if self.name == 'specific':
            # ... and crop the first row/column back off afterwards.
            x = x[:, :, 1:, 1:].contiguous()
        x = self.bn_sep_1(x)
        x = self.relu1(x)
        x = self.separable_2(x)
        x = self.bn_sep_2(x)
        return x
class BranchSeparablesStem(nn.Module):
    """Stem variant of BranchSeparables.

    Unlike BranchSeparables, the first separable conv already maps
    in_channels -> out_channels, and there is no 'specific' padding path.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):
        super(BranchSeparablesStem, self).__init__()
        self.relu = nn.ReLU()
        self.separable_1 = SeparableConv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
        self.bn_sep_1 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)
        self.relu1 = nn.ReLU()
        self.separable_2 = SeparableConv2d(out_channels, out_channels, kernel_size, 1, padding, bias=bias)
        self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)

    def forward(self, x):
        out = self.bn_sep_1(self.separable_1(self.relu(x)))
        out = self.bn_sep_2(self.separable_2(self.relu1(out)))
        return out
class BranchSeparablesReduction(BranchSeparables):
    """BranchSeparables variant used inside reduction cells.

    Always applies a top-left zero-pad before separable_1 and crops one
    row/column after it (regardless of the parent's 'name' flag),
    reproducing TF's asymmetric 'same' padding for the strided conv.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, z_padding=1, bias=False):
        BranchSeparables.__init__(self, in_channels, out_channels, kernel_size, stride, padding, bias)
        self.padding = nn.ZeroPad2d((z_padding, 0, z_padding, 0))
    def forward(self, x):
        x = self.relu(x)
        x = self.padding(x)
        x = self.separable_1(x)
        # Drop the extra row/column introduced by the asymmetric padding.
        x = x[:, :, 1:, 1:].contiguous()
        x = self.bn_sep_1(x)
        x = self.relu1(x)
        x = self.separable_2(x)
        x = self.bn_sep_2(x)
        return x
class CellStem0(nn.Module):
    """First NASNet stem cell: reduces spatial resolution by 2.

    Takes the stem conv output, builds five 'combination iterations'
    (pairs of branches summed together) and concatenates four of them
    on the channel dimension (4 * num_filters output channels).
    """
    def __init__(self, stem_filters, num_filters=42):
        super(CellStem0, self).__init__()
        self.num_filters = num_filters
        self.stem_filters = stem_filters
        # 1x1 projection of the stem features used by the left-hand branches.
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(self.stem_filters, self.num_filters, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(self.num_filters, eps=0.001, momentum=0.1, affine=True))
        self.comb_iter_0_left = BranchSeparables(self.num_filters, self.num_filters, 5, 2, 2)
        self.comb_iter_0_right = BranchSeparablesStem(self.stem_filters, self.num_filters, 7, 2, 3, bias=False)
        self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1)
        self.comb_iter_1_right = BranchSeparablesStem(self.stem_filters, self.num_filters, 7, 2, 3, bias=False)
        self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
        self.comb_iter_2_right = BranchSeparablesStem(self.stem_filters, self.num_filters, 5, 2, 2, bias=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(self.num_filters, self.num_filters, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1)
    def forward(self, x):
        # Left branches consume the 1x1-projected features; right branches
        # consume the raw stem features.
        x1 = self.conv_1x1(x)
        x_comb_iter_0_left = self.comb_iter_0_left(x1)
        x_comb_iter_0_right = self.comb_iter_0_right(x)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x1)
        x_comb_iter_1_right = self.comb_iter_1_right(x)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x1)
        x_comb_iter_2_right = self.comb_iter_2_right(x)
        x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
        x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
        x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
        x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
        x_comb_iter_4_right = self.comb_iter_4_right(x1)
        x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
        # Only iterations 1-4 are concatenated (iter 0 feeds iters 3 and 4).
        x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class CellStem1(nn.Module):
    """Second NASNet stem cell: reduces resolution by 2 again.

    Consumes both the stem conv output (x_conv0) and the previous stem
    cell output (x_stem_0). x_conv0 is downsampled through two shifted
    avg-pool paths whose outputs are concatenated and batch-normed.
    """
    def __init__(self, stem_filters, num_filters):
        super(CellStem1, self).__init__()
        self.num_filters = num_filters
        self.stem_filters = stem_filters
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(2*self.num_filters, self.num_filters, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(self.num_filters, eps=0.001, momentum=0.1, affine=True))
        self.relu = nn.ReLU()
        # Path 1: stride-2 1x1 avg-pool + 1x1 conv (half the channels).
        self.path_1 = nn.Sequential()
        self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_1.add_module('conv', nn.Conv2d(self.stem_filters, self.num_filters//2, 1, stride=1, bias=False))
        # Path 2: same, but shifted one pixel via pad+crop (see forward).
        # NOTE(review): nn.ModuleList is used as a plain named container here;
        # submodules are registered via add_module and accessed as attributes.
        self.path_2 = nn.ModuleList()
        self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1)))
        self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_2.add_module('conv', nn.Conv2d(self.stem_filters, self.num_filters//2, 1, stride=1, bias=False))
        self.final_path_bn = nn.BatchNorm2d(self.num_filters, eps=0.001, momentum=0.1, affine=True)
        self.comb_iter_0_left = BranchSeparables(self.num_filters, self.num_filters, 5, 2, 2, name='specific', bias=False)
        self.comb_iter_0_right = BranchSeparables(self.num_filters, self.num_filters, 7, 2, 3, name='specific', bias=False)
        # self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1)
        self.comb_iter_1_left = MaxPoolPad()
        self.comb_iter_1_right = BranchSeparables(self.num_filters, self.num_filters, 7, 2, 3, name='specific', bias=False)
        # self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
        self.comb_iter_2_left = AvgPoolPad()
        self.comb_iter_2_right = BranchSeparables(self.num_filters, self.num_filters, 5, 2, 2, name='specific', bias=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(self.num_filters, self.num_filters, 3, 1, 1, name='specific', bias=False)
        # self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1)
        self.comb_iter_4_right = MaxPoolPad()
    def forward(self, x_conv0, x_stem_0):
        x_left = self.conv_1x1(x_stem_0)
        x_relu = self.relu(x_conv0)
        # path 1
        x_path1 = self.path_1(x_relu)
        # path 2: pad bottom-right, crop top-left -> one-pixel shifted sampling.
        x_path2 = self.path_2.pad(x_relu)
        x_path2 = x_path2[:, :, 1:, 1:]
        x_path2 = self.path_2.avgpool(x_path2)
        x_path2 = self.path_2.conv(x_path2)
        # final path
        x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
        x_comb_iter_0_left = self.comb_iter_0_left(x_left)
        x_comb_iter_0_right = self.comb_iter_0_right(x_right)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x_left)
        x_comb_iter_1_right = self.comb_iter_1_right(x_right)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x_left)
        x_comb_iter_2_right = self.comb_iter_2_right(x_right)
        x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
        x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
        x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
        x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
        x_comb_iter_4_right = self.comb_iter_4_right(x_left)
        x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
        x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class FirstCell(nn.Module):
    """First normal cell after a resolution change.

    The previous-cell input (x_prev) has a different spatial size, so it is
    downsampled through two shifted avg-pool paths (like CellStem1) before
    being combined. Output is the concatenation of x_left and five
    combination iterations (6 * out_channels_left*? channels downstream).
    """
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(FirstCell, self).__init__()
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.relu = nn.ReLU()
        self.path_1 = nn.Sequential()
        self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_1.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        # NOTE(review): ModuleList used as a named container; see CellStem1.
        self.path_2 = nn.ModuleList()
        self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1)))
        self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_2.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.final_path_bn = nn.BatchNorm2d(out_channels_left * 2, eps=0.001, momentum=0.1, affine=True)
        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_1_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_2_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_3_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    def forward(self, x, x_prev):
        x_relu = self.relu(x_prev)
        # path 1
        x_path1 = self.path_1(x_relu)
        # path 2: pad bottom-right, crop top-left -> one-pixel shifted sampling.
        x_path2 = self.path_2.pad(x_relu)
        x_path2 = x_path2[:, :, 1:, 1:]
        x_path2 = self.path_2.avgpool(x_path2)
        x_path2 = self.path_2.conv(x_path2)
        # final path
        x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
        x_right = self.conv_1x1(x)
        x_comb_iter_0_left = self.comb_iter_0_left(x_right)
        x_comb_iter_0_right = self.comb_iter_0_right(x_left)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x_left)
        x_comb_iter_1_right = self.comb_iter_1_right(x_left)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x_right)
        x_comb_iter_2 = x_comb_iter_2_left + x_left
        x_comb_iter_3_left = self.comb_iter_3_left(x_left)
        x_comb_iter_3_right = self.comb_iter_3_right(x_left)
        x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
        x_comb_iter_4_left = self.comb_iter_4_left(x_right)
        x_comb_iter_4 = x_comb_iter_4_left + x_right
        x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class NormalCell(nn.Module):
    """NASNet-A normal cell: keeps spatial resolution unchanged.

    Both inputs (current cell input x and previous cell output x_prev)
    are 1x1-projected, then combined through five branch pairs; the
    projection of x_prev plus all five sums are concatenated.
    """
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(NormalCell, self).__init__()
        self.conv_prev_1x1 = nn.Sequential()
        self.conv_prev_1x1.add_module('relu', nn.ReLU())
        self.conv_prev_1x1.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.conv_prev_1x1.add_module('bn', nn.BatchNorm2d(out_channels_left, eps=0.001, momentum=0.1, affine=True))
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)
        self.comb_iter_1_left = BranchSeparables(out_channels_left, out_channels_left, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)
        self.comb_iter_2_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_3_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    def forward(self, x, x_prev):
        x_left = self.conv_prev_1x1(x_prev)
        x_right = self.conv_1x1(x)
        x_comb_iter_0_left = self.comb_iter_0_left(x_right)
        x_comb_iter_0_right = self.comb_iter_0_right(x_left)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x_left)
        x_comb_iter_1_right = self.comb_iter_1_right(x_left)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x_right)
        x_comb_iter_2 = x_comb_iter_2_left + x_left
        x_comb_iter_3_left = self.comb_iter_3_left(x_left)
        x_comb_iter_3_right = self.comb_iter_3_right(x_left)
        x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
        x_comb_iter_4_left = self.comb_iter_4_left(x_right)
        x_comb_iter_4 = x_comb_iter_4_left + x_right
        x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class ReductionCell0(nn.Module):
    """First NASNet-A reduction cell: halves spatial resolution.

    Uses BranchSeparablesReduction branches (always pad/crop) plus the
    padded pooling modules. Concatenates combination iterations 1-4.
    """
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell0, self).__init__()
        self.conv_prev_1x1 = nn.Sequential()
        self.conv_prev_1x1.add_module('relu', nn.ReLU())
        self.conv_prev_1x1.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.conv_prev_1x1.add_module('bn', nn.BatchNorm2d(out_channels_left, eps=0.001, momentum=0.1, affine=True))
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.comb_iter_0_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
        self.comb_iter_1_left = MaxPoolPad()
        self.comb_iter_1_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
        self.comb_iter_2_left = AvgPoolPad()
        self.comb_iter_2_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_4_right = MaxPoolPad()
    def forward(self, x, x_prev):
        x_left = self.conv_prev_1x1(x_prev)
        x_right = self.conv_1x1(x)
        x_comb_iter_0_left = self.comb_iter_0_left(x_right)
        x_comb_iter_0_right = self.comb_iter_0_right(x_left)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x_right)
        x_comb_iter_1_right = self.comb_iter_1_right(x_left)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x_right)
        x_comb_iter_2_right = self.comb_iter_2_right(x_left)
        x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
        x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
        x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
        x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
        x_comb_iter_4_right = self.comb_iter_4_right(x_right)
        x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
        x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class ReductionCell1(nn.Module):
    """Second NASNet-A reduction cell: halves spatial resolution.

    Same topology as ReductionCell0 but uses BranchSeparables with
    name='specific' (pad/crop on the strided conv) instead of
    BranchSeparablesReduction. Concatenates combination iterations 1-4.
    """
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell1, self).__init__()
        self.conv_prev_1x1 = nn.Sequential()
        self.conv_prev_1x1.add_module('relu', nn.ReLU())
        self.conv_prev_1x1.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.conv_prev_1x1.add_module('bn', nn.BatchNorm2d(out_channels_left, eps=0.001, momentum=0.1, affine=True))
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, name='specific', bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, name='specific', bias=False)
        # self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1)
        self.comb_iter_1_left = MaxPoolPad()
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, name='specific', bias=False)
        # self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
        self.comb_iter_2_left = AvgPoolPad()
        self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, name='specific', bias=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, name='specific', bias=False)
        # self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1)
        self.comb_iter_4_right =MaxPoolPad()
    def forward(self, x, x_prev):
        x_left = self.conv_prev_1x1(x_prev)
        x_right = self.conv_1x1(x)
        x_comb_iter_0_left = self.comb_iter_0_left(x_right)
        x_comb_iter_0_right = self.comb_iter_0_right(x_left)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x_right)
        x_comb_iter_1_right = self.comb_iter_1_right(x_left)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x_right)
        x_comb_iter_2_right = self.comb_iter_2_right(x_left)
        x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
        x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
        x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
        x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
        x_comb_iter_4_right = self.comb_iter_4_right(x_right)
        x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
        x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class NASNetAMobile(nn.Module):
    """NASNetAMobile (4 @ 1056)

    Mobile-sized NASNet-A: conv stem, two reduction stem cells, then three
    groups of four cells separated by reduction cells, followed by
    ReLU -> 7x7 avg-pool -> dropout -> linear classifier.

    Args:
        num_classes: size of the final classification layer.
        stem_filters: output channels of the stem convolution.
        penultimate_filters: channels entering the classifier; the base
            cell width is penultimate_filters // 24.
        filters_multiplier: channel growth factor at each reduction stage.
    """
    def __init__(self, num_classes=1000, stem_filters=32, penultimate_filters=1056, filters_multiplier=2):
        super(NASNetAMobile, self).__init__()
        self.num_classes = num_classes
        self.stem_filters = stem_filters
        self.penultimate_filters = penultimate_filters
        self.filters_multiplier = filters_multiplier
        filters = self.penultimate_filters // 24
        # 24 is default value for the architecture
        self.conv0 = nn.Sequential()
        self.conv0.add_module('conv', nn.Conv2d(in_channels=3, out_channels=self.stem_filters, kernel_size=3, padding=0, stride=2,
                                                bias=False))
        self.conv0.add_module('bn', nn.BatchNorm2d(self.stem_filters, eps=0.001, momentum=0.1, affine=True))
        self.cell_stem_0 = CellStem0(self.stem_filters, num_filters=filters // (filters_multiplier ** 2))
        self.cell_stem_1 = CellStem1(self.stem_filters, num_filters=filters // filters_multiplier)
        # Channel bookkeeping per cell (in_left, out_left | in_right, out_right)
        # is annotated in units of `filters` in the trailing comments.
        self.cell_0 = FirstCell(in_channels_left=filters, out_channels_left=filters//2, # 1, 0.5
                                in_channels_right=2*filters, out_channels_right=filters) # 2, 1
        self.cell_1 = NormalCell(in_channels_left=2*filters, out_channels_left=filters, # 2, 1
                                 in_channels_right=6*filters, out_channels_right=filters) # 6, 1
        self.cell_2 = NormalCell(in_channels_left=6*filters, out_channels_left=filters, # 6, 1
                                 in_channels_right=6*filters, out_channels_right=filters) # 6, 1
        self.cell_3 = NormalCell(in_channels_left=6*filters, out_channels_left=filters, # 6, 1
                                 in_channels_right=6*filters, out_channels_right=filters) # 6, 1
        self.reduction_cell_0 = ReductionCell0(in_channels_left=6*filters, out_channels_left=2*filters, # 6, 2
                                               in_channels_right=6*filters, out_channels_right=2*filters) # 6, 2
        self.cell_6 = FirstCell(in_channels_left=6*filters, out_channels_left=filters, # 6, 1
                                in_channels_right=8*filters, out_channels_right=2*filters) # 8, 2
        self.cell_7 = NormalCell(in_channels_left=8*filters, out_channels_left=2*filters, # 8, 2
                                 in_channels_right=12*filters, out_channels_right=2*filters) # 12, 2
        self.cell_8 = NormalCell(in_channels_left=12*filters, out_channels_left=2*filters, # 12, 2
                                 in_channels_right=12*filters, out_channels_right=2*filters) # 12, 2
        self.cell_9 = NormalCell(in_channels_left=12*filters, out_channels_left=2*filters, # 12, 2
                                 in_channels_right=12*filters, out_channels_right=2*filters) # 12, 2
        self.reduction_cell_1 = ReductionCell1(in_channels_left=12*filters, out_channels_left=4*filters, # 12, 4
                                               in_channels_right=12*filters, out_channels_right=4*filters) # 12, 4
        self.cell_12 = FirstCell(in_channels_left=12*filters, out_channels_left=2*filters, # 12, 2
                                 in_channels_right=16*filters, out_channels_right=4*filters) # 16, 4
        self.cell_13 = NormalCell(in_channels_left=16*filters, out_channels_left=4*filters, # 16, 4
                                  in_channels_right=24*filters, out_channels_right=4*filters) # 24, 4
        self.cell_14 = NormalCell(in_channels_left=24*filters, out_channels_left=4*filters, # 24, 4
                                  in_channels_right=24*filters, out_channels_right=4*filters) # 24, 4
        self.cell_15 = NormalCell(in_channels_left=24*filters, out_channels_left=4*filters, # 24, 4
                                  in_channels_right=24*filters, out_channels_right=4*filters) # 24, 4
        self.relu = nn.ReLU()
        self.avg_pool = nn.AvgPool2d(7, stride=1, padding=0)
        self.dropout = nn.Dropout()
        self.last_linear = nn.Linear(24*filters, self.num_classes)
    def features(self, input):
        # Each cell receives the previous two cell outputs (skip connection).
        x_conv0 = self.conv0(input)
        x_stem_0 = self.cell_stem_0(x_conv0)
        x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0)
        x_cell_0 = self.cell_0(x_stem_1, x_stem_0)
        x_cell_1 = self.cell_1(x_cell_0, x_stem_1)
        x_cell_2 = self.cell_2(x_cell_1, x_cell_0)
        x_cell_3 = self.cell_3(x_cell_2, x_cell_1)
        x_reduction_cell_0 = self.reduction_cell_0(x_cell_3, x_cell_2)
        x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_3)
        x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0)
        x_cell_8 = self.cell_8(x_cell_7, x_cell_6)
        x_cell_9 = self.cell_9(x_cell_8, x_cell_7)
        x_reduction_cell_1 = self.reduction_cell_1(x_cell_9, x_cell_8)
        x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_9)
        x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1)
        x_cell_14 = self.cell_14(x_cell_13, x_cell_12)
        x_cell_15 = self.cell_15(x_cell_14, x_cell_13)
        return x_cell_15
    def logits(self, features):
        # ReLU -> global 7x7 avg-pool -> flatten -> dropout -> classifier.
        x = self.relu(features)
        x = self.avg_pool(x)
        x = x.view(x.size(0), -1)
        x = self.dropout(x)
        x = self.last_linear(x)
        return x
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
if __name__ == "__main__":
    # Smoke test: forward a random 2-image batch and report the logit shape.
    net = NASNetAMobile()
    batch = Variable(torch.randn(2, 3, 224, 224))
    logits = net(batch)
    print(logits.size())
|
Cadene/pretrained-models.pytorch | pretrainedmodels/models/cafferesnet.py | cafferesnet101 | python | def cafferesnet101(num_classes=1000, pretrained='imagenet'):
model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['cafferesnet101'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model | Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/cafferesnet.py#L168-L184 | null | from __future__ import print_function, division, absolute_import
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
pretrained_settings = {
'cafferesnet101': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/cafferesnet101-9d633cc0.pth',
'input_space': 'BGR',
'input_size': [3, 224, 224],
'input_range': [0, 255],
'mean': [102.9801, 115.9465, 122.7717],
'std': [1, 1, 1],
'num_classes': 1000
}
}
}
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style).

    y = relu(bn2(conv2(relu(bn1(conv1(x))))) + shortcut(x)), where the
    shortcut is identity unless a downsample module is supplied.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += identity
        return self.relu(out)
class Bottleneck(nn.Module):
    """Caffe-style bottleneck residual block.

    Note: the spatial stride sits on the 1x1 conv1 (torchvision puts it on
    conv2); this matches the original Caffe ResNet weights.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # stride on the 1x1 reduction conv (Caffe variant)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
class ResNet(nn.Module):
    """Caffe-style ResNet backbone.

    Differences from torchvision's ResNet: the stem max-pool uses
    padding=0 with ceil_mode=True, the (Caffe) Bottleneck used here puts
    the stride on conv1, and the classifier attribute is named
    `last_linear` rather than `fc`.
    """
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True) # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # it is slightly better whereas slower to set stride = 1
        # self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
        self.avgpool = nn.AvgPool2d(7)
        self.last_linear = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convs; BN starts as identity (weight=1, bias=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Build one stage; the first block carries the stride and (if channel
        # counts differ) a 1x1 projection shortcut. Mutates self.inplanes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def features(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
    def logits(self, x):
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.last_linear(x)
        return x
    def forward(self, x):
        x = self.features(x)
        x = self.logits(x)
        return x
|
Cadene/pretrained-models.pytorch | pretrainedmodels/models/fbresnet.py | fbresnet152 | python | def fbresnet152(num_classes=1000, pretrained='imagenet'):
model = FBResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['fbresnet152'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model | Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/fbresnet.py#L216-L233 | null | from __future__ import print_function, division, absolute_import
import torch.nn as nn
import torch.nn.functional as F
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['FBResNet',
#'fbresnet18', 'fbresnet34', 'fbresnet50', 'fbresnet101',
'fbresnet152']
pretrained_settings = {
'fbresnet152': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/fbresnet152-2e20f6b4.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
'num_classes': 1000
}
}
}
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution with padding 1; note this FB variant keeps a bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=True,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style), FB variant with biased convs.

    y = relu(bn2(conv2(relu(bn1(conv1(x))))) + shortcut(x)), where the
    shortcut is identity unless a downsample module is supplied.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += identity
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50/101/152 style).

    Output channels = planes * expansion (4x widening on the last 1x1 conv).
    Convolutions carry a bias (bias=True), matching the Facebook Torch port.
    """
    # Channel multiplier of the block output relative to `planes`.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """
        :param inplanes: number of input channels
        :param planes: bottleneck width; output has planes * 4 channels
        :param stride: stride of the middle 3x3 convolution
        :param downsample: optional module applied to the identity path so
            its shape matches the residual branch
        """
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=True)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=True)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        # Shared in-place ReLU reused at every stage of forward().
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Return relu(F(x) + identity(x)) with F = 1x1 -> 3x3 -> 1x1 convs."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Project identity so channel/spatial dims match `out`.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class FBResNet(nn.Module):
    """ResNet backbone ported from the Facebook Torch implementation.

    Differences from torchvision's ResNet: convolutions use bias=True, the
    classifier is exposed as ``last_linear`` (pretrainedmodels API), and the
    head uses an input-size-adaptive average pool in ``logits``.
    """
    def __init__(self, block, layers, num_classes=1000):
        """
        :param block: residual block class (BasicBlock or Bottleneck);
            must expose an ``expansion`` class attribute
        :param layers: list of 4 ints — number of blocks per stage
        :param num_classes: size of the final linear layer
        """
        self.inplanes = 64
        # Special attributs
        # Preprocessing metadata; filled in by the pretrained loader.
        self.input_space = None
        # NOTE(review): default says 299x299 while the pretrained settings
        # use [3, 224, 224] — loader overwrites this; confirm if used raw.
        self.input_size = (299, 299, 3)
        self.mean = None
        self.std = None
        super(FBResNet, self).__init__()
        # Modules
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=True)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.last_linear = nn.Linear(512 * block.expansion, num_classes)
        # He (fan-out) init for convs; BN weights to 1, biases to 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; downsample the identity path of
        the first block when the stride or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=True),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def features(self, input):
        """Run the convolutional trunk; returns the stage-4 feature map."""
        x = self.conv1(input)
        # Kept for introspection/debugging of the stem activation.
        self.conv1_input = x.clone()
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
    def logits(self, features):
        """Average-pool over the full spatial extent, flatten, classify."""
        adaptiveAvgPoolWidth = features.shape[2]
        x = F.avg_pool2d(features, kernel_size=adaptiveAvgPoolWidth)
        x = x.view(x.size(0), -1)
        x = self.last_linear(x)
        return x
    def forward(self, input):
        """Full forward pass: features -> logits."""
        x = self.features(input)
        x = self.logits(x)
        return x
def fbresnet18(num_classes=1000):
    """Construct an FBResNet-18 (BasicBlock, layers [2, 2, 2, 2]).

    :param num_classes: size of the final classification layer
    :return: a randomly initialised FBResNet. No pretrained weights are
        loaded here — the original docstring mentioned a ``pretrained``
        parameter that does not exist; only fbresnet152 has pretrained
        settings in this module.
    """
    return FBResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes)
def fbresnet34(num_classes=1000):
    """Construct an FBResNet-34 (BasicBlock, layers [3, 4, 6, 3]).

    :param num_classes: size of the final classification layer
    :return: a randomly initialised FBResNet. No pretrained weights are
        loaded here — the original docstring mentioned a ``pretrained``
        parameter that does not exist.
    """
    return FBResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes)
def fbresnet50(num_classes=1000):
    """Construct an FBResNet-50 (Bottleneck, layers [3, 4, 6, 3]).

    :param num_classes: size of the final classification layer
    :return: a randomly initialised FBResNet. No pretrained weights are
        loaded here — the original docstring mentioned a ``pretrained``
        parameter that does not exist.
    """
    return FBResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes)
def fbresnet101(num_classes=1000):
    """Construct an FBResNet-101 (Bottleneck, layers [3, 4, 23, 3]).

    :param num_classes: size of the final classification layer
    :return: a randomly initialised FBResNet. No pretrained weights are
        loaded here — the original docstring mentioned a ``pretrained``
        parameter that does not exist.
    """
    return FBResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)
|
Cadene/pretrained-models.pytorch | pretrainedmodels/models/torchvision_models.py | alexnet | python | def alexnet(num_classes=1000, pretrained='imagenet'):
r"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
"""
# https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
model = models.alexnet(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['alexnet'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_alexnet(model)
return model | r"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/torchvision_models.py#L168-L178 | [
"def load_pretrained(model, num_classes, settings):\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n state_dict = model_zoo.load_url(settings['url'])\n state_dict = update_state_dict(state_dict)\n model.load_state_dict(state_dict)\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n model.mean = settings['mean']\n model.std = settings['std']\n return model\n",
"def modify_alexnet(model):\n # Modify attributs\n model._features = model.features\n del model.features\n model.dropout0 = model.classifier[0]\n model.linear0 = model.classifier[1]\n model.relu0 = model.classifier[2]\n model.dropout1 = model.classifier[3]\n model.linear1 = model.classifier[4]\n model.relu1 = model.classifier[5]\n model.last_linear = model.classifier[6]\n del model.classifier\n\n def features(self, input):\n x = self._features(input)\n x = x.view(x.size(0), 256 * 6 * 6)\n x = self.dropout0(x)\n x = self.linear0(x)\n x = self.relu0(x)\n x = self.dropout1(x)\n x = self.linear1(x)\n return x\n\n def logits(self, features):\n x = self.relu1(features)\n x = self.last_linear(x)\n return x\n\n def forward(self, input):\n x = self.features(input)\n x = self.logits(x)\n return x\n\n # Modify methods\n model.features = types.MethodType(features, model)\n model.logits = types.MethodType(logits, model)\n model.forward = types.MethodType(forward, model)\n return model\n"
] | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import torchvision.models as models
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import types
import re
#################################################################
# You can find the definitions of those models here:
# https://github.com/pytorch/vision/blob/master/torchvision/models
#
# To fit the API, we usually added/redefined some methods and
# renamed some attributs (see below for each models).
#
# However, you usually do not need to see the original model
# definition from torchvision. Just use `print(model)` to see
# the modules and see bellow the `model.features` and
# `model.classifier` definitions.
#################################################################
__all__ = [
'alexnet',
'densenet121', 'densenet169', 'densenet201', 'densenet161',
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'inceptionv3',
'squeezenet1_0', 'squeezenet1_1',
'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19'
]
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
'densenet121': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet121-fbdb23505.pth',
'densenet169': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet169-f470b90a4.pth',
'densenet201': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet201-5750cbb1e.pth',
'densenet161': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet161-347e6b360.pth',
'inceptionv3': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
# 'vgg16_caffe': 'https://s3-us-west-2.amazonaws.com/jcjohns-models/vgg16-00b39a1b.pth',
# 'vgg19_caffe': 'https://s3-us-west-2.amazonaws.com/jcjohns-models/vgg19-d01eb7cb.pth'
}
input_sizes = {}
means = {}
stds = {}
for model_name in __all__:
input_sizes[model_name] = [3, 224, 224]
means[model_name] = [0.485, 0.456, 0.406]
stds[model_name] = [0.229, 0.224, 0.225]
for model_name in ['inceptionv3']:
input_sizes[model_name] = [3, 299, 299]
means[model_name] = [0.5, 0.5, 0.5]
stds[model_name] = [0.5, 0.5, 0.5]
pretrained_settings = {}
for model_name in __all__:
pretrained_settings[model_name] = {
'imagenet': {
'url': model_urls[model_name],
'input_space': 'RGB',
'input_size': input_sizes[model_name],
'input_range': [0, 1],
'mean': means[model_name],
'std': stds[model_name],
'num_classes': 1000
}
}
# for model_name in ['vgg16', 'vgg19']:
# pretrained_settings[model_name]['imagenet_caffe'] = {
# 'url': model_urls[model_name + '_caffe'],
# 'input_space': 'BGR',
# 'input_size': input_sizes[model_name],
# 'input_range': [0, 255],
# 'mean': [103.939, 116.779, 123.68],
# 'std': [1., 1., 1.],
# 'num_classes': 1000
# }
def update_state_dict(state_dict):
    """Rename legacy DenseNet checkpoint keys in place and return the dict.

    Old torchvision checkpoints used dotted submodule names such as
    'denselayer1.norm.1.weight'; dots are no longer allowed in module names,
    so the '.1'/'.2' suffix is fused onto the layer name
    ('denselayer1.norm1.weight').
    """
    legacy_key = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
    for old_key in list(state_dict.keys()):
        match = legacy_key.match(old_key)
        if match is None:
            continue
        state_dict[match.group(1) + match.group(2)] = state_dict.pop(old_key)
    return state_dict
def load_pretrained(model, num_classes, settings):
    """Load the ImageNet weights described by *settings* into *model* and
    attach its preprocessing metadata (input space/size/range, mean, std).

    Asserts that *num_classes* matches the checkpoint's classifier size.
    """
    assert num_classes == settings['num_classes'], \
        "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
    model.load_state_dict(update_state_dict(model_zoo.load_url(settings['url'])))
    # Copy the preprocessing metadata onto the model object.
    for attr in ('input_space', 'input_size', 'input_range', 'mean', 'std'):
        setattr(model, attr, settings[attr])
    return model
#################################################################
# AlexNet
def modify_alexnet(model):
    """Adapt a torchvision AlexNet to the pretrainedmodels API.

    The Sequential ``classifier`` is unpacked into named submodules and the
    final linear layer becomes ``last_linear``; ``features``/``logits``/
    ``forward`` are rebound as instance methods. Note that the patched
    ``features`` runs through most of the former classifier (up to linear1);
    ``logits`` is only relu1 + last_linear.
    """
    # Modify attributs
    model._features = model.features
    del model.features
    model.dropout0 = model.classifier[0]
    model.linear0 = model.classifier[1]
    model.relu0 = model.classifier[2]
    model.dropout1 = model.classifier[3]
    model.linear1 = model.classifier[4]
    model.relu1 = model.classifier[5]
    model.last_linear = model.classifier[6]
    del model.classifier
    def features(self, input):
        # Conv trunk, then flatten to the fixed AlexNet feature size.
        x = self._features(input)
        x = x.view(x.size(0), 256 * 6 * 6)
        x = self.dropout0(x)
        x = self.linear0(x)
        x = self.relu0(x)
        x = self.dropout1(x)
        x = self.linear1(x)
        return x
    def logits(self, features):
        x = self.relu1(features)
        x = self.last_linear(x)
        return x
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
    # Modify methods
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
###############################################################
# DenseNets
def modify_densenets(model):
    """Adapt a torchvision DenseNet to the pretrainedmodels API.

    Renames ``classifier`` to ``last_linear`` and rebinds ``logits``/
    ``forward`` as instance methods (relu + 7x7 avg-pool + flatten + linear).
    """
    # Modify attributs
    model.last_linear = model.classifier
    del model.classifier
    def logits(self, features):
        x = F.relu(features, inplace=True)
        # Fixed 7x7 pool: matches the final feature map at 224x224 input.
        x = F.avg_pool2d(x, kernel_size=7, stride=1)
        x = x.view(x.size(0), -1)
        x = self.last_linear(x)
        return x
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
    # Modify methods
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def densenet121(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-121 from "Densely Connected Convolutional Networks"
    (https://arxiv.org/pdf/1608.06993.pdf); `pretrained` names a settings
    key (default 'imagenet'), or None for random initialisation.
    """
    model = models.densenet121(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['densenet121'][pretrained])
    return modify_densenets(model)
def densenet169(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-169 from "Densely Connected Convolutional Networks"
    (https://arxiv.org/pdf/1608.06993.pdf); `pretrained` names a settings
    key (default 'imagenet'), or None for random initialisation.
    """
    model = models.densenet169(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['densenet169'][pretrained])
    return modify_densenets(model)
def densenet201(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-201 from "Densely Connected Convolutional Networks"
    (https://arxiv.org/pdf/1608.06993.pdf); `pretrained` names a settings
    key (default 'imagenet'), or None for random initialisation.
    """
    model = models.densenet201(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['densenet201'][pretrained])
    return modify_densenets(model)
def densenet161(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-161 from "Densely Connected Convolutional Networks"
    (https://arxiv.org/pdf/1608.06993.pdf); `pretrained` names a settings
    key (default 'imagenet'), or None for random initialisation.
    """
    model = models.densenet161(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['densenet161'][pretrained])
    return modify_densenets(model)
###############################################################
# InceptionV3
def inceptionv3(num_classes=1000, pretrained='imagenet'):
    r"""Inception v3 model architecture from
    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.

    Adapted to the pretrainedmodels API: ``fc`` becomes ``last_linear`` and
    ``features``/``logits``/``forward`` are rebound as instance methods.
    In training mode with aux_logits enabled, ``features`` stashes the
    auxiliary head output and ``logits`` returns ``(x, aux)``.
    """
    model = models.inception_v3(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['inceptionv3'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    # Modify attributs
    model.last_linear = model.fc
    del model.fc
    def features(self, input):
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(input) # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x) # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x) # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2) # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x) # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x) # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2) # 35 x 35 x 192
        x = self.Mixed_5b(x) # 35 x 35 x 256
        x = self.Mixed_5c(x) # 35 x 35 x 288
        x = self.Mixed_5d(x) # 35 x 35 x 288
        x = self.Mixed_6a(x) # 17 x 17 x 768
        x = self.Mixed_6b(x) # 17 x 17 x 768
        x = self.Mixed_6c(x) # 17 x 17 x 768
        x = self.Mixed_6d(x) # 17 x 17 x 768
        x = self.Mixed_6e(x) # 17 x 17 x 768
        if self.training and self.aux_logits:
            # Stash the aux head output for logits() to pick up.
            self._out_aux = self.AuxLogits(x) # 17 x 17 x 768
        x = self.Mixed_7a(x) # 8 x 8 x 1280
        x = self.Mixed_7b(x) # 8 x 8 x 2048
        x = self.Mixed_7c(x) # 8 x 8 x 2048
        return x
    def logits(self, features):
        x = F.avg_pool2d(features, kernel_size=8) # 1 x 1 x 2048
        x = F.dropout(x, training=self.training) # 1 x 1 x 2048
        x = x.view(x.size(0), -1) # 2048
        x = self.last_linear(x) # 1000 (num_classes)
        if self.training and self.aux_logits:
            # Consume the stashed aux output and clear it.
            aux = self._out_aux
            self._out_aux = None
            return x, aux
        return x
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
    # Modify methods
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
###############################################################
# ResNets
def modify_resnets(model):
    """Adapt a torchvision ResNet to the pretrainedmodels API.

    ``fc`` becomes ``last_linear`` and ``features``/``logits``/``forward``
    are rebound as instance methods.
    """
    # Modify attributs
    model.last_linear = model.fc
    # NOTE(review): fc is set to None rather than deleted (the other
    # modify_* helpers use ``del``) — the patched forward never touches it.
    model.fc = None
    def features(self, input):
        x = self.conv1(input)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
    def logits(self, features):
        x = self.avgpool(features)
        x = x.view(x.size(0), -1)
        x = self.last_linear(x)
        return x
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
    # Modify methods
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def resnet18(num_classes=1000, pretrained='imagenet'):
    """Build a ResNet-18; `pretrained` names a settings key
    (default 'imagenet'), or None for random initialisation.
    """
    model = models.resnet18(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['resnet18'][pretrained])
    return modify_resnets(model)
def resnet34(num_classes=1000, pretrained='imagenet'):
    """Build a ResNet-34; `pretrained` names a settings key
    (default 'imagenet'), or None for random initialisation.
    """
    model = models.resnet34(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['resnet34'][pretrained])
    return modify_resnets(model)
def resnet50(num_classes=1000, pretrained='imagenet'):
    """Build a ResNet-50; `pretrained` names a settings key
    (default 'imagenet'), or None for random initialisation.
    """
    model = models.resnet50(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['resnet50'][pretrained])
    return modify_resnets(model)
def resnet101(num_classes=1000, pretrained='imagenet'):
    """Build a ResNet-101; `pretrained` names a settings key
    (default 'imagenet'), or None for random initialisation.
    """
    model = models.resnet101(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['resnet101'][pretrained])
    return modify_resnets(model)
def resnet152(num_classes=1000, pretrained='imagenet'):
    """Build a ResNet-152; `pretrained` names a settings key
    (default 'imagenet'), or None for random initialisation.
    """
    model = models.resnet152(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['resnet152'][pretrained])
    return modify_resnets(model)
###############################################################
# SqueezeNets
def modify_squeezenets(model):
    """Adapt a torchvision SqueezeNet to the pretrainedmodels API.

    The Sequential ``classifier`` is unpacked into named submodules and
    ``logits``/``forward`` are rebound as instance methods. SqueezeNet's
    classifier is a conv head, so there is no ``last_linear`` module.
    """
    # /!\ Beware squeezenets do not have any last_linear module
    # Modify attributs
    model.dropout = model.classifier[0]
    model.last_conv = model.classifier[1]
    model.relu = model.classifier[2]
    model.avgpool = model.classifier[3]
    del model.classifier
    def logits(self, features):
        x = self.dropout(features)
        x = self.last_conv(x)
        x = self.relu(x)
        x = self.avgpool(x)
        return x
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
    # Modify methods
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def squeezenet1_0(num_classes=1000, pretrained='imagenet'):
    r"""SqueezeNet 1.0 ("SqueezeNet: AlexNet-level accuracy with 50x fewer
    parameters and <0.5MB model size", https://arxiv.org/abs/1602.07360);
    `pretrained` names a settings key (default 'imagenet'), or None.
    """
    model = models.squeezenet1_0(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['squeezenet1_0'][pretrained])
    return modify_squeezenets(model)
def squeezenet1_1(num_classes=1000, pretrained='imagenet'):
    r"""SqueezeNet 1.1 (official SqueezeNet repo): 2.4x less computation and
    slightly fewer parameters than 1.0 without sacrificing accuracy;
    `pretrained` names a settings key (default 'imagenet'), or None.
    """
    model = models.squeezenet1_1(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['squeezenet1_1'][pretrained])
    return modify_squeezenets(model)
###############################################################
# VGGs
def modify_vggs(model):
    """Adapt a torchvision VGG to the pretrainedmodels API.

    The Sequential ``classifier`` is unpacked into named submodules and the
    final linear layer becomes ``last_linear``; ``features``/``logits``/
    ``forward`` are rebound as instance methods. Note that the patched
    ``features`` runs through most of the former classifier (up to linear1);
    ``logits`` is relu1 + dropout1 + last_linear.
    """
    # Modify attributs
    model._features = model.features
    del model.features
    model.linear0 = model.classifier[0]
    model.relu0 = model.classifier[1]
    model.dropout0 = model.classifier[2]
    model.linear1 = model.classifier[3]
    model.relu1 = model.classifier[4]
    model.dropout1 = model.classifier[5]
    model.last_linear = model.classifier[6]
    del model.classifier
    def features(self, input):
        # Conv trunk, then flatten before the first linear layer.
        x = self._features(input)
        x = x.view(x.size(0), -1)
        x = self.linear0(x)
        x = self.relu0(x)
        x = self.dropout0(x)
        x = self.linear1(x)
        return x
    def logits(self, features):
        x = self.relu1(features)
        x = self.dropout1(x)
        x = self.last_linear(x)
        return x
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
    # Modify methods
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def vgg11(num_classes=1000, pretrained='imagenet'):
    """VGG 11-layer model (configuration "A"); `pretrained` names a settings
    key (default 'imagenet'), or None for random initialisation.
    """
    model = models.vgg11(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['vgg11'][pretrained])
    return modify_vggs(model)
def vgg11_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 11-layer model (configuration "A") with batch normalization;
    `pretrained` names a settings key (default 'imagenet'), or None.
    """
    model = models.vgg11_bn(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['vgg11_bn'][pretrained])
    return modify_vggs(model)
def vgg13(num_classes=1000, pretrained='imagenet'):
    """VGG 13-layer model (configuration "B"); `pretrained` names a settings
    key (default 'imagenet'), or None for random initialisation.
    """
    model = models.vgg13(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['vgg13'][pretrained])
    return modify_vggs(model)
def vgg13_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 13-layer model (configuration "B") with batch normalization;
    `pretrained` names a settings key (default 'imagenet'), or None.
    """
    model = models.vgg13_bn(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['vgg13_bn'][pretrained])
    return modify_vggs(model)
def vgg16(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D"); `pretrained` names a settings
    key (default 'imagenet'), or None for random initialisation.
    """
    model = models.vgg16(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['vgg16'][pretrained])
    return modify_vggs(model)
def vgg16_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D") with batch normalization;
    `pretrained` names a settings key (default 'imagenet'), or None.
    """
    model = models.vgg16_bn(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['vgg16_bn'][pretrained])
    return modify_vggs(model)
def vgg19(num_classes=1000, pretrained='imagenet'):
    """VGG 19-layer model (configuration "E"); `pretrained` names a settings
    key (default 'imagenet'), or None for random initialisation.
    """
    model = models.vgg19(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['vgg19'][pretrained])
    return modify_vggs(model)
def vgg19_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 19-layer model (configuration "E") with batch normalization;
    `pretrained` names a settings key (default 'imagenet'), or None.
    """
    model = models.vgg19_bn(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['vgg19_bn'][pretrained])
    return modify_vggs(model)
|
Cadene/pretrained-models.pytorch | pretrainedmodels/models/torchvision_models.py | densenet121 | python | def densenet121(num_classes=1000, pretrained='imagenet'):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model = models.densenet121(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['densenet121'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_densenets(model)
return model | r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/torchvision_models.py#L205-L214 | [
"def load_pretrained(model, num_classes, settings):\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n state_dict = model_zoo.load_url(settings['url'])\n state_dict = update_state_dict(state_dict)\n model.load_state_dict(state_dict)\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n model.mean = settings['mean']\n model.std = settings['std']\n return model\n",
"def modify_densenets(model):\n # Modify attributs\n model.last_linear = model.classifier\n del model.classifier\n\n def logits(self, features):\n x = F.relu(features, inplace=True)\n x = F.avg_pool2d(x, kernel_size=7, stride=1)\n x = x.view(x.size(0), -1)\n x = self.last_linear(x)\n return x\n\n def forward(self, input):\n x = self.features(input)\n x = self.logits(x)\n return x\n\n # Modify methods\n model.logits = types.MethodType(logits, model)\n model.forward = types.MethodType(forward, model)\n return model\n"
] | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import torchvision.models as models
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import types
import re
#################################################################
# You can find the definitions of those models here:
# https://github.com/pytorch/vision/blob/master/torchvision/models
#
# To fit the API, we usually added/redefined some methods and
# renamed some attributs (see below for each models).
#
# However, you usually do not need to see the original model
# definition from torchvision. Just use `print(model)` to see
# the modules and see bellow the `model.features` and
# `model.classifier` definitions.
#################################################################
__all__ = [
'alexnet',
'densenet121', 'densenet169', 'densenet201', 'densenet161',
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'inceptionv3',
'squeezenet1_0', 'squeezenet1_1',
'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19'
]
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
'densenet121': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet121-fbdb23505.pth',
'densenet169': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet169-f470b90a4.pth',
'densenet201': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet201-5750cbb1e.pth',
'densenet161': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet161-347e6b360.pth',
'inceptionv3': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
# 'vgg16_caffe': 'https://s3-us-west-2.amazonaws.com/jcjohns-models/vgg16-00b39a1b.pth',
# 'vgg19_caffe': 'https://s3-us-west-2.amazonaws.com/jcjohns-models/vgg19-d01eb7cb.pth'
}
input_sizes = {}
means = {}
stds = {}
for model_name in __all__:
input_sizes[model_name] = [3, 224, 224]
means[model_name] = [0.485, 0.456, 0.406]
stds[model_name] = [0.229, 0.224, 0.225]
for model_name in ['inceptionv3']:
input_sizes[model_name] = [3, 299, 299]
means[model_name] = [0.5, 0.5, 0.5]
stds[model_name] = [0.5, 0.5, 0.5]
pretrained_settings = {}
for model_name in __all__:
pretrained_settings[model_name] = {
'imagenet': {
'url': model_urls[model_name],
'input_space': 'RGB',
'input_size': input_sizes[model_name],
'input_range': [0, 1],
'mean': means[model_name],
'std': stds[model_name],
'num_classes': 1000
}
}
# for model_name in ['vgg16', 'vgg19']:
# pretrained_settings[model_name]['imagenet_caffe'] = {
# 'url': model_urls[model_name + '_caffe'],
# 'input_space': 'BGR',
# 'input_size': input_sizes[model_name],
# 'input_range': [0, 255],
# 'mean': [103.939, 116.779, 123.68],
# 'std': [1., 1., 1.],
# 'num_classes': 1000
# }
def update_state_dict(state_dict):
    """Rename legacy DenseNet checkpoint keys in place and return the dict.

    Old torchvision checkpoints used dotted submodule names such as
    'denselayer1.norm.1.weight'; dots are no longer allowed in module names,
    so the '.1'/'.2' suffix is fused onto the layer name
    ('denselayer1.norm1.weight').
    """
    legacy_key = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
    for old_key in list(state_dict.keys()):
        match = legacy_key.match(old_key)
        if match is None:
            continue
        state_dict[match.group(1) + match.group(2)] = state_dict.pop(old_key)
    return state_dict
def load_pretrained(model, num_classes, settings):
    """Load the ImageNet weights described by *settings* into *model* and
    attach its preprocessing metadata (input space/size/range, mean, std).

    Asserts that *num_classes* matches the checkpoint's classifier size.
    """
    assert num_classes == settings['num_classes'], \
        "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
    model.load_state_dict(update_state_dict(model_zoo.load_url(settings['url'])))
    # Copy the preprocessing metadata onto the model object.
    for attr in ('input_space', 'input_size', 'input_range', 'mean', 'std'):
        setattr(model, attr, settings[attr])
    return model
#################################################################
# AlexNet
def modify_alexnet(model):
    """Adapt a torchvision AlexNet to the pretrainedmodels API.

    The Sequential ``classifier`` is unpacked into named submodules and the
    final linear layer becomes ``last_linear``; ``features``/``logits``/
    ``forward`` are rebound as instance methods. Note that the patched
    ``features`` runs through most of the former classifier (up to linear1);
    ``logits`` is only relu1 + last_linear.
    """
    # Modify attributs
    model._features = model.features
    del model.features
    model.dropout0 = model.classifier[0]
    model.linear0 = model.classifier[1]
    model.relu0 = model.classifier[2]
    model.dropout1 = model.classifier[3]
    model.linear1 = model.classifier[4]
    model.relu1 = model.classifier[5]
    model.last_linear = model.classifier[6]
    del model.classifier
    def features(self, input):
        # Conv trunk, then flatten to the fixed AlexNet feature size.
        x = self._features(input)
        x = x.view(x.size(0), 256 * 6 * 6)
        x = self.dropout0(x)
        x = self.linear0(x)
        x = self.relu0(x)
        x = self.dropout1(x)
        x = self.linear1(x)
        return x
    def logits(self, features):
        x = self.relu1(features)
        x = self.last_linear(x)
        return x
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
    # Modify methods
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def alexnet(num_classes=1000, pretrained='imagenet'):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper;
    `pretrained` names a settings key (default 'imagenet'), or None.
    """
    # torchvision reference:
    # https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
    model = models.alexnet(pretrained=False)
    if pretrained is not None:
        model = load_pretrained(
            model, num_classes, pretrained_settings['alexnet'][pretrained])
    return modify_alexnet(model)
###############################################################
# DenseNets
def modify_densenets(model):
    """Rename ``classifier`` to ``last_linear`` and add logits/forward."""
    model.last_linear = model.classifier
    del model.classifier

    def logits(self, features):
        out = F.avg_pool2d(F.relu(features, inplace=True),
                           kernel_size=7, stride=1)
        out = out.view(out.size(0), -1)
        return self.last_linear(out)

    def forward(self, input):
        return self.logits(self.features(input))

    # Bind the new implementations onto this instance only.
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def densenet169(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    net = models.densenet169(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['densenet169'][pretrained])
    return modify_densenets(net)
def densenet201(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    net = models.densenet201(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['densenet201'][pretrained])
    return modify_densenets(net)
def densenet161(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-161 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    net = models.densenet161(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['densenet161'][pretrained])
    return modify_densenets(net)
###############################################################
# InceptionV3
def inceptionv3(num_classes=1000, pretrained='imagenet'):
    r"""Inception v3 model architecture from
    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.

    Builds the torchvision Inception v3 (optionally loading pretrained
    weights) and rewires it to the features/logits/last_linear API used
    throughout this module.  When the model is in training mode and
    ``aux_logits`` is enabled, ``forward`` returns ``(x, aux)``.
    """
    model = models.inception_v3(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['inceptionv3'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    # Modify attributs
    # Rename the final fully-connected layer to the module-wide `last_linear`.
    model.last_linear = model.fc
    del model.fc
    def features(self, input):
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(input) # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x) # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x) # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2) # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x) # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x) # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2) # 35 x 35 x 192
        x = self.Mixed_5b(x) # 35 x 35 x 256
        x = self.Mixed_5c(x) # 35 x 35 x 288
        x = self.Mixed_5d(x) # 35 x 35 x 288
        x = self.Mixed_6a(x) # 17 x 17 x 768
        x = self.Mixed_6b(x) # 17 x 17 x 768
        x = self.Mixed_6c(x) # 17 x 17 x 768
        x = self.Mixed_6d(x) # 17 x 17 x 768
        x = self.Mixed_6e(x) # 17 x 17 x 768
        if self.training and self.aux_logits:
            # Stash the auxiliary head output on the instance; logits()
            # picks it up and returns it alongside the main output.
            self._out_aux = self.AuxLogits(x) # 17 x 17 x 768
        x = self.Mixed_7a(x) # 8 x 8 x 1280
        x = self.Mixed_7b(x) # 8 x 8 x 2048
        x = self.Mixed_7c(x) # 8 x 8 x 2048
        return x
    def logits(self, features):
        x = F.avg_pool2d(features, kernel_size=8) # 1 x 1 x 2048
        x = F.dropout(x, training=self.training) # 1 x 1 x 2048
        x = x.view(x.size(0), -1) # 2048
        x = self.last_linear(x) # 1000 (num_classes)
        if self.training and self.aux_logits:
            # Hand back (and clear) the aux output stashed by features().
            aux = self._out_aux
            self._out_aux = None
            return x, aux
        return x
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
    # Modify methods
    # Bind the new implementations onto this instance only.
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
###############################################################
# ResNets
def modify_resnets(model):
    """Rewire a torchvision ResNet to the features/logits/last_linear API."""
    model.last_linear = model.fc
    model.fc = None

    def features(self, input):
        out = self.maxpool(self.relu(self.bn1(self.conv1(input))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        return out

    def logits(self, features):
        out = self.avgpool(features)
        return self.last_linear(out.view(out.size(0), -1))

    def forward(self, input):
        return self.logits(self.features(input))

    # Bind the new implementations onto this instance only.
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def resnet18(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-18 model."""
    net = models.resnet18(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['resnet18'][pretrained])
    return modify_resnets(net)
def resnet34(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-34 model."""
    net = models.resnet34(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['resnet34'][pretrained])
    return modify_resnets(net)
def resnet50(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-50 model."""
    net = models.resnet50(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['resnet50'][pretrained])
    return modify_resnets(net)
def resnet101(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-101 model."""
    net = models.resnet101(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['resnet101'][pretrained])
    return modify_resnets(net)
def resnet152(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-152 model."""
    net = models.resnet152(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['resnet152'][pretrained])
    return modify_resnets(net)
###############################################################
# SqueezeNets
def modify_squeezenets(model):
    """Flatten a torchvision SqueezeNet classifier and add logits/forward.

    /!\\ Beware: squeezenets do not have any ``last_linear`` module.
    """
    # Flatten the classifier Sequential into named attributes.
    (model.dropout, model.last_conv,
     model.relu, model.avgpool) = [model.classifier[i] for i in range(4)]
    del model.classifier

    def logits(self, features):
        out = self.dropout(features)
        out = self.relu(self.last_conv(out))
        return self.avgpool(out)

    def forward(self, input):
        return self.logits(self.features(input))

    # Bind the new implementations onto this instance only.
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def squeezenet1_0(num_classes=1000, pretrained='imagenet'):
    r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
    accuracy with 50x fewer parameters and <0.5MB model size"
    <https://arxiv.org/abs/1602.07360>`_ paper.
    """
    net = models.squeezenet1_0(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['squeezenet1_0'][pretrained])
    return modify_squeezenets(net)
def squeezenet1_1(num_classes=1000, pretrained='imagenet'):
    r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
    <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
    than SqueezeNet 1.0, without sacrificing accuracy.
    """
    net = models.squeezenet1_1(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['squeezenet1_1'][pretrained])
    return modify_squeezenets(net)
###############################################################
# VGGs
def modify_vggs(model):
    """Rewire a torchvision VGG to the features/logits/last_linear API."""
    # Keep the convolutional trunk under a private name.
    model._features = model.features
    del model.features
    # Flatten the classifier Sequential into named attributes.
    (model.linear0, model.relu0, model.dropout0,
     model.linear1, model.relu1, model.dropout1,
     model.last_linear) = [model.classifier[i] for i in range(7)]
    del model.classifier

    def features(self, input):
        out = self._features(input)
        out = out.view(out.size(0), -1)
        out = self.dropout0(self.relu0(self.linear0(out)))
        return self.linear1(out)

    def logits(self, features):
        out = self.dropout1(self.relu1(features))
        return self.last_linear(out)

    def forward(self, input):
        return self.logits(self.features(input))

    # Bind the new implementations onto this instance only.
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def vgg11(num_classes=1000, pretrained='imagenet'):
    """VGG 11-layer model (configuration "A")."""
    net = models.vgg11(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg11'][pretrained])
    return modify_vggs(net)
def vgg11_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 11-layer model (configuration "A") with batch normalization."""
    net = models.vgg11_bn(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg11_bn'][pretrained])
    return modify_vggs(net)
def vgg13(num_classes=1000, pretrained='imagenet'):
    """VGG 13-layer model (configuration "B")."""
    net = models.vgg13(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg13'][pretrained])
    return modify_vggs(net)
def vgg13_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 13-layer model (configuration "B") with batch normalization."""
    net = models.vgg13_bn(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg13_bn'][pretrained])
    return modify_vggs(net)
def vgg16(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D")."""
    net = models.vgg16(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg16'][pretrained])
    return modify_vggs(net)
def vgg16_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D") with batch normalization."""
    net = models.vgg16_bn(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg16_bn'][pretrained])
    return modify_vggs(net)
def vgg19(num_classes=1000, pretrained='imagenet'):
    """VGG 19-layer model (configuration "E")."""
    net = models.vgg19(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg19'][pretrained])
    return modify_vggs(net)
def vgg19_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 19-layer model (configuration 'E') with batch normalization."""
    net = models.vgg19_bn(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg19_bn'][pretrained])
    return modify_vggs(net)
|
Cadene/pretrained-models.pytorch | pretrainedmodels/models/torchvision_models.py | inceptionv3 | python | def inceptionv3(num_classes=1000, pretrained='imagenet'):
r"""Inception v3 model architecture from
`"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
"""
model = models.inception_v3(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['inceptionv3'][pretrained]
model = load_pretrained(model, num_classes, settings)
# Modify attributs
model.last_linear = model.fc
del model.fc
def features(self, input):
# 299 x 299 x 3
x = self.Conv2d_1a_3x3(input) # 149 x 149 x 32
x = self.Conv2d_2a_3x3(x) # 147 x 147 x 32
x = self.Conv2d_2b_3x3(x) # 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2) # 73 x 73 x 64
x = self.Conv2d_3b_1x1(x) # 73 x 73 x 80
x = self.Conv2d_4a_3x3(x) # 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2) # 35 x 35 x 192
x = self.Mixed_5b(x) # 35 x 35 x 256
x = self.Mixed_5c(x) # 35 x 35 x 288
x = self.Mixed_5d(x) # 35 x 35 x 288
x = self.Mixed_6a(x) # 17 x 17 x 768
x = self.Mixed_6b(x) # 17 x 17 x 768
x = self.Mixed_6c(x) # 17 x 17 x 768
x = self.Mixed_6d(x) # 17 x 17 x 768
x = self.Mixed_6e(x) # 17 x 17 x 768
if self.training and self.aux_logits:
self._out_aux = self.AuxLogits(x) # 17 x 17 x 768
x = self.Mixed_7a(x) # 8 x 8 x 1280
x = self.Mixed_7b(x) # 8 x 8 x 2048
x = self.Mixed_7c(x) # 8 x 8 x 2048
return x
def logits(self, features):
x = F.avg_pool2d(features, kernel_size=8) # 1 x 1 x 2048
x = F.dropout(x, training=self.training) # 1 x 1 x 2048
x = x.view(x.size(0), -1) # 2048
x = self.last_linear(x) # 1000 (num_classes)
if self.training and self.aux_logits:
aux = self._out_aux
self._out_aux = None
return x, aux
return x
def forward(self, input):
x = self.features(input)
x = self.logits(x)
return x
# Modify methods
model.features = types.MethodType(features, model)
model.logits = types.MethodType(logits, model)
model.forward = types.MethodType(forward, model)
return model | r"""Inception v3 model architecture from
`"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_. | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/torchvision_models.py#L252-L309 | [
"def load_pretrained(model, num_classes, settings):\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n state_dict = model_zoo.load_url(settings['url'])\n state_dict = update_state_dict(state_dict)\n model.load_state_dict(state_dict)\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n model.mean = settings['mean']\n model.std = settings['std']\n return model\n"
] | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import torchvision.models as models
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import types
import re
#################################################################
# You can find the definitions of those models here:
# https://github.com/pytorch/vision/blob/master/torchvision/models
#
# To fit the API, we usually added/redefined some methods and
# renamed some attributs (see below for each models).
#
# However, you usually do not need to see the original model
# definition from torchvision. Just use `print(model)` to see
# the modules and see bellow the `model.features` and
# `model.classifier` definitions.
#################################################################
__all__ = [
'alexnet',
'densenet121', 'densenet169', 'densenet201', 'densenet161',
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'inceptionv3',
'squeezenet1_0', 'squeezenet1_1',
'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19'
]
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
'densenet121': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet121-fbdb23505.pth',
'densenet169': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet169-f470b90a4.pth',
'densenet201': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet201-5750cbb1e.pth',
'densenet161': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet161-347e6b360.pth',
'inceptionv3': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
# 'vgg16_caffe': 'https://s3-us-west-2.amazonaws.com/jcjohns-models/vgg16-00b39a1b.pth',
# 'vgg19_caffe': 'https://s3-us-west-2.amazonaws.com/jcjohns-models/vgg19-d01eb7cb.pth'
}
input_sizes = {}
means = {}
stds = {}
for model_name in __all__:
input_sizes[model_name] = [3, 224, 224]
means[model_name] = [0.485, 0.456, 0.406]
stds[model_name] = [0.229, 0.224, 0.225]
for model_name in ['inceptionv3']:
input_sizes[model_name] = [3, 299, 299]
means[model_name] = [0.5, 0.5, 0.5]
stds[model_name] = [0.5, 0.5, 0.5]
pretrained_settings = {}
for model_name in __all__:
pretrained_settings[model_name] = {
'imagenet': {
'url': model_urls[model_name],
'input_space': 'RGB',
'input_size': input_sizes[model_name],
'input_range': [0, 1],
'mean': means[model_name],
'std': stds[model_name],
'num_classes': 1000
}
}
# for model_name in ['vgg16', 'vgg19']:
# pretrained_settings[model_name]['imagenet_caffe'] = {
# 'url': model_urls[model_name + '_caffe'],
# 'input_space': 'BGR',
# 'input_size': input_sizes[model_name],
# 'input_range': [0, 255],
# 'mean': [103.939, 116.779, 123.68],
# 'std': [1., 1., 1.],
# 'num_classes': 1000
# }
def update_state_dict(state_dict):
# '.'s are no longer allowed in module names, but pervious _DenseLayer
# has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
# They are also in the checkpoints in model_urls. This pattern is used
# to find such keys.
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
return state_dict
def load_pretrained(model, num_classes, settings):
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
state_dict = model_zoo.load_url(settings['url'])
state_dict = update_state_dict(state_dict)
model.load_state_dict(state_dict)
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model
#################################################################
# AlexNet
def modify_alexnet(model):
# Modify attributs
model._features = model.features
del model.features
model.dropout0 = model.classifier[0]
model.linear0 = model.classifier[1]
model.relu0 = model.classifier[2]
model.dropout1 = model.classifier[3]
model.linear1 = model.classifier[4]
model.relu1 = model.classifier[5]
model.last_linear = model.classifier[6]
del model.classifier
def features(self, input):
x = self._features(input)
x = x.view(x.size(0), 256 * 6 * 6)
x = self.dropout0(x)
x = self.linear0(x)
x = self.relu0(x)
x = self.dropout1(x)
x = self.linear1(x)
return x
def logits(self, features):
x = self.relu1(features)
x = self.last_linear(x)
return x
def forward(self, input):
x = self.features(input)
x = self.logits(x)
return x
# Modify methods
model.features = types.MethodType(features, model)
model.logits = types.MethodType(logits, model)
model.forward = types.MethodType(forward, model)
return model
def alexnet(num_classes=1000, pretrained='imagenet'):
r"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
"""
# https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
model = models.alexnet(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['alexnet'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_alexnet(model)
return model
###############################################################
# DenseNets
def modify_densenets(model):
# Modify attributs
model.last_linear = model.classifier
del model.classifier
def logits(self, features):
x = F.relu(features, inplace=True)
x = F.avg_pool2d(x, kernel_size=7, stride=1)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
def forward(self, input):
x = self.features(input)
x = self.logits(x)
return x
# Modify methods
model.logits = types.MethodType(logits, model)
model.forward = types.MethodType(forward, model)
return model
def densenet121(num_classes=1000, pretrained='imagenet'):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model = models.densenet121(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['densenet121'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_densenets(model)
return model
def densenet169(num_classes=1000, pretrained='imagenet'):
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model = models.densenet169(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['densenet169'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_densenets(model)
return model
def densenet201(num_classes=1000, pretrained='imagenet'):
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model = models.densenet201(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['densenet201'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_densenets(model)
return model
def densenet161(num_classes=1000, pretrained='imagenet'):
r"""Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model = models.densenet161(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['densenet161'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_densenets(model)
return model
###############################################################
# InceptionV3
###############################################################
# ResNets
def modify_resnets(model):
# Modify attributs
model.last_linear = model.fc
model.fc = None
def features(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def logits(self, features):
x = self.avgpool(features)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
def forward(self, input):
x = self.features(input)
x = self.logits(x)
return x
# Modify methods
model.features = types.MethodType(features, model)
model.logits = types.MethodType(logits, model)
model.forward = types.MethodType(forward, model)
return model
def resnet18(num_classes=1000, pretrained='imagenet'):
"""Constructs a ResNet-18 model.
"""
model = models.resnet18(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['resnet18'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_resnets(model)
return model
def resnet34(num_classes=1000, pretrained='imagenet'):
"""Constructs a ResNet-34 model.
"""
model = models.resnet34(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['resnet34'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_resnets(model)
return model
def resnet50(num_classes=1000, pretrained='imagenet'):
"""Constructs a ResNet-50 model.
"""
model = models.resnet50(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['resnet50'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_resnets(model)
return model
def resnet101(num_classes=1000, pretrained='imagenet'):
"""Constructs a ResNet-101 model.
"""
model = models.resnet101(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['resnet101'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_resnets(model)
return model
def resnet152(num_classes=1000, pretrained='imagenet'):
"""Constructs a ResNet-152 model.
"""
model = models.resnet152(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['resnet152'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_resnets(model)
return model
###############################################################
# SqueezeNets
def modify_squeezenets(model):
# /!\ Beware squeezenets do not have any last_linear module
# Modify attributs
model.dropout = model.classifier[0]
model.last_conv = model.classifier[1]
model.relu = model.classifier[2]
model.avgpool = model.classifier[3]
del model.classifier
def logits(self, features):
x = self.dropout(features)
x = self.last_conv(x)
x = self.relu(x)
x = self.avgpool(x)
return x
def forward(self, input):
x = self.features(input)
x = self.logits(x)
return x
# Modify methods
model.logits = types.MethodType(logits, model)
model.forward = types.MethodType(forward, model)
return model
def squeezenet1_0(num_classes=1000, pretrained='imagenet'):
r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
accuracy with 50x fewer parameters and <0.5MB model size"
<https://arxiv.org/abs/1602.07360>`_ paper.
"""
model = models.squeezenet1_0(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['squeezenet1_0'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_squeezenets(model)
return model
def squeezenet1_1(num_classes=1000, pretrained='imagenet'):
r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
"""
model = models.squeezenet1_1(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['squeezenet1_1'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_squeezenets(model)
return model
###############################################################
# VGGs
def modify_vggs(model):
# Modify attributs
model._features = model.features
del model.features
model.linear0 = model.classifier[0]
model.relu0 = model.classifier[1]
model.dropout0 = model.classifier[2]
model.linear1 = model.classifier[3]
model.relu1 = model.classifier[4]
model.dropout1 = model.classifier[5]
model.last_linear = model.classifier[6]
del model.classifier
def features(self, input):
x = self._features(input)
x = x.view(x.size(0), -1)
x = self.linear0(x)
x = self.relu0(x)
x = self.dropout0(x)
x = self.linear1(x)
return x
def logits(self, features):
x = self.relu1(features)
x = self.dropout1(x)
x = self.last_linear(x)
return x
def forward(self, input):
x = self.features(input)
x = self.logits(x)
return x
# Modify methods
model.features = types.MethodType(features, model)
model.logits = types.MethodType(logits, model)
model.forward = types.MethodType(forward, model)
return model
def vgg11(num_classes=1000, pretrained='imagenet'):
"""VGG 11-layer model (configuration "A")
"""
model = models.vgg11(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['vgg11'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_vggs(model)
return model
def vgg11_bn(num_classes=1000, pretrained='imagenet'):
"""VGG 11-layer model (configuration "A") with batch normalization
"""
model = models.vgg11_bn(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['vgg11_bn'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_vggs(model)
return model
def vgg13(num_classes=1000, pretrained='imagenet'):
"""VGG 13-layer model (configuration "B")
"""
model = models.vgg13(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['vgg13'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_vggs(model)
return model
def vgg13_bn(num_classes=1000, pretrained='imagenet'):
"""VGG 13-layer model (configuration "B") with batch normalization
"""
model = models.vgg13_bn(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['vgg13_bn'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_vggs(model)
return model
def vgg16(num_classes=1000, pretrained='imagenet'):
"""VGG 16-layer model (configuration "D")
"""
model = models.vgg16(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['vgg16'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_vggs(model)
return model
def vgg16_bn(num_classes=1000, pretrained='imagenet'):
"""VGG 16-layer model (configuration "D") with batch normalization
"""
model = models.vgg16_bn(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['vgg16_bn'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_vggs(model)
return model
def vgg19(num_classes=1000, pretrained='imagenet'):
"""VGG 19-layer model (configuration "E")
"""
model = models.vgg19(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['vgg19'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_vggs(model)
return model
def vgg19_bn(num_classes=1000, pretrained='imagenet'):
"""VGG 19-layer model (configuration 'E') with batch normalization
"""
model = models.vgg19_bn(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['vgg19_bn'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_vggs(model)
return model
|
Cadene/pretrained-models.pytorch | pretrainedmodels/models/torchvision_models.py | resnet50 | python | def resnet50(num_classes=1000, pretrained='imagenet'):
model = models.resnet50(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['resnet50'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_resnets(model)
return model | Constructs a ResNet-50 model. | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/torchvision_models.py#L368-L376 | [
"def load_pretrained(model, num_classes, settings):\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n state_dict = model_zoo.load_url(settings['url'])\n state_dict = update_state_dict(state_dict)\n model.load_state_dict(state_dict)\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n model.mean = settings['mean']\n model.std = settings['std']\n return model\n",
"def modify_resnets(model):\n # Modify attributs\n model.last_linear = model.fc\n model.fc = None\n\n def features(self, input):\n x = self.conv1(input)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n return x\n\n def logits(self, features):\n x = self.avgpool(features)\n x = x.view(x.size(0), -1)\n x = self.last_linear(x)\n return x\n\n def forward(self, input):\n x = self.features(input)\n x = self.logits(x)\n return x\n\n # Modify methods\n model.features = types.MethodType(features, model)\n model.logits = types.MethodType(logits, model)\n model.forward = types.MethodType(forward, model)\n return model\n"
] | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import torchvision.models as models
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import types
import re
#################################################################
# You can find the definitions of those models here:
# https://github.com/pytorch/vision/blob/master/torchvision/models
#
# To fit the API, we usually added/redefined some methods and
# renamed some attributs (see below for each models).
#
# However, you usually do not need to see the original model
# definition from torchvision. Just use `print(model)` to see
# the modules and see bellow the `model.features` and
# `model.classifier` definitions.
#################################################################
# Public factory functions exported by this module.
__all__ = [
    'alexnet',
    'densenet121', 'densenet169', 'densenet201', 'densenet161',
    'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
    'inceptionv3',
    'squeezenet1_0', 'squeezenet1_1',
    'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    'vgg19_bn', 'vgg19'
]
# Checkpoint URLs per architecture (torchvision mirrors plus lip6-hosted
# re-exports for the DenseNets).
model_urls = {
    'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
    'densenet121': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet121-fbdb23505.pth',
    'densenet169': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet169-f470b90a4.pth',
    'densenet201': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet201-5750cbb1e.pth',
    'densenet161': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet161-347e6b360.pth',
    'inceptionv3': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
    'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
    # 'vgg16_caffe': 'https://s3-us-west-2.amazonaws.com/jcjohns-models/vgg16-00b39a1b.pth',
    # 'vgg19_caffe': 'https://s3-us-west-2.amazonaws.com/jcjohns-models/vgg19-d01eb7cb.pth'
}
# Standard ImageNet preprocessing defaults for every model ...
input_sizes = {}
means = {}
stds = {}
for model_name in __all__:
    input_sizes[model_name] = [3, 224, 224]
    means[model_name] = [0.485, 0.456, 0.406]
    stds[model_name] = [0.229, 0.224, 0.225]
# ... except Inception v3, which uses 299x299 inputs and [-1, 1]-style scaling.
for model_name in ['inceptionv3']:
    input_sizes[model_name] = [3, 299, 299]
    means[model_name] = [0.5, 0.5, 0.5]
    stds[model_name] = [0.5, 0.5, 0.5]
# Per-model settings dict consumed by load_pretrained(); only the
# 'imagenet' key is populated here.
pretrained_settings = {}
for model_name in __all__:
    pretrained_settings[model_name] = {
        'imagenet': {
            'url': model_urls[model_name],
            'input_space': 'RGB',
            'input_size': input_sizes[model_name],
            'input_range': [0, 1],
            'mean': means[model_name],
            'std': stds[model_name],
            'num_classes': 1000
        }
    }
# for model_name in ['vgg16', 'vgg19']:
#     pretrained_settings[model_name]['imagenet_caffe'] = {
#         'url': model_urls[model_name + '_caffe'],
#         'input_space': 'BGR',
#         'input_size': input_sizes[model_name],
#         'input_range': [0, 255],
#         'mean': [103.939, 116.779, 123.68],
#         'std': [1., 1., 1.],
#         'num_classes': 1000
#     }
def update_state_dict(state_dict):
    """Rename legacy DenseNet keys such as ``norm.1`` to ``norm1``.

    '.'s are no longer allowed in module names, but the previous _DenseLayer
    used keys like 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2',
    'conv.2', and the downloadable checkpoints still contain them. The
    extra dot is stripped in place so the checkpoints load cleanly.
    """
    legacy = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
    for old_key in list(state_dict):
        match = legacy.match(old_key)
        if match is None:
            continue
        # e.g. '...denselayer1.norm' + '1.weight' -> '...denselayer1.norm1.weight'
        state_dict[match.group(1) + match.group(2)] = state_dict.pop(old_key)
    return state_dict
def load_pretrained(model, num_classes, settings):
    """Load the checkpoint from ``settings['url']`` into *model*.

    Also attaches the preprocessing metadata (input space/size/range,
    mean, std) from *settings* onto the model instance. Raises
    AssertionError when *num_classes* does not match the checkpoint.
    """
    assert num_classes == settings['num_classes'], \
        "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
    weights = update_state_dict(model_zoo.load_url(settings['url']))
    model.load_state_dict(weights)
    # Copy normalization / input metadata onto the model for downstream use.
    for attr in ('input_space', 'input_size', 'input_range', 'mean', 'std'):
        setattr(model, attr, settings[attr])
    return model
#################################################################
# AlexNet
def modify_alexnet(model):
    """Rewire torchvision's AlexNet to the ``features``/``logits``/``forward`` API.

    The Sequential ``classifier`` is flattened into named submodules and
    the final fully-connected layer is exposed as ``model.last_linear``.
    """
    # Keep the conv trunk under a private name; flatten the classifier.
    model._features = model.features
    del model.features
    (model.dropout0, model.linear0, model.relu0,
     model.dropout1, model.linear1, model.relu1,
     model.last_linear) = (model.classifier[i] for i in range(7))
    del model.classifier

    def features(self, input):
        out = self._features(input)
        out = out.view(out.size(0), 256 * 6 * 6)
        out = self.dropout0(out)
        out = self.linear0(out)
        out = self.relu0(out)
        out = self.dropout1(out)
        return self.linear1(out)

    def logits(self, features):
        return self.last_linear(self.relu1(features))

    def forward(self, input):
        return self.logits(self.features(input))

    # Bind the new methods onto this particular instance.
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def alexnet(num_classes=1000, pretrained='imagenet'):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    Optionally loads pretrained weights, then rewires via :func:`modify_alexnet`.
    """
    # https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
    model = models.alexnet(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['alexnet'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_alexnet(model)
###############################################################
# DenseNets
def modify_densenets(model):
    """Expose ``classifier`` as ``last_linear`` and add ``logits``/``forward``."""
    model.last_linear = model.classifier
    del model.classifier

    def logits(self, features):
        out = F.relu(features, inplace=True)
        out = F.avg_pool2d(out, kernel_size=7, stride=1)
        return self.last_linear(out.view(out.size(0), -1))

    def forward(self, input):
        return self.logits(self.features(input))

    # Bind the new methods onto this particular instance.
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def densenet121(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    model = models.densenet121(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['densenet121'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_densenets(model)
def densenet169(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    model = models.densenet169(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['densenet169'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_densenets(model)
def densenet201(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    model = models.densenet201(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['densenet201'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_densenets(model)
def densenet161(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-161 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    model = models.densenet161(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['densenet161'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_densenets(model)
###############################################################
# InceptionV3
def inceptionv3(num_classes=1000, pretrained='imagenet'):
    r"""Inception v3 model architecture from
    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.

    Builds torchvision's Inception v3 (optionally with pretrained weights)
    and rewires it to the ``features``/``logits``/``forward`` API used by
    the other models in this file; ``fc`` becomes ``last_linear``. In
    training mode with ``aux_logits`` enabled, ``forward`` returns the
    ``(logits, aux_logits)`` pair.
    """
    model = models.inception_v3(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['inceptionv3'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    # Modify attributes: expose the final classifier as ``last_linear``.
    model.last_linear = model.fc
    del model.fc
    def features(self, input):
        # Spatial sizes in the trailing comments assume a 299x299 RGB input.
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(input) # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x) # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x) # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2) # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x) # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x) # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2) # 35 x 35 x 192
        x = self.Mixed_5b(x) # 35 x 35 x 256
        x = self.Mixed_5c(x) # 35 x 35 x 288
        x = self.Mixed_5d(x) # 35 x 35 x 288
        x = self.Mixed_6a(x) # 17 x 17 x 768
        x = self.Mixed_6b(x) # 17 x 17 x 768
        x = self.Mixed_6c(x) # 17 x 17 x 768
        x = self.Mixed_6d(x) # 17 x 17 x 768
        x = self.Mixed_6e(x) # 17 x 17 x 768
        # Stash the auxiliary head's output on the instance so logits()
        # can return it alongside the main output during training.
        if self.training and self.aux_logits:
            self._out_aux = self.AuxLogits(x) # 17 x 17 x 768
        x = self.Mixed_7a(x) # 8 x 8 x 1280
        x = self.Mixed_7b(x) # 8 x 8 x 2048
        x = self.Mixed_7c(x) # 8 x 8 x 2048
        return x
    def logits(self, features):
        x = F.avg_pool2d(features, kernel_size=8) # 1 x 1 x 2048
        x = F.dropout(x, training=self.training) # 1 x 1 x 2048
        x = x.view(x.size(0), -1) # 2048
        x = self.last_linear(x) # 1000 (num_classes)
        # Consume the aux output stashed by features(); clear it so a
        # stale value cannot leak into a later call.
        if self.training and self.aux_logits:
            aux = self._out_aux
            self._out_aux = None
            return x, aux
        return x
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
    # Modify methods: bind the new API onto this particular instance.
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
###############################################################
# ResNets
def modify_resnets(model):
    """Expose ``fc`` as ``last_linear`` and add ``features``/``logits``/``forward``."""
    model.last_linear = model.fc
    model.fc = None

    def features(self, input):
        out = self.maxpool(self.relu(self.bn1(self.conv1(input))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        return out

    def logits(self, features):
        out = self.avgpool(features)
        return self.last_linear(out.view(out.size(0), -1))

    def forward(self, input):
        return self.logits(self.features(input))

    # Bind the new methods onto this particular instance.
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def resnet18(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-18 model.
    """
    model = models.resnet18(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['resnet18'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_resnets(model)
def resnet34(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-34 model.
    """
    model = models.resnet34(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['resnet34'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_resnets(model)
def resnet101(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-101 model.
    """
    model = models.resnet101(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['resnet101'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_resnets(model)
def resnet152(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-152 model.
    """
    model = models.resnet152(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['resnet152'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_resnets(model)
###############################################################
# SqueezeNets
def modify_squeezenets(model):
    """Flatten the classifier Sequential into named submodules.

    /!\ Beware squeezenets do not have any last_linear module: the final
    classification layer is a convolution (``last_conv``).
    """
    (model.dropout, model.last_conv,
     model.relu, model.avgpool) = (model.classifier[i] for i in range(4))
    del model.classifier

    def logits(self, features):
        out = self.dropout(features)
        out = self.last_conv(out)
        out = self.relu(out)
        return self.avgpool(out)

    def forward(self, input):
        return self.logits(self.features(input))

    # Bind the new methods onto this particular instance.
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def squeezenet1_0(num_classes=1000, pretrained='imagenet'):
    r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
    accuracy with 50x fewer parameters and <0.5MB model size"
    <https://arxiv.org/abs/1602.07360>`_ paper.
    """
    model = models.squeezenet1_0(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['squeezenet1_0'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_squeezenets(model)
def squeezenet1_1(num_classes=1000, pretrained='imagenet'):
    r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
    <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
    than SqueezeNet 1.0, without sacrificing accuracy.
    """
    model = models.squeezenet1_1(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['squeezenet1_1'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_squeezenets(model)
###############################################################
# VGGs
def modify_vggs(model):
    """Rewire torchvision's VGG to the ``features``/``logits``/``forward`` API.

    The Sequential ``classifier`` is flattened into named submodules and
    the final fully-connected layer is exposed as ``model.last_linear``.
    """
    # Keep the conv trunk under a private name; flatten the classifier.
    model._features = model.features
    del model.features
    (model.linear0, model.relu0, model.dropout0,
     model.linear1, model.relu1, model.dropout1,
     model.last_linear) = (model.classifier[i] for i in range(7))
    del model.classifier

    def features(self, input):
        out = self._features(input)
        out = out.view(out.size(0), -1)
        out = self.linear0(out)
        out = self.relu0(out)
        out = self.dropout0(out)
        return self.linear1(out)

    def logits(self, features):
        return self.last_linear(self.dropout1(self.relu1(features)))

    def forward(self, input):
        return self.logits(self.features(input))

    # Bind the new methods onto this particular instance.
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def vgg11(num_classes=1000, pretrained='imagenet'):
    """VGG 11-layer model (configuration "A")
    """
    model = models.vgg11(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['vgg11'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_vggs(model)
def vgg11_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 11-layer model (configuration "A") with batch normalization
    """
    model = models.vgg11_bn(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['vgg11_bn'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_vggs(model)
def vgg13(num_classes=1000, pretrained='imagenet'):
    """VGG 13-layer model (configuration "B")
    """
    model = models.vgg13(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['vgg13'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_vggs(model)
def vgg13_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 13-layer model (configuration "B") with batch normalization
    """
    model = models.vgg13_bn(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['vgg13_bn'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_vggs(model)
def vgg16(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D")
    """
    model = models.vgg16(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['vgg16'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_vggs(model)
def vgg16_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D") with batch normalization
    """
    model = models.vgg16_bn(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['vgg16_bn'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_vggs(model)
def vgg19(num_classes=1000, pretrained='imagenet'):
    """VGG 19-layer model (configuration "E")
    """
    model = models.vgg19(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['vgg19'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_vggs(model)
def vgg19_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 19-layer model (configuration 'E') with batch normalization
    """
    model = models.vgg19_bn(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['vgg19_bn'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_vggs(model)
|
Cadene/pretrained-models.pytorch | pretrainedmodels/models/torchvision_models.py | squeezenet1_0 | python | def squeezenet1_0(num_classes=1000, pretrained='imagenet'):
r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
accuracy with 50x fewer parameters and <0.5MB model size"
<https://arxiv.org/abs/1602.07360>`_ paper.
"""
model = models.squeezenet1_0(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['squeezenet1_0'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_squeezenets(model)
return model | r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
accuracy with 50x fewer parameters and <0.5MB model size"
<https://arxiv.org/abs/1602.07360>`_ paper. | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/torchvision_models.py#L428-L438 | [
"def load_pretrained(model, num_classes, settings):\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n state_dict = model_zoo.load_url(settings['url'])\n state_dict = update_state_dict(state_dict)\n model.load_state_dict(state_dict)\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n model.mean = settings['mean']\n model.std = settings['std']\n return model\n",
"def modify_squeezenets(model):\n # /!\\ Beware squeezenets do not have any last_linear module\n\n # Modify attributs\n model.dropout = model.classifier[0]\n model.last_conv = model.classifier[1]\n model.relu = model.classifier[2]\n model.avgpool = model.classifier[3]\n del model.classifier\n\n def logits(self, features):\n x = self.dropout(features)\n x = self.last_conv(x)\n x = self.relu(x)\n x = self.avgpool(x)\n return x\n\n def forward(self, input):\n x = self.features(input)\n x = self.logits(x)\n return x\n\n # Modify methods\n model.logits = types.MethodType(logits, model)\n model.forward = types.MethodType(forward, model)\n return model\n"
] | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import torchvision.models as models
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import types
import re
#################################################################
# You can find the definitions of those models here:
# https://github.com/pytorch/vision/blob/master/torchvision/models
#
# To fit the API, we usually added/redefined some methods and
# renamed some attributs (see below for each models).
#
# However, you usually do not need to see the original model
# definition from torchvision. Just use `print(model)` to see
# the modules and see bellow the `model.features` and
# `model.classifier` definitions.
#################################################################
# Public factory functions exported by this module.
__all__ = [
    'alexnet',
    'densenet121', 'densenet169', 'densenet201', 'densenet161',
    'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
    'inceptionv3',
    'squeezenet1_0', 'squeezenet1_1',
    'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    'vgg19_bn', 'vgg19'
]
# Checkpoint URLs per architecture (torchvision mirrors plus lip6-hosted
# re-exports for the DenseNets).
model_urls = {
    'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
    'densenet121': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet121-fbdb23505.pth',
    'densenet169': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet169-f470b90a4.pth',
    'densenet201': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet201-5750cbb1e.pth',
    'densenet161': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet161-347e6b360.pth',
    'inceptionv3': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
    'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
    # 'vgg16_caffe': 'https://s3-us-west-2.amazonaws.com/jcjohns-models/vgg16-00b39a1b.pth',
    # 'vgg19_caffe': 'https://s3-us-west-2.amazonaws.com/jcjohns-models/vgg19-d01eb7cb.pth'
}
# Standard ImageNet preprocessing defaults for every model ...
input_sizes = {}
means = {}
stds = {}
for model_name in __all__:
    input_sizes[model_name] = [3, 224, 224]
    means[model_name] = [0.485, 0.456, 0.406]
    stds[model_name] = [0.229, 0.224, 0.225]
# ... except Inception v3, which uses 299x299 inputs and [-1, 1]-style scaling.
for model_name in ['inceptionv3']:
    input_sizes[model_name] = [3, 299, 299]
    means[model_name] = [0.5, 0.5, 0.5]
    stds[model_name] = [0.5, 0.5, 0.5]
# Per-model settings dict consumed by load_pretrained(); only the
# 'imagenet' key is populated here.
pretrained_settings = {}
for model_name in __all__:
    pretrained_settings[model_name] = {
        'imagenet': {
            'url': model_urls[model_name],
            'input_space': 'RGB',
            'input_size': input_sizes[model_name],
            'input_range': [0, 1],
            'mean': means[model_name],
            'std': stds[model_name],
            'num_classes': 1000
        }
    }
# for model_name in ['vgg16', 'vgg19']:
#     pretrained_settings[model_name]['imagenet_caffe'] = {
#         'url': model_urls[model_name + '_caffe'],
#         'input_space': 'BGR',
#         'input_size': input_sizes[model_name],
#         'input_range': [0, 255],
#         'mean': [103.939, 116.779, 123.68],
#         'std': [1., 1., 1.],
#         'num_classes': 1000
#     }
def update_state_dict(state_dict):
    """Rename legacy DenseNet keys such as ``norm.1`` to ``norm1``.

    '.'s are no longer allowed in module names, but the previous _DenseLayer
    used keys like 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2',
    'conv.2', and the downloadable checkpoints still contain them. The
    extra dot is stripped in place so the checkpoints load cleanly.
    """
    legacy = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
    for old_key in list(state_dict):
        match = legacy.match(old_key)
        if match is None:
            continue
        # e.g. '...denselayer1.norm' + '1.weight' -> '...denselayer1.norm1.weight'
        state_dict[match.group(1) + match.group(2)] = state_dict.pop(old_key)
    return state_dict
def load_pretrained(model, num_classes, settings):
    """Load the checkpoint from ``settings['url']`` into *model*.

    Also attaches the preprocessing metadata (input space/size/range,
    mean, std) from *settings* onto the model instance. Raises
    AssertionError when *num_classes* does not match the checkpoint.
    """
    assert num_classes == settings['num_classes'], \
        "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
    weights = update_state_dict(model_zoo.load_url(settings['url']))
    model.load_state_dict(weights)
    # Copy normalization / input metadata onto the model for downstream use.
    for attr in ('input_space', 'input_size', 'input_range', 'mean', 'std'):
        setattr(model, attr, settings[attr])
    return model
#################################################################
# AlexNet
def modify_alexnet(model):
    """Rewire torchvision's AlexNet to the ``features``/``logits``/``forward`` API.

    The Sequential ``classifier`` is flattened into named submodules and
    the final fully-connected layer is exposed as ``model.last_linear``.
    """
    # Keep the conv trunk under a private name; flatten the classifier.
    model._features = model.features
    del model.features
    (model.dropout0, model.linear0, model.relu0,
     model.dropout1, model.linear1, model.relu1,
     model.last_linear) = (model.classifier[i] for i in range(7))
    del model.classifier

    def features(self, input):
        out = self._features(input)
        out = out.view(out.size(0), 256 * 6 * 6)
        out = self.dropout0(out)
        out = self.linear0(out)
        out = self.relu0(out)
        out = self.dropout1(out)
        return self.linear1(out)

    def logits(self, features):
        return self.last_linear(self.relu1(features))

    def forward(self, input):
        return self.logits(self.features(input))

    # Bind the new methods onto this particular instance.
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def alexnet(num_classes=1000, pretrained='imagenet'):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    Optionally loads pretrained weights, then rewires via :func:`modify_alexnet`.
    """
    # https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
    model = models.alexnet(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['alexnet'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_alexnet(model)
###############################################################
# DenseNets
def modify_densenets(model):
    """Expose ``classifier`` as ``last_linear`` and add ``logits``/``forward``."""
    model.last_linear = model.classifier
    del model.classifier

    def logits(self, features):
        out = F.relu(features, inplace=True)
        out = F.avg_pool2d(out, kernel_size=7, stride=1)
        return self.last_linear(out.view(out.size(0), -1))

    def forward(self, input):
        return self.logits(self.features(input))

    # Bind the new methods onto this particular instance.
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def densenet121(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    model = models.densenet121(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['densenet121'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_densenets(model)
def densenet169(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    model = models.densenet169(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['densenet169'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_densenets(model)
def densenet201(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    model = models.densenet201(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['densenet201'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_densenets(model)
def densenet161(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-161 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    model = models.densenet161(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['densenet161'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_densenets(model)
###############################################################
# InceptionV3
def inceptionv3(num_classes=1000, pretrained='imagenet'):
    r"""Inception v3 model architecture from
    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.

    Builds torchvision's Inception v3 (optionally with pretrained weights)
    and rewires it to the ``features``/``logits``/``forward`` API used by
    the other models in this file; ``fc`` becomes ``last_linear``. In
    training mode with ``aux_logits`` enabled, ``forward`` returns the
    ``(logits, aux_logits)`` pair.
    """
    model = models.inception_v3(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['inceptionv3'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    # Modify attributes: expose the final classifier as ``last_linear``.
    model.last_linear = model.fc
    del model.fc
    def features(self, input):
        # Spatial sizes in the trailing comments assume a 299x299 RGB input.
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(input) # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x) # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x) # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2) # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x) # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x) # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2) # 35 x 35 x 192
        x = self.Mixed_5b(x) # 35 x 35 x 256
        x = self.Mixed_5c(x) # 35 x 35 x 288
        x = self.Mixed_5d(x) # 35 x 35 x 288
        x = self.Mixed_6a(x) # 17 x 17 x 768
        x = self.Mixed_6b(x) # 17 x 17 x 768
        x = self.Mixed_6c(x) # 17 x 17 x 768
        x = self.Mixed_6d(x) # 17 x 17 x 768
        x = self.Mixed_6e(x) # 17 x 17 x 768
        # Stash the auxiliary head's output on the instance so logits()
        # can return it alongside the main output during training.
        if self.training and self.aux_logits:
            self._out_aux = self.AuxLogits(x) # 17 x 17 x 768
        x = self.Mixed_7a(x) # 8 x 8 x 1280
        x = self.Mixed_7b(x) # 8 x 8 x 2048
        x = self.Mixed_7c(x) # 8 x 8 x 2048
        return x
    def logits(self, features):
        x = F.avg_pool2d(features, kernel_size=8) # 1 x 1 x 2048
        x = F.dropout(x, training=self.training) # 1 x 1 x 2048
        x = x.view(x.size(0), -1) # 2048
        x = self.last_linear(x) # 1000 (num_classes)
        # Consume the aux output stashed by features(); clear it so a
        # stale value cannot leak into a later call.
        if self.training and self.aux_logits:
            aux = self._out_aux
            self._out_aux = None
            return x, aux
        return x
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
    # Modify methods: bind the new API onto this particular instance.
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
###############################################################
# ResNets
def modify_resnets(model):
    """Expose ``fc`` as ``last_linear`` and add ``features``/``logits``/``forward``."""
    model.last_linear = model.fc
    model.fc = None

    def features(self, input):
        out = self.maxpool(self.relu(self.bn1(self.conv1(input))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        return out

    def logits(self, features):
        out = self.avgpool(features)
        return self.last_linear(out.view(out.size(0), -1))

    def forward(self, input):
        return self.logits(self.features(input))

    # Bind the new methods onto this particular instance.
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
def resnet18(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-18 model.
    """
    model = models.resnet18(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['resnet18'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_resnets(model)
def resnet34(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-34 model.
    """
    model = models.resnet34(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['resnet34'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_resnets(model)
def resnet50(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-50 model.
    """
    model = models.resnet50(pretrained=False)
    if pretrained is not None:
        config = pretrained_settings['resnet50'][pretrained]
        model = load_pretrained(model, num_classes, config)
    return modify_resnets(model)
def resnet101(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-101 model adapted to the features/logits API.

    ``pretrained`` selects an entry of ``pretrained_settings``; ``None``
    skips weight loading.
    """
    net = models.resnet101(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['resnet101'][pretrained])
    return modify_resnets(net)
def resnet152(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-152 model adapted to the features/logits API.

    ``pretrained`` selects an entry of ``pretrained_settings``; ``None``
    skips weight loading.
    """
    net = models.resnet152(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['resnet152'][pretrained])
    return modify_resnets(net)
###############################################################
# SqueezeNets
def modify_squeezenets(model):
    """Adapt a torchvision SqueezeNet to the pretrainedmodels API.

    /!\\ Beware: squeezenets do not have any ``last_linear`` module — the
    head is fully convolutional, so its four stages are exposed as
    individual attributes instead.
    """
    # Unpack the sequential classifier into named attributes.
    head = model.classifier
    model.dropout, model.last_conv = head[0], head[1]
    model.relu, model.avgpool = head[2], head[3]
    del model.classifier

    def logits(self, features):
        """Apply the convolutional classifier head."""
        out = self.dropout(features)
        out = self.last_conv(out)
        out = self.relu(out)
        return self.avgpool(out)

    def forward(self, input):
        return self.logits(self.features(input))

    for name, fn in (('logits', logits), ('forward', forward)):
        setattr(model, name, types.MethodType(fn, model))
    return model
def squeezenet1_1(num_classes=1000, pretrained='imagenet'):
    r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
    <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.

    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
    than SqueezeNet 1.0, without sacrificing accuracy.
    """
    net = models.squeezenet1_1(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['squeezenet1_1'][pretrained])
    return modify_squeezenets(net)
###############################################################
# VGGs
def modify_vggs(model):
    """Adapt a torchvision VGG to the pretrainedmodels API.

    The convolutional part is kept as ``_features`` and the seven
    classifier stages are exposed as named attributes, with the final
    fully-connected layer available as ``last_linear``.
    """
    model._features = model.features
    del model.features
    head = model.classifier
    model.linear0, model.relu0, model.dropout0 = head[0], head[1], head[2]
    model.linear1, model.relu1, model.dropout1 = head[3], head[4], head[5]
    model.last_linear = head[6]
    del model.classifier

    def features(self, input):
        """Conv trunk plus the head up to (and including) ``linear1``."""
        out = self._features(input)
        out = out.view(out.size(0), -1)
        out = self.dropout0(self.relu0(self.linear0(out)))
        return self.linear1(out)

    def logits(self, features):
        """Remaining head: relu1 -> dropout1 -> last_linear."""
        return self.last_linear(self.dropout1(self.relu1(features)))

    def forward(self, input):
        return self.logits(self.features(input))

    for name, fn in (('features', features), ('logits', logits), ('forward', forward)):
        setattr(model, name, types.MethodType(fn, model))
    return model
def vgg11(num_classes=1000, pretrained='imagenet'):
    """VGG 11-layer model (configuration "A").

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg11(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg11'][pretrained])
    return modify_vggs(net)
def vgg11_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 11-layer model (configuration "A") with batch normalization.

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg11_bn(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg11_bn'][pretrained])
    return modify_vggs(net)
def vgg13(num_classes=1000, pretrained='imagenet'):
    """VGG 13-layer model (configuration "B").

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg13(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg13'][pretrained])
    return modify_vggs(net)
def vgg13_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 13-layer model (configuration "B") with batch normalization.

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg13_bn(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg13_bn'][pretrained])
    return modify_vggs(net)
def vgg16(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D").

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg16(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg16'][pretrained])
    return modify_vggs(net)
def vgg16_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D") with batch normalization.

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg16_bn(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg16_bn'][pretrained])
    return modify_vggs(net)
def vgg19(num_classes=1000, pretrained='imagenet'):
    """VGG 19-layer model (configuration "E").

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg19(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg19'][pretrained])
    return modify_vggs(net)
def vgg19_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 19-layer model (configuration "E") with batch normalization.

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg19_bn(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg19_bn'][pretrained])
    return modify_vggs(net)
|
Cadene/pretrained-models.pytorch | pretrainedmodels/models/torchvision_models.py | vgg11 | python | def vgg11(num_classes=1000, pretrained='imagenet'):
model = models.vgg11(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['vgg11'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_vggs(model)
return model | VGG 11-layer model (configuration "A") | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/torchvision_models.py#L495-L503 | [
"def load_pretrained(model, num_classes, settings):\n assert num_classes == settings['num_classes'], \\\n \"num_classes should be {}, but is {}\".format(settings['num_classes'], num_classes)\n state_dict = model_zoo.load_url(settings['url'])\n state_dict = update_state_dict(state_dict)\n model.load_state_dict(state_dict)\n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n model.mean = settings['mean']\n model.std = settings['std']\n return model\n",
"def modify_vggs(model):\n # Modify attributs\n model._features = model.features\n del model.features\n model.linear0 = model.classifier[0]\n model.relu0 = model.classifier[1]\n model.dropout0 = model.classifier[2]\n model.linear1 = model.classifier[3]\n model.relu1 = model.classifier[4]\n model.dropout1 = model.classifier[5]\n model.last_linear = model.classifier[6]\n del model.classifier\n\n def features(self, input):\n x = self._features(input)\n x = x.view(x.size(0), -1)\n x = self.linear0(x)\n x = self.relu0(x)\n x = self.dropout0(x)\n x = self.linear1(x)\n return x\n\n def logits(self, features):\n x = self.relu1(features)\n x = self.dropout1(x)\n x = self.last_linear(x)\n return x\n\n def forward(self, input):\n x = self.features(input)\n x = self.logits(x)\n return x\n\n # Modify methods\n model.features = types.MethodType(features, model)\n model.logits = types.MethodType(logits, model)\n model.forward = types.MethodType(forward, model)\n return model\n"
] | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import torchvision.models as models
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import types
import re
#################################################################
# You can find the definitions of those models here:
# https://github.com/pytorch/vision/blob/master/torchvision/models
#
# To fit the API, we usually added/redefined some methods and
# renamed some attributs (see below for each models).
#
# However, you usually do not need to see the original model
# definition from torchvision. Just use `print(model)` to see
# the modules and see bellow the `model.features` and
# `model.classifier` definitions.
#################################################################
__all__ = [
'alexnet',
'densenet121', 'densenet169', 'densenet201', 'densenet161',
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'inceptionv3',
'squeezenet1_0', 'squeezenet1_1',
'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19'
]
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
'densenet121': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet121-fbdb23505.pth',
'densenet169': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet169-f470b90a4.pth',
'densenet201': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet201-5750cbb1e.pth',
'densenet161': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet161-347e6b360.pth',
'inceptionv3': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
# 'vgg16_caffe': 'https://s3-us-west-2.amazonaws.com/jcjohns-models/vgg16-00b39a1b.pth',
# 'vgg19_caffe': 'https://s3-us-west-2.amazonaws.com/jcjohns-models/vgg19-d01eb7cb.pth'
}
input_sizes = {}
means = {}
stds = {}
for model_name in __all__:
input_sizes[model_name] = [3, 224, 224]
means[model_name] = [0.485, 0.456, 0.406]
stds[model_name] = [0.229, 0.224, 0.225]
for model_name in ['inceptionv3']:
input_sizes[model_name] = [3, 299, 299]
means[model_name] = [0.5, 0.5, 0.5]
stds[model_name] = [0.5, 0.5, 0.5]
pretrained_settings = {}
for model_name in __all__:
pretrained_settings[model_name] = {
'imagenet': {
'url': model_urls[model_name],
'input_space': 'RGB',
'input_size': input_sizes[model_name],
'input_range': [0, 1],
'mean': means[model_name],
'std': stds[model_name],
'num_classes': 1000
}
}
# for model_name in ['vgg16', 'vgg19']:
# pretrained_settings[model_name]['imagenet_caffe'] = {
# 'url': model_urls[model_name + '_caffe'],
# 'input_space': 'BGR',
# 'input_size': input_sizes[model_name],
# 'input_range': [0, 255],
# 'mean': [103.939, 116.779, 123.68],
# 'std': [1., 1., 1.],
# 'num_classes': 1000
# }
def update_state_dict(state_dict):
    """Rename legacy DenseNet checkpoint keys in place.

    '.'s are no longer allowed in module names, but previous _DenseLayer
    checkpoints contain keys such as ``norm.1`` / ``conv.2``.  For every
    matching key, e.g. ``...denselayer1.norm.1.weight``, the inner dot is
    dropped (``...denselayer1.norm1.weight``).  Returns the mutated dict.
    """
    legacy = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
    for old_key in list(state_dict):
        match = legacy.match(old_key)
        if match is None:
            continue
        state_dict[match.group(1) + match.group(2)] = state_dict.pop(old_key)
    return state_dict
def load_pretrained(model, num_classes, settings):
    """Download weights from ``settings['url']``, load them into ``model``,
    and attach the preprocessing metadata (input space/size/range, mean, std).

    ``num_classes`` must match the checkpoint's class count.
    """
    assert num_classes == settings['num_classes'], \
        "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
    weights = update_state_dict(model_zoo.load_url(settings['url']))
    model.load_state_dict(weights)
    # Expose the preprocessing contract on the model itself.
    for attr in ('input_space', 'input_size', 'input_range', 'mean', 'std'):
        setattr(model, attr, settings[attr])
    return model
#################################################################
# AlexNet
def modify_alexnet(model):
    """Adapt a torchvision AlexNet to the pretrainedmodels API.

    Keeps the conv stack as ``_features``, unpacks the seven classifier
    stages into named attributes, and exposes the final layer as
    ``last_linear``.
    """
    model._features = model.features
    del model.features
    head = model.classifier
    model.dropout0, model.linear0, model.relu0 = head[0], head[1], head[2]
    model.dropout1, model.linear1, model.relu1 = head[3], head[4], head[5]
    model.last_linear = head[6]
    del model.classifier

    def features(self, input):
        """Conv trunk plus the head up to (and including) ``linear1``."""
        out = self._features(input)
        out = out.view(out.size(0), 256 * 6 * 6)
        out = self.relu0(self.linear0(self.dropout0(out)))
        out = self.linear1(self.dropout1(out))
        return out

    def logits(self, features):
        """Remaining head: relu1 -> last_linear."""
        return self.last_linear(self.relu1(features))

    def forward(self, input):
        return self.logits(self.features(input))

    for name, fn in (('features', features), ('logits', logits), ('forward', forward)):
        setattr(model, name, types.MethodType(fn, model))
    return model
def alexnet(num_classes=1000, pretrained='imagenet'):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    # https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py
    net = models.alexnet(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['alexnet'][pretrained])
    return modify_alexnet(net)
###############################################################
# DenseNets
def modify_densenets(model):
    """Adapt a torchvision DenseNet to the pretrainedmodels API.

    The ``classifier`` layer becomes ``last_linear`` and the forward pass
    is split into ``features``/``logits``.
    """
    model.last_linear = model.classifier
    del model.classifier

    def logits(self, features):
        """ReLU + 7x7 average pool + classifier."""
        out = F.relu(features, inplace=True)
        out = F.avg_pool2d(out, kernel_size=7, stride=1)
        out = out.view(out.size(0), -1)
        return self.last_linear(out)

    def forward(self, input):
        return self.logits(self.features(input))

    for name, fn in (('logits', logits), ('forward', forward)):
        setattr(model, name, types.MethodType(fn, model))
    return model
def densenet121(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.densenet121(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['densenet121'][pretrained])
    return modify_densenets(net)
def densenet169(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.densenet169(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['densenet169'][pretrained])
    return modify_densenets(net)
def densenet201(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.densenet201(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['densenet201'][pretrained])
    return modify_densenets(net)
def densenet161(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-161 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.densenet161(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['densenet161'][pretrained])
    return modify_densenets(net)
###############################################################
# InceptionV3
def inceptionv3(num_classes=1000, pretrained='imagenet'):
    r"""Inception v3 model architecture from
    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.

    Builds the torchvision Inception v3, optionally loads pretrained
    weights, then rebinds its methods to the package-wide
    features/logits/forward API (``fc`` becomes ``last_linear``).
    """
    model = models.inception_v3(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['inceptionv3'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    # Modify attributs
    model.last_linear = model.fc
    del model.fc
    def features(self, input):
        """Convolutional trunk; stashes the aux-head output when training."""
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(input) # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x) # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x) # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2) # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x) # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x) # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2) # 35 x 35 x 192
        x = self.Mixed_5b(x) # 35 x 35 x 256
        x = self.Mixed_5c(x) # 35 x 35 x 288
        x = self.Mixed_5d(x) # 35 x 35 x 288
        x = self.Mixed_6a(x) # 17 x 17 x 768
        x = self.Mixed_6b(x) # 17 x 17 x 768
        x = self.Mixed_6c(x) # 17 x 17 x 768
        x = self.Mixed_6d(x) # 17 x 17 x 768
        x = self.Mixed_6e(x) # 17 x 17 x 768
        if self.training and self.aux_logits:
            # Auxiliary head output is kept on the instance so ``logits``
            # can return it alongside the main output.
            self._out_aux = self.AuxLogits(x) # 17 x 17 x 768
        x = self.Mixed_7a(x) # 8 x 8 x 1280
        x = self.Mixed_7b(x) # 8 x 8 x 2048
        x = self.Mixed_7c(x) # 8 x 8 x 2048
        return x
    def logits(self, features):
        """Pool, dropout, classify; returns ``(x, aux)`` in training mode
        when ``aux_logits`` is enabled, otherwise just ``x``."""
        x = F.avg_pool2d(features, kernel_size=8) # 1 x 1 x 2048
        x = F.dropout(x, training=self.training) # 1 x 1 x 2048
        x = x.view(x.size(0), -1) # 2048
        x = self.last_linear(x) # 1000 (num_classes)
        if self.training and self.aux_logits:
            aux = self._out_aux
            self._out_aux = None
            return x, aux
        return x
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
    # Modify methods
    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model
###############################################################
# ResNets
def modify_resnets(model):
    """Adapt a torchvision ResNet to the pretrainedmodels API.

    Exposes the final ``fc`` layer as ``last_linear`` and splits the
    forward pass into ``features`` (convolutional trunk) and ``logits``
    (pooling + classifier), like every other model in this package.
    """
    # Re-expose the classifier under the common name; ``fc`` stays as a stub.
    model.last_linear = model.fc
    model.fc = None

    def features(self, input):
        """Run the convolutional trunk only."""
        out = self.maxpool(self.relu(self.bn1(self.conv1(input))))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        return out

    def logits(self, features):
        """Pool the feature map and apply the classifier."""
        out = self.avgpool(features)
        out = out.view(out.size(0), -1)
        return self.last_linear(out)

    def forward(self, input):
        return self.logits(self.features(input))

    # Bind the new methods onto this specific instance.
    for name, fn in (('features', features), ('logits', logits), ('forward', forward)):
        setattr(model, name, types.MethodType(fn, model))
    return model
def resnet18(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-18 model adapted to the features/logits API.

    ``pretrained`` selects an entry of ``pretrained_settings``; ``None``
    skips weight loading.
    """
    net = models.resnet18(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['resnet18'][pretrained])
    return modify_resnets(net)
def resnet34(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-34 model adapted to the features/logits API.

    ``pretrained`` selects an entry of ``pretrained_settings``; ``None``
    skips weight loading.
    """
    net = models.resnet34(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['resnet34'][pretrained])
    return modify_resnets(net)
def resnet50(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-50 model adapted to the features/logits API.

    ``pretrained`` selects an entry of ``pretrained_settings``; ``None``
    skips weight loading.
    """
    net = models.resnet50(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['resnet50'][pretrained])
    return modify_resnets(net)
def resnet101(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-101 model adapted to the features/logits API.

    ``pretrained`` selects an entry of ``pretrained_settings``; ``None``
    skips weight loading.
    """
    net = models.resnet101(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['resnet101'][pretrained])
    return modify_resnets(net)
def resnet152(num_classes=1000, pretrained='imagenet'):
    """Constructs a ResNet-152 model adapted to the features/logits API.

    ``pretrained`` selects an entry of ``pretrained_settings``; ``None``
    skips weight loading.
    """
    net = models.resnet152(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['resnet152'][pretrained])
    return modify_resnets(net)
###############################################################
# SqueezeNets
def modify_squeezenets(model):
    """Adapt a torchvision SqueezeNet to the pretrainedmodels API.

    /!\\ Beware: squeezenets do not have any ``last_linear`` module — the
    head is fully convolutional, so its four stages are exposed as
    individual attributes instead.
    """
    # Unpack the sequential classifier into named attributes.
    head = model.classifier
    model.dropout, model.last_conv = head[0], head[1]
    model.relu, model.avgpool = head[2], head[3]
    del model.classifier

    def logits(self, features):
        """Apply the convolutional classifier head."""
        out = self.dropout(features)
        out = self.last_conv(out)
        out = self.relu(out)
        return self.avgpool(out)

    def forward(self, input):
        return self.logits(self.features(input))

    for name, fn in (('logits', logits), ('forward', forward)):
        setattr(model, name, types.MethodType(fn, model))
    return model
def squeezenet1_0(num_classes=1000, pretrained='imagenet'):
    r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
    accuracy with 50x fewer parameters and <0.5MB model size"
    <https://arxiv.org/abs/1602.07360>`_ paper.

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.squeezenet1_0(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['squeezenet1_0'][pretrained])
    return modify_squeezenets(net)
def squeezenet1_1(num_classes=1000, pretrained='imagenet'):
    r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
    <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.

    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
    than SqueezeNet 1.0, without sacrificing accuracy.
    """
    net = models.squeezenet1_1(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['squeezenet1_1'][pretrained])
    return modify_squeezenets(net)
###############################################################
# VGGs
def modify_vggs(model):
    """Adapt a torchvision VGG to the pretrainedmodels API.

    The convolutional part is kept as ``_features`` and the seven
    classifier stages are exposed as named attributes, with the final
    fully-connected layer available as ``last_linear``.
    """
    model._features = model.features
    del model.features
    head = model.classifier
    model.linear0, model.relu0, model.dropout0 = head[0], head[1], head[2]
    model.linear1, model.relu1, model.dropout1 = head[3], head[4], head[5]
    model.last_linear = head[6]
    del model.classifier

    def features(self, input):
        """Conv trunk plus the head up to (and including) ``linear1``."""
        out = self._features(input)
        out = out.view(out.size(0), -1)
        out = self.dropout0(self.relu0(self.linear0(out)))
        return self.linear1(out)

    def logits(self, features):
        """Remaining head: relu1 -> dropout1 -> last_linear."""
        return self.last_linear(self.dropout1(self.relu1(features)))

    def forward(self, input):
        return self.logits(self.features(input))

    for name, fn in (('features', features), ('logits', logits), ('forward', forward)):
        setattr(model, name, types.MethodType(fn, model))
    return model
def vgg11_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 11-layer model (configuration "A") with batch normalization.

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg11_bn(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg11_bn'][pretrained])
    return modify_vggs(net)
def vgg13(num_classes=1000, pretrained='imagenet'):
    """VGG 13-layer model (configuration "B").

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg13(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg13'][pretrained])
    return modify_vggs(net)
def vgg13_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 13-layer model (configuration "B") with batch normalization.

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg13_bn(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg13_bn'][pretrained])
    return modify_vggs(net)
def vgg16(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D").

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg16(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg16'][pretrained])
    return modify_vggs(net)
def vgg16_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 16-layer model (configuration "D") with batch normalization.

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg16_bn(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg16_bn'][pretrained])
    return modify_vggs(net)
def vgg19(num_classes=1000, pretrained='imagenet'):
    """VGG 19-layer model (configuration "E").

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg19(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg19'][pretrained])
    return modify_vggs(net)
def vgg19_bn(num_classes=1000, pretrained='imagenet'):
    """VGG 19-layer model (configuration 'E') with batch normalization.

    Optionally loads pretrained weights and adapts the model to the
    features/logits/forward API.
    """
    net = models.vgg19_bn(pretrained=False)
    if pretrained is not None:
        net = load_pretrained(net, num_classes,
                              pretrained_settings['vgg19_bn'][pretrained])
    return modify_vggs(net)
|
Cadene/pretrained-models.pytorch | examples/imagenet_eval.py | adjust_learning_rate | python | def adjust_learning_rate(optimizer, epoch):
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr | Sets the learning rate to the initial LR decayed by 10 every 30 epochs | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/examples/imagenet_eval.py#L280-L284 | null | from __future__ import print_function, division, absolute_import
import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import sys
sys.path.append('.')
import pretrainedmodels
import pretrainedmodels.utils
model_names = sorted(name for name in pretrainedmodels.__dict__
if not name.startswith("__")
and name.islower()
and callable(pretrainedmodels.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--data', metavar='DIR', default="path_to_imagenet",
help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='nasnetamobile',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: fbresnet152)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=1256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', default=True,
action='store_true', help='evaluate model on validation set')
parser.add_argument('--pretrained', default='imagenet', help='use pre-trained model')
parser.add_argument('--do-not-preserve-aspect-ratio',
dest='preserve_aspect_ratio',
help='do not preserve the aspect ratio when resizing an image',
action='store_false')
parser.set_defaults(preserve_aspect_ratio=True)
best_prec1 = 0
def main():
    """Entry point: build the model, optionally resume, then evaluate or train.

    Reads all configuration from the module-level ``parser`` and mutates the
    globals ``args`` and ``best_prec1``.
    """
    global args, best_prec1
    args = parser.parse_args()
    # create model
    print("=> creating model '{}'".format(args.arch))
    if args.pretrained.lower() not in ['false', 'none', 'not', 'no', '0']:
        print("=> using pre-trained parameters '{}'".format(args.pretrained))
        model = pretrainedmodels.__dict__[args.arch](num_classes=1000,
                                                     pretrained=args.pretrained)
    else:
        model = pretrainedmodels.__dict__[args.arch]()
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code (validation only; the training dataloader was removed).
    valdir = os.path.join(args.data, 'val')
    # Ratio between the crop and the resized image; 0.875 is the usual
    # 224/256 ImageNet convention.
    scale = 0.875
    print('Images transformed from size {} to {}'.format(
        int(round(max(model.input_size) / scale)),
        model.input_size))
    val_tf = pretrainedmodels.utils.TransformImage(
        model,
        scale=scale,
        preserve_aspect_ratio=args.preserve_aspect_ratio
    )
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, val_tf),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    model = torch.nn.DataParallel(model).cuda()
    if args.evaluate:
        validate(val_loader, model, criterion)
        return
    # NOTE(review): ``train_loader`` is never defined in this script (the
    # ImageFolder dataloader was commented out upstream), so the training
    # branch below raises NameError unless a train loader is constructed.
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)
        # evaluate on validation set; ``validate`` returns (top1, top5),
        # so unpack instead of comparing a tuple against a float.
        prec1, _prec5 = validate(val_loader, model, criterion)
        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best)
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch over ``train_loader``.

    Logs timing, loss and top-1/top-5 accuracy every ``args.print_freq``
    batches.  Mutates ``model`` (SGD steps via ``optimizer``).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        target = target.cuda()
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)
        # measure accuracy and record loss.
        # Use .item() — indexing a 0-dim tensor with [0] (the old
        # ``loss.data[0]`` idiom) raises on PyTorch >= 0.5, and ``validate``
        # already uses .item().
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, criterion):
    """Evaluate ``model`` on ``val_loader`` and return average top-1 accuracy.

    Runs under ``torch.no_grad()`` so no autograd graph is built.

    Returns a single scalar rather than a ``(top1, top5)`` tuple: the caller
    compares the result against a float (``prec1 > best_prec1``) and feeds
    it to ``max()``, which would raise a TypeError on a tuple.
    """
    with torch.no_grad():
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()
        # switch to evaluate mode (disables dropout/BN updates)
        model.eval()
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            target = target.cuda()
            input = input.cuda()
            # compute output
            output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target.data, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            top5.update(prec5.item(), input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       i, len(val_loader), batch_time=batch_time, loss=losses,
                       top1=top1, top5=top5))
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize ``state`` to ``filename``.

    When ``is_best`` is true, additionally mirror the checkpoint to
    'model_best.pth.tar' in the working directory.
    """
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to report.

    Returns:
        List of 0-dim tensors, one per k, each holding the percentage of
        samples whose target appears among the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape() instead of view(): a slice of this comparison tensor is
        # not guaranteed contiguous, and view() raises in that case.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
# Script entry point: parse CLI arguments and launch training/evaluation.
if __name__ == '__main__':
    main()
Cadene/pretrained-models.pytorch | pretrainedmodels/models/nasnet.py | nasnetalarge | python | def nasnetalarge(num_classes=1001, pretrained='imagenet'):
r"""NASNetALarge model architecture from the
`"NASNet" <https://arxiv.org/abs/1707.07012>`_ paper.
"""
if pretrained:
settings = pretrained_settings['nasnetalarge'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
# both 'imagenet'&'imagenet+background' are loaded from same parameters
model = NASNetALarge(num_classes=1001)
model.load_state_dict(model_zoo.load_url(settings['url']))
if pretrained == 'imagenet':
new_last_linear = nn.Linear(model.last_linear.in_features, 1000)
new_last_linear.weight.data = model.last_linear.weight.data[1:]
new_last_linear.bias.data = model.last_linear.bias.data[1:]
model.last_linear = new_last_linear
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
else:
model = NASNetALarge(num_classes=num_classes)
return model | r"""NASNetALarge model architecture from the
`"NASNet" <https://arxiv.org/abs/1707.07012>`_ paper. | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/nasnet.py#L608-L635 | null | from __future__ import print_function, division, absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
pretrained_settings = {
'nasnetalarge': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth',
'input_space': 'RGB',
'input_size': [3, 331, 331], # resize 354
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1000
},
'imagenet+background': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/nasnetalarge-a1897284.pth',
'input_space': 'RGB',
'input_size': [3, 331, 331], # resize 354
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1001
}
}
}
class MaxPoolPad(nn.Module):
    """3x3 stride-2 max pooling with an extra zero pad on the top/left.

    Pads by one row/column, pools, then drops the first row/column of the
    result (TF-style 'SAME' pooling for even-sized inputs).
    """

    def __init__(self):
        super(MaxPoolPad, self).__init__()
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.pool = nn.MaxPool2d(3, stride=2, padding=1)

    def forward(self, x):
        pooled = self.pool(self.pad(x))
        return pooled[:, :, 1:, 1:]
class AvgPoolPad(nn.Module):
    """3x3 average pooling preceded by an extra zero pad on the top/left.

    After pooling, the first row/column is cropped away. The pool's own
    padding is excluded from the divisor (count_include_pad=False), but the
    explicit ZeroPad2d zeros do count as real input values.
    """

    def __init__(self, stride=2, padding=1):
        super(AvgPoolPad, self).__init__()
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False)

    def forward(self, x):
        pooled = self.pool(self.pad(x))
        return pooled[:, :, 1:, 1:]
class SeparableConv2d(nn.Module):
    """Depthwise convolution followed by a 1x1 pointwise convolution."""

    def __init__(self, in_channels, out_channels, dw_kernel, dw_stride, dw_padding, bias=False):
        super(SeparableConv2d, self).__init__()
        # groups=in_channels makes the first conv depthwise
        self.depthwise_conv2d = nn.Conv2d(in_channels, in_channels, dw_kernel,
                                          stride=dw_stride, padding=dw_padding,
                                          groups=in_channels, bias=bias)
        self.pointwise_conv2d = nn.Conv2d(in_channels, out_channels, 1,
                                          stride=1, bias=bias)

    def forward(self, x):
        return self.pointwise_conv2d(self.depthwise_conv2d(x))
class BranchSeparables(nn.Module):
    """Two stacked ReLU -> separable-conv -> BatchNorm stages.

    Only the first stage applies the requested stride and may change the
    channel count; the second always uses stride 1.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):
        super(BranchSeparables, self).__init__()
        self.relu = nn.ReLU()
        self.separable_1 = SeparableConv2d(in_channels, in_channels, kernel_size, stride, padding, bias=bias)
        self.bn_sep_1 = nn.BatchNorm2d(in_channels, eps=0.001, momentum=0.1, affine=True)
        self.relu1 = nn.ReLU()
        self.separable_2 = SeparableConv2d(in_channels, out_channels, kernel_size, 1, padding, bias=bias)
        self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)

    def forward(self, x):
        out = self.bn_sep_1(self.separable_1(self.relu(x)))
        out = self.bn_sep_2(self.separable_2(self.relu1(out)))
        return out
class BranchSeparablesStem(nn.Module):
    """Stem variant of BranchSeparables.

    Unlike BranchSeparables, the first separable conv already maps to
    ``out_channels``, so both BatchNorms run on ``out_channels``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):
        super(BranchSeparablesStem, self).__init__()
        self.relu = nn.ReLU()
        self.separable_1 = SeparableConv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
        self.bn_sep_1 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)
        self.relu1 = nn.ReLU()
        self.separable_2 = SeparableConv2d(out_channels, out_channels, kernel_size, 1, padding, bias=bias)
        self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)

    def forward(self, x):
        out = self.bn_sep_1(self.separable_1(self.relu(x)))
        out = self.bn_sep_2(self.separable_2(self.relu1(out)))
        return out
class BranchSeparablesReduction(BranchSeparables):
    """BranchSeparables that zero-pads top/left before the first separable
    conv and crops the first row/column afterwards (shifted 'SAME'-style
    behaviour for the stride-2 reduction cells)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, z_padding=1, bias=False):
        BranchSeparables.__init__(self, in_channels, out_channels, kernel_size, stride, padding, bias)
        self.padding = nn.ZeroPad2d((z_padding, 0, z_padding, 0))

    def forward(self, x):
        out = self.separable_1(self.padding(self.relu(x)))
        out = self.bn_sep_1(out[:, :, 1:, 1:].contiguous())
        return self.bn_sep_2(self.separable_2(self.relu1(out)))
class CellStem0(nn.Module):
    """First stem cell: reduces spatial resolution by 2 (stride-2 branches)
    and outputs 4 * num_filters channels (concat of four branch sums)."""
    def __init__(self, stem_filters, num_filters=42):
        super(CellStem0, self).__init__()
        self.num_filters = num_filters
        self.stem_filters = stem_filters
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(self.stem_filters, self.num_filters, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(self.num_filters, eps=0.001, momentum=0.1, affine=True))
        self.comb_iter_0_left = BranchSeparables(self.num_filters, self.num_filters, 5, 2, 2)
        self.comb_iter_0_right = BranchSeparablesStem(self.stem_filters, self.num_filters, 7, 2, 3, bias=False)
        self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1)
        self.comb_iter_1_right = BranchSeparablesStem(self.stem_filters, self.num_filters, 7, 2, 3, bias=False)
        self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
        self.comb_iter_2_right = BranchSeparablesStem(self.stem_filters, self.num_filters, 5, 2, 2, bias=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(self.num_filters, self.num_filters, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1)
    def forward(self, x):
        # x1 is the 1x1-projected copy of the raw input; the *_right stem
        # branches consume x directly.
        x1 = self.conv_1x1(x)
        x_comb_iter_0_left = self.comb_iter_0_left(x1)
        x_comb_iter_0_right = self.comb_iter_0_right(x)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x1)
        x_comb_iter_1_right = self.comb_iter_1_right(x)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x1)
        x_comb_iter_2_right = self.comb_iter_2_right(x)
        x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
        x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
        x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
        x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
        x_comb_iter_4_right = self.comb_iter_4_right(x1)
        x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
        # concatenate the four branch sums along the channel dimension
        x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class CellStem1(nn.Module):
    """Second stem cell: combines the raw stem conv output (x_conv0) with
    the CellStem0 output (x_stem_0), again halving spatial resolution."""
    def __init__(self, stem_filters, num_filters):
        super(CellStem1, self).__init__()
        self.num_filters = num_filters
        self.stem_filters = stem_filters
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(2*self.num_filters, self.num_filters, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(self.num_filters, eps=0.001, momentum=0.1, affine=True))
        self.relu = nn.ReLU()
        self.path_1 = nn.Sequential()
        self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_1.add_module('conv', nn.Conv2d(self.stem_filters, self.num_filters//2, 1, stride=1, bias=False))
        # path_2 is a ModuleList used only as a named container: forward()
        # applies pad/avgpool/conv manually with an explicit crop in between.
        self.path_2 = nn.ModuleList()
        self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1)))
        self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_2.add_module('conv', nn.Conv2d(self.stem_filters, self.num_filters//2, 1, stride=1, bias=False))
        self.final_path_bn = nn.BatchNorm2d(self.num_filters, eps=0.001, momentum=0.1, affine=True)
        self.comb_iter_0_left = BranchSeparables(self.num_filters, self.num_filters, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(self.num_filters, self.num_filters, 7, 2, 3, bias=False)
        self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1)
        self.comb_iter_1_right = BranchSeparables(self.num_filters, self.num_filters, 7, 2, 3, bias=False)
        self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
        self.comb_iter_2_right = BranchSeparables(self.num_filters, self.num_filters, 5, 2, 2, bias=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(self.num_filters, self.num_filters, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1)
    def forward(self, x_conv0, x_stem_0):
        x_left = self.conv_1x1(x_stem_0)
        x_relu = self.relu(x_conv0)
        # path 1: plain stride-2 subsample + 1x1 projection
        x_path1 = self.path_1(x_relu)
        # path 2: same, but shifted by one pixel via pad + crop
        x_path2 = self.path_2.pad(x_relu)
        x_path2 = x_path2[:, :, 1:, 1:]
        x_path2 = self.path_2.avgpool(x_path2)
        x_path2 = self.path_2.conv(x_path2)
        # final path: concat both subsampled paths and normalize
        x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
        x_comb_iter_0_left = self.comb_iter_0_left(x_left)
        x_comb_iter_0_right = self.comb_iter_0_right(x_right)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x_left)
        x_comb_iter_1_right = self.comb_iter_1_right(x_right)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x_left)
        x_comb_iter_2_right = self.comb_iter_2_right(x_right)
        x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
        x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
        x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
        x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
        x_comb_iter_4_right = self.comb_iter_4_right(x_left)
        x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
        x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class FirstCell(nn.Module):
    """First normal cell after a resolution change: the previous feature map
    (x_prev) is downsampled through two one-pixel-shifted avg-pool paths
    before being mixed with the current input."""
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(FirstCell, self).__init__()
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.relu = nn.ReLU()
        self.path_1 = nn.Sequential()
        self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_1.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        # path_2 is a ModuleList used as a named container; its modules are
        # applied manually in forward() with a crop between pad and avgpool.
        self.path_2 = nn.ModuleList()
        self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1)))
        self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_2.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.final_path_bn = nn.BatchNorm2d(out_channels_left * 2, eps=0.001, momentum=0.1, affine=True)
        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_1_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_2_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_3_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    def forward(self, x, x_prev):
        x_relu = self.relu(x_prev)
        # path 1: plain stride-2 subsample + 1x1 projection
        x_path1 = self.path_1(x_relu)
        # path 2: same, shifted by one pixel via pad + crop
        x_path2 = self.path_2.pad(x_relu)
        x_path2 = x_path2[:, :, 1:, 1:]
        x_path2 = self.path_2.avgpool(x_path2)
        x_path2 = self.path_2.conv(x_path2)
        # final path: concat both subsampled paths and normalize
        x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
        x_right = self.conv_1x1(x)
        x_comb_iter_0_left = self.comb_iter_0_left(x_right)
        x_comb_iter_0_right = self.comb_iter_0_right(x_left)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x_left)
        x_comb_iter_1_right = self.comb_iter_1_right(x_left)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x_right)
        x_comb_iter_2 = x_comb_iter_2_left + x_left
        x_comb_iter_3_left = self.comb_iter_3_left(x_left)
        x_comb_iter_3_right = self.comb_iter_3_right(x_left)
        x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
        x_comb_iter_4_left = self.comb_iter_4_left(x_right)
        x_comb_iter_4 = x_comb_iter_4_left + x_right
        # note: x_left itself is part of the concatenated output (6 tensors)
        x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class NormalCell(nn.Module):
    """Standard NASNet-A normal cell: 1x1-projects both inputs, computes
    five branch combinations, and concatenates six tensors (the projected
    previous input plus the five combinations)."""
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(NormalCell, self).__init__()
        self.conv_prev_1x1 = nn.Sequential()
        self.conv_prev_1x1.add_module('relu', nn.ReLU())
        self.conv_prev_1x1.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.conv_prev_1x1.add_module('bn', nn.BatchNorm2d(out_channels_left, eps=0.001, momentum=0.1, affine=True))
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)
        self.comb_iter_1_left = BranchSeparables(out_channels_left, out_channels_left, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)
        self.comb_iter_2_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_3_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    def forward(self, x, x_prev):
        # x_left comes from the previous cell, x_right from the current input
        x_left = self.conv_prev_1x1(x_prev)
        x_right = self.conv_1x1(x)
        x_comb_iter_0_left = self.comb_iter_0_left(x_right)
        x_comb_iter_0_right = self.comb_iter_0_right(x_left)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x_left)
        x_comb_iter_1_right = self.comb_iter_1_right(x_left)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x_right)
        x_comb_iter_2 = x_comb_iter_2_left + x_left
        x_comb_iter_3_left = self.comb_iter_3_left(x_left)
        x_comb_iter_3_right = self.comb_iter_3_right(x_left)
        x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
        x_comb_iter_4_left = self.comb_iter_4_left(x_right)
        x_comb_iter_4 = x_comb_iter_4_left + x_right
        x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class ReductionCell0(nn.Module):
    """First reduction cell: halves spatial resolution using the padded
    variants (BranchSeparablesReduction, MaxPoolPad, AvgPoolPad) and
    concatenates four branch sums."""
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell0, self).__init__()
        self.conv_prev_1x1 = nn.Sequential()
        self.conv_prev_1x1.add_module('relu', nn.ReLU())
        self.conv_prev_1x1.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.conv_prev_1x1.add_module('bn', nn.BatchNorm2d(out_channels_left, eps=0.001, momentum=0.1, affine=True))
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.comb_iter_0_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
        self.comb_iter_1_left = MaxPoolPad()
        self.comb_iter_1_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
        self.comb_iter_2_left = AvgPoolPad()
        self.comb_iter_2_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_4_right = MaxPoolPad()
    def forward(self, x, x_prev):
        x_left = self.conv_prev_1x1(x_prev)
        x_right = self.conv_1x1(x)
        x_comb_iter_0_left = self.comb_iter_0_left(x_right)
        x_comb_iter_0_right = self.comb_iter_0_right(x_left)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x_right)
        x_comb_iter_1_right = self.comb_iter_1_right(x_left)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x_right)
        x_comb_iter_2_right = self.comb_iter_2_right(x_left)
        x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
        x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
        x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
        x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
        x_comb_iter_4_right = self.comb_iter_4_right(x_right)
        x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
        x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class ReductionCell1(nn.Module):
    """Second reduction cell: same wiring as ReductionCell0 but with the
    unpadded BranchSeparables / MaxPool2d / AvgPool2d variants."""
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell1, self).__init__()
        self.conv_prev_1x1 = nn.Sequential()
        self.conv_prev_1x1.add_module('relu', nn.ReLU())
        self.conv_prev_1x1.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.conv_prev_1x1.add_module('bn', nn.BatchNorm2d(out_channels_left, eps=0.001, momentum=0.1, affine=True))
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
        self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
        self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
        self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1)
    def forward(self, x, x_prev):
        x_left = self.conv_prev_1x1(x_prev)
        x_right = self.conv_1x1(x)
        x_comb_iter_0_left = self.comb_iter_0_left(x_right)
        x_comb_iter_0_right = self.comb_iter_0_right(x_left)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x_right)
        x_comb_iter_1_right = self.comb_iter_1_right(x_left)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x_right)
        x_comb_iter_2_right = self.comb_iter_2_right(x_left)
        x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
        x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
        x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
        x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
        x_comb_iter_4_right = self.comb_iter_4_right(x_right)
        x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
        x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class NASNetALarge(nn.Module):
    """NASNetALarge (6 @ 4032).

    Structure (see ``features``): stem conv + two stem cells, then three
    stages of six cells each, separated by two reduction cells; classifier
    head is ReLU -> 11x11 avg-pool -> dropout -> linear (``logits``).
    """
    def __init__(self, num_classes=1001, stem_filters=96, penultimate_filters=4032, filters_multiplier=2):
        super(NASNetALarge, self).__init__()
        self.num_classes = num_classes
        self.stem_filters = stem_filters
        self.penultimate_filters = penultimate_filters
        self.filters_multiplier = filters_multiplier
        # base per-cell channel width, derived from the penultimate width
        filters = self.penultimate_filters // 24
        # 24 is default value for the architecture
        self.conv0 = nn.Sequential()
        self.conv0.add_module('conv', nn.Conv2d(in_channels=3, out_channels=self.stem_filters, kernel_size=3, padding=0, stride=2,
                                                bias=False))
        self.conv0.add_module('bn', nn.BatchNorm2d(self.stem_filters, eps=0.001, momentum=0.1, affine=True))
        self.cell_stem_0 = CellStem0(self.stem_filters, num_filters=filters // (filters_multiplier ** 2))
        self.cell_stem_1 = CellStem1(self.stem_filters, num_filters=filters // filters_multiplier)
        self.cell_0 = FirstCell(in_channels_left=filters, out_channels_left=filters//2,
                                in_channels_right=2*filters, out_channels_right=filters)
        self.cell_1 = NormalCell(in_channels_left=2*filters, out_channels_left=filters,
                                 in_channels_right=6*filters, out_channels_right=filters)
        self.cell_2 = NormalCell(in_channels_left=6*filters, out_channels_left=filters,
                                 in_channels_right=6*filters, out_channels_right=filters)
        self.cell_3 = NormalCell(in_channels_left=6*filters, out_channels_left=filters,
                                 in_channels_right=6*filters, out_channels_right=filters)
        self.cell_4 = NormalCell(in_channels_left=6*filters, out_channels_left=filters,
                                 in_channels_right=6*filters, out_channels_right=filters)
        self.cell_5 = NormalCell(in_channels_left=6*filters, out_channels_left=filters,
                                 in_channels_right=6*filters, out_channels_right=filters)
        self.reduction_cell_0 = ReductionCell0(in_channels_left=6*filters, out_channels_left=2*filters,
                                               in_channels_right=6*filters, out_channels_right=2*filters)
        self.cell_6 = FirstCell(in_channels_left=6*filters, out_channels_left=filters,
                                in_channels_right=8*filters, out_channels_right=2*filters)
        self.cell_7 = NormalCell(in_channels_left=8*filters, out_channels_left=2*filters,
                                 in_channels_right=12*filters, out_channels_right=2*filters)
        self.cell_8 = NormalCell(in_channels_left=12*filters, out_channels_left=2*filters,
                                 in_channels_right=12*filters, out_channels_right=2*filters)
        self.cell_9 = NormalCell(in_channels_left=12*filters, out_channels_left=2*filters,
                                 in_channels_right=12*filters, out_channels_right=2*filters)
        self.cell_10 = NormalCell(in_channels_left=12*filters, out_channels_left=2*filters,
                                  in_channels_right=12*filters, out_channels_right=2*filters)
        self.cell_11 = NormalCell(in_channels_left=12*filters, out_channels_left=2*filters,
                                  in_channels_right=12*filters, out_channels_right=2*filters)
        self.reduction_cell_1 = ReductionCell1(in_channels_left=12*filters, out_channels_left=4*filters,
                                               in_channels_right=12*filters, out_channels_right=4*filters)
        self.cell_12 = FirstCell(in_channels_left=12*filters, out_channels_left=2*filters,
                                 in_channels_right=16*filters, out_channels_right=4*filters)
        self.cell_13 = NormalCell(in_channels_left=16*filters, out_channels_left=4*filters,
                                  in_channels_right=24*filters, out_channels_right=4*filters)
        self.cell_14 = NormalCell(in_channels_left=24*filters, out_channels_left=4*filters,
                                  in_channels_right=24*filters, out_channels_right=4*filters)
        self.cell_15 = NormalCell(in_channels_left=24*filters, out_channels_left=4*filters,
                                  in_channels_right=24*filters, out_channels_right=4*filters)
        self.cell_16 = NormalCell(in_channels_left=24*filters, out_channels_left=4*filters,
                                  in_channels_right=24*filters, out_channels_right=4*filters)
        self.cell_17 = NormalCell(in_channels_left=24*filters, out_channels_left=4*filters,
                                  in_channels_right=24*filters, out_channels_right=4*filters)
        self.relu = nn.ReLU()
        self.avg_pool = nn.AvgPool2d(11, stride=1, padding=0)
        self.dropout = nn.Dropout()
        self.last_linear = nn.Linear(24*filters, self.num_classes)
    def features(self, input):
        """Feature extractor: each cell receives (current, previous) outputs."""
        x_conv0 = self.conv0(input)
        x_stem_0 = self.cell_stem_0(x_conv0)
        x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0)
        x_cell_0 = self.cell_0(x_stem_1, x_stem_0)
        x_cell_1 = self.cell_1(x_cell_0, x_stem_1)
        x_cell_2 = self.cell_2(x_cell_1, x_cell_0)
        x_cell_3 = self.cell_3(x_cell_2, x_cell_1)
        x_cell_4 = self.cell_4(x_cell_3, x_cell_2)
        x_cell_5 = self.cell_5(x_cell_4, x_cell_3)
        x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4)
        x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4)
        x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0)
        x_cell_8 = self.cell_8(x_cell_7, x_cell_6)
        x_cell_9 = self.cell_9(x_cell_8, x_cell_7)
        x_cell_10 = self.cell_10(x_cell_9, x_cell_8)
        x_cell_11 = self.cell_11(x_cell_10, x_cell_9)
        x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10)
        x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10)
        x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1)
        x_cell_14 = self.cell_14(x_cell_13, x_cell_12)
        x_cell_15 = self.cell_15(x_cell_14, x_cell_13)
        x_cell_16 = self.cell_16(x_cell_15, x_cell_14)
        x_cell_17 = self.cell_17(x_cell_16, x_cell_15)
        return x_cell_17
    def logits(self, features):
        """Classifier head: ReLU -> global 11x11 avg-pool -> dropout -> linear."""
        x = self.relu(features)
        x = self.avg_pool(x)
        x = x.view(x.size(0), -1)
        x = self.dropout(x)
        x = self.last_linear(x)
        return x
    def forward(self, input):
        """Full pass: features then classification logits."""
        x = self.features(input)
        x = self.logits(x)
        return x
# Smoke test: build the model and push a random 331x331 batch through it.
if __name__ == "__main__":
    model = NASNetALarge()
    input = Variable(torch.randn(2, 3, 331, 331))
    output = model(input)
    print(output.size())
|
Cadene/pretrained-models.pytorch | pretrainedmodels/models/dpn.py | adaptive_avgmax_pool2d | python | def adaptive_avgmax_pool2d(x, pool_type='avg', padding=0, count_include_pad=False):
if pool_type == 'avgmaxc':
x = torch.cat([
F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad),
F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
], dim=1)
elif pool_type == 'avgmax':
x_avg = F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
x_max = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
x = 0.5 * (x_avg + x_max)
elif pool_type == 'max':
x = F.max_pool2d(x, kernel_size=(x.size(2), x.size(3)), padding=padding)
else:
if pool_type != 'avg':
print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
x = F.avg_pool2d(
x, kernel_size=(x.size(2), x.size(3)), padding=padding, count_include_pad=count_include_pad)
return x | Selectable global pooling function with dynamic input kernel size | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/dpn.py#L407-L428 | null | """ PyTorch implementation of DualPathNetworks
Ported to PyTorch by [Ross Wightman](https://github.com/rwightman/pytorch-dpn-pretrained)
Based on original MXNet implementation https://github.com/cypw/DPNs with
many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs.
This implementation is compatible with the pretrained weights
from cypw's MXNet implementation.
"""
from __future__ import print_function, division, absolute_import
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from collections import OrderedDict
__all__ = ['DPN', 'dpn68', 'dpn68b', 'dpn92', 'dpn98', 'dpn131', 'dpn107']
pretrained_settings = {
'dpn68': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn68-4af7d88d2.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [124 / 255, 117 / 255, 104 / 255],
'std': [1 / (.0167 * 255)] * 3,
'num_classes': 1000
}
},
'dpn68b': {
'imagenet+5k': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn68b_extra-363ab9c19.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [124 / 255, 117 / 255, 104 / 255],
'std': [1 / (.0167 * 255)] * 3,
'num_classes': 1000
}
},
'dpn92': {
# 'imagenet': {
# 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn68-66bebafa7.pth',
# 'input_space': 'RGB',
# 'input_size': [3, 224, 224],
# 'input_range': [0, 1],
# 'mean': [124 / 255, 117 / 255, 104 / 255],
# 'std': [1 / (.0167 * 255)] * 3,
# 'num_classes': 1000
# },
'imagenet+5k': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn92_extra-fda993c95.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [124 / 255, 117 / 255, 104 / 255],
'std': [1 / (.0167 * 255)] * 3,
'num_classes': 1000
}
},
'dpn98': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn98-722954780.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [124 / 255, 117 / 255, 104 / 255],
'std': [1 / (.0167 * 255)] * 3,
'num_classes': 1000
}
},
'dpn131': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn131-7af84be88.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [124 / 255, 117 / 255, 104 / 255],
'std': [1 / (.0167 * 255)] * 3,
'num_classes': 1000
}
},
'dpn107': {
'imagenet+5k': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/dpn107_extra-b7f9f4cc9.pth',
'input_space': 'RGB',
'input_size': [3, 224, 224],
'input_range': [0, 1],
'mean': [124 / 255, 117 / 255, 104 / 255],
'std': [1 / (.0167 * 255)] * 3,
'num_classes': 1000
}
}
}
def _load_pretrained(model, model_name, pretrained, num_classes):
    """Load published weights into *model* and attach preprocessing metadata.

    Looks up ``pretrained_settings[model_name][pretrained]``, verifies that
    ``num_classes`` matches the checkpoint, loads the state dict from the
    checkpoint URL (downloaded/cached by ``model_zoo``), and copies the
    preprocessing fields (``input_space``, ``input_size``, ``input_range``,
    ``mean``, ``std``) onto the model so callers can build a matching input
    transform.  Raises ``KeyError`` if no checkpoint exists for the requested
    *pretrained* corpus, and ``AssertionError`` on a class-count mismatch.
    """
    settings = pretrained_settings[model_name][pretrained]
    assert num_classes == settings['num_classes'], \
        "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
    model.load_state_dict(model_zoo.load_url(settings['url']))
    model.input_space = settings['input_space']
    model.input_size = settings['input_size']
    model.input_range = settings['input_range']
    model.mean = settings['mean']
    model.std = settings['std']
    return model


def dpn68(num_classes=1000, pretrained='imagenet'):
    """DPN-68 (small variant).

    :param num_classes: number of output classes (must match the checkpoint
        when *pretrained* is truthy).
    :param pretrained: pretraining corpus key (e.g. ``'imagenet'``) or a
        falsy value for random initialisation.
    """
    model = DPN(
        small=True, num_init_features=10, k_r=128, groups=32,
        k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64),
        num_classes=num_classes, test_time_pool=True)
    if pretrained:
        model = _load_pretrained(model, 'dpn68', pretrained, num_classes)
    return model


def dpn68b(num_classes=1000, pretrained='imagenet+5k'):
    """DPN-68b (small variant, "b" block style). See :func:`dpn68`."""
    model = DPN(
        small=True, num_init_features=10, k_r=128, groups=32,
        b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64),
        num_classes=num_classes, test_time_pool=True)
    if pretrained:
        model = _load_pretrained(model, 'dpn68b', pretrained, num_classes)
    return model


def dpn92(num_classes=1000, pretrained='imagenet+5k'):
    """DPN-92. See :func:`dpn68` for parameter semantics."""
    model = DPN(
        num_init_features=64, k_r=96, groups=32,
        k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128),
        num_classes=num_classes, test_time_pool=True)
    if pretrained:
        model = _load_pretrained(model, 'dpn92', pretrained, num_classes)
    return model


def dpn98(num_classes=1000, pretrained='imagenet'):
    """DPN-98. See :func:`dpn68` for parameter semantics."""
    model = DPN(
        num_init_features=96, k_r=160, groups=40,
        k_sec=(3, 6, 20, 3), inc_sec=(16, 32, 32, 128),
        num_classes=num_classes, test_time_pool=True)
    if pretrained:
        model = _load_pretrained(model, 'dpn98', pretrained, num_classes)
    return model


def dpn131(num_classes=1000, pretrained='imagenet'):
    """DPN-131. See :func:`dpn68` for parameter semantics."""
    model = DPN(
        num_init_features=128, k_r=160, groups=40,
        k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128),
        num_classes=num_classes, test_time_pool=True)
    if pretrained:
        model = _load_pretrained(model, 'dpn131', pretrained, num_classes)
    return model


def dpn107(num_classes=1000, pretrained='imagenet+5k'):
    """DPN-107. See :func:`dpn68` for parameter semantics."""
    model = DPN(
        num_init_features=128, k_r=200, groups=50,
        k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 64, 128),
        num_classes=num_classes, test_time_pool=True)
    if pretrained:
        model = _load_pretrained(model, 'dpn107', pretrained, num_classes)
    return model
class CatBnAct(nn.Module):
    """Concatenate a tuple of feature maps (channel dim), then BatchNorm + activation.

    A plain tensor input skips the concatenation step.  Attribute names
    (``bn``, ``act``) are part of the checkpoint state-dict layout.
    """

    def __init__(self, in_chs, activation_fn=nn.ReLU(inplace=True)):
        super(CatBnAct, self).__init__()
        self.bn = nn.BatchNorm2d(in_chs, eps=0.001)
        self.act = activation_fn

    def forward(self, x):
        if isinstance(x, tuple):
            x = torch.cat(x, dim=1)
        normed = self.bn(x)
        return self.act(normed)
class BnActConv2d(nn.Module):
    """Pre-activation conv unit: BatchNorm -> activation -> Conv2d (no bias).

    Attribute names (``bn``, ``act``, ``conv``) are part of the checkpoint
    state-dict layout and must not change.
    """

    def __init__(self, in_chs, out_chs, kernel_size, stride,
                 padding=0, groups=1, activation_fn=nn.ReLU(inplace=True)):
        super(BnActConv2d, self).__init__()
        self.bn = nn.BatchNorm2d(in_chs, eps=0.001)
        self.act = activation_fn
        self.conv = nn.Conv2d(in_chs, out_chs, kernel_size, stride, padding, groups=groups, bias=False)

    def forward(self, x):
        out = self.bn(x)
        out = self.act(out)
        return self.conv(out)
class InputBlock(nn.Module):
    """Network stem: strided conv -> BatchNorm -> activation -> 3x3 max-pool.

    Overall spatial downsampling is 4x (stride-2 conv followed by a
    stride-2 pool).  Attribute names are part of the checkpoint layout.
    """

    def __init__(self, num_init_features, kernel_size=7,
                 padding=3, activation_fn=nn.ReLU(inplace=True)):
        super(InputBlock, self).__init__()
        self.conv = nn.Conv2d(
            3, num_init_features, kernel_size=kernel_size, stride=2, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(num_init_features, eps=0.001)
        self.act = activation_fn
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        return self.pool(self.act(self.bn(self.conv(x))))
class DualPathBlock(nn.Module):
    """One Dual Path Network block: a residual path plus a dense path.

    :param in_chs: channel count of the (possibly concatenated) input.
    :param num_1x1_a: width of the first 1x1 bottleneck conv.
    :param num_3x3_b: width of the grouped 3x3 conv.
    :param num_1x1_c: width of the residual part of the output.
    :param inc: channel increment appended to the dense path by this block.
    :param groups: group count for the 3x3 conv.
    :param block_type: ``'proj'`` (stride-1 projection shortcut), ``'down'``
        (stride-2 projection shortcut) or ``'normal'`` (identity paths).
    :param b: use the DPN "b" variant (shared BN/act, separate 1x1 convs).

    Returns ``(resid, dense)`` from :meth:`forward`, consumed as a tuple by
    the next block.
    """

    def __init__(
            self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, groups, block_type='normal', b=False):
        super(DualPathBlock, self).__init__()
        self.num_1x1_c = num_1x1_c
        self.inc = inc
        self.b = b
        # Bug fix: the original compared strings with `is`, which relies on
        # CPython string interning and raises SyntaxWarning on Python >= 3.8.
        # `==` is the correct value comparison.
        if block_type == 'proj':
            self.key_stride = 1
            self.has_proj = True
        elif block_type == 'down':
            self.key_stride = 2
            self.has_proj = True
        else:
            assert block_type == 'normal'
            self.key_stride = 1
            self.has_proj = False

        if self.has_proj:
            # Using different member names here to allow easier parameter key matching for conversion
            if self.key_stride == 2:
                self.c1x1_w_s2 = BnActConv2d(
                    in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2)
            else:
                self.c1x1_w_s1 = BnActConv2d(
                    in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1)
        self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1)
        self.c3x3_b = BnActConv2d(
            in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3,
            stride=self.key_stride, padding=1, groups=groups)
        if b:
            # "b" variant: one shared BN/act, then separate 1x1 convs for the
            # residual and dense outputs.
            self.c1x1_c = CatBnAct(in_chs=num_3x3_b)
            self.c1x1_c1 = nn.Conv2d(num_3x3_b, num_1x1_c, kernel_size=1, bias=False)
            self.c1x1_c2 = nn.Conv2d(num_3x3_b, inc, kernel_size=1, bias=False)
        else:
            # Single 1x1 conv producing both outputs, split by channel below.
            self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1)

    def forward(self, x):
        # Upstream blocks hand over (resid, dense) tuples; fuse for the convs.
        x_in = torch.cat(x, dim=1) if isinstance(x, tuple) else x
        if self.has_proj:
            # Projection shortcut: split its output into residual / dense parts.
            if self.key_stride == 2:
                x_s = self.c1x1_w_s2(x_in)
            else:
                x_s = self.c1x1_w_s1(x_in)
            x_s1 = x_s[:, :self.num_1x1_c, :, :]
            x_s2 = x_s[:, self.num_1x1_c:, :, :]
        else:
            # Identity shortcut: reuse the incoming tuple unchanged.
            x_s1 = x[0]
            x_s2 = x[1]
        x_in = self.c1x1_a(x_in)
        x_in = self.c3x3_b(x_in)
        if self.b:
            x_in = self.c1x1_c(x_in)
            out1 = self.c1x1_c1(x_in)
            out2 = self.c1x1_c2(x_in)
        else:
            x_in = self.c1x1_c(x_in)
            out1 = x_in[:, :self.num_1x1_c, :, :]
            out2 = x_in[:, self.num_1x1_c:, :, :]
        resid = x_s1 + out1                       # residual (additive) path
        dense = torch.cat([x_s2, out2], dim=1)    # dense (concatenative) path
        return resid, dense
class DPN(nn.Module):
    """Dual Path Network.

    Stages conv2..conv5 are built from :class:`DualPathBlock`; each stage
    opens with a projection ('proj' for conv2, 'down' for conv3..5) and then
    stacks ``k_sec[i] - 1`` 'normal' blocks, growing the dense path by
    ``inc_sec[i]`` channels per block.  The classifier is a 1x1 conv
    (``last_linear``) so the test-time pooling scheme in :meth:`logits`
    can be applied after it.
    """
    def __init__(self, small=False, num_init_features=64, k_r=96, groups=32,
                 b=False, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128),
                 num_classes=1000, test_time_pool=False):
        super(DPN, self).__init__()
        self.test_time_pool = test_time_pool
        self.b = b
        # Small variants (dpn68/68b) use a 1x bottleneck width, others 4x.
        bw_factor = 1 if small else 4
        blocks = OrderedDict()
        # conv1: stem uses a 3x3 conv for small variants, 7x7 otherwise.
        if small:
            blocks['conv1_1'] = InputBlock(num_init_features, kernel_size=3, padding=1)
        else:
            blocks['conv1_1'] = InputBlock(num_init_features, kernel_size=7, padding=3)
        # conv2
        bw = 64 * bw_factor
        inc = inc_sec[0]
        r = (k_r * bw) // (64 * bw_factor)
        blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b)
        # Projection emits bw + 2*inc channels; first block appends inc more.
        in_chs = bw + 3 * inc
        for i in range(2, k_sec[0] + 1):
            blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
            in_chs += inc
        # conv3
        bw = 128 * bw_factor
        inc = inc_sec[1]
        r = (k_r * bw) // (64 * bw_factor)
        blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
        in_chs = bw + 3 * inc
        for i in range(2, k_sec[1] + 1):
            blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
            in_chs += inc
        # conv4
        bw = 256 * bw_factor
        inc = inc_sec[2]
        r = (k_r * bw) // (64 * bw_factor)
        blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
        in_chs = bw + 3 * inc
        for i in range(2, k_sec[2] + 1):
            blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
            in_chs += inc
        # conv5
        bw = 512 * bw_factor
        inc = inc_sec[3]
        r = (k_r * bw) // (64 * bw_factor)
        blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b)
        in_chs = bw + 3 * inc
        for i in range(2, k_sec[3] + 1):
            blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b)
            in_chs += inc
        # Final BN/act fuses the last (resid, dense) tuple into one tensor.
        blocks['conv5_bn_ac'] = CatBnAct(in_chs)
        self.features = nn.Sequential(blocks)
        # Using 1x1 conv for the FC layer to allow the extra pooling scheme
        self.last_linear = nn.Conv2d(in_chs, num_classes, kernel_size=1, bias=True)
    def logits(self, features):
        # Test-time path: 7x7 average pool, classify, then avg+max pool the
        # remaining patch (presumably sized img_size//32 - 6 — see note below).
        if not self.training and self.test_time_pool:
            x = F.avg_pool2d(features, kernel_size=7, stride=1)
            out = self.last_linear(x)
            # The extra test time pool should be pooling an img_size//32 - 6 size patch
            out = adaptive_avgmax_pool2d(out, pool_type='avgmax')
        else:
            # Training path: global average pool, then classify.
            x = adaptive_avgmax_pool2d(features, pool_type='avg')
            out = self.last_linear(x)
        # Flatten (N, C, 1, 1) conv output to (N, C) logits.
        return out.view(out.size(0), -1)
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
""" PyTorch selectable adaptive pooling
Adaptive pooling with the ability to select the type of pooling from:
* 'avg' - Average pooling
* 'max' - Max pooling
* 'avgmax' - Sum of average and max pooling re-scaled by 0.5
* 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim
Both a functional and a nn.Module version of the pooling is provided.
Author: Ross Wightman (rwightman)
"""
def pooling_factor(pool_type='avg'):
    """Channel multiplier of a pooling mode: 'avgmaxc' concatenates avg and
    max results and therefore doubles the feature dimension; every other
    mode leaves it unchanged."""
    if pool_type == 'avgmaxc':
        return 2
    return 1
class AdaptiveAvgMaxPool2d(torch.nn.Module):
    """Selectable global pooling layer with dynamic input kernel size.

    Modes: 'avg' (default), 'max', 'avgmax' (0.5 * (avg + max)) and
    'avgmaxc' (avg and max concatenated along the channel dim).
    An unrecognised mode falls back to average pooling with a warning print.
    """

    def __init__(self, output_size=1, pool_type='avg'):
        super(AdaptiveAvgMaxPool2d, self).__init__()
        self.output_size = output_size
        self.pool_type = pool_type
        if pool_type in ('avgmaxc', 'avgmax'):
            self.pool = nn.ModuleList([nn.AdaptiveAvgPool2d(output_size), nn.AdaptiveMaxPool2d(output_size)])
        elif pool_type == 'max':
            self.pool = nn.AdaptiveMaxPool2d(output_size)
        else:
            if pool_type != 'avg':
                print('Invalid pool type %s specified. Defaulting to average pooling.' % pool_type)
            self.pool = nn.AdaptiveAvgPool2d(output_size)

    def forward(self, x):
        if self.pool_type == 'avgmaxc':
            return torch.cat([p(x) for p in self.pool], dim=1)
        if self.pool_type == 'avgmax':
            pooled = [p(x) for p in self.pool]
            return 0.5 * torch.sum(torch.stack(pooled), 0).squeeze(dim=0)
        return self.pool(x)

    def factor(self):
        return pooling_factor(self.pool_type)

    def __repr__(self):
        return '{} (output_size={}, pool_type={})'.format(
            self.__class__.__name__, self.output_size, self.pool_type)
Cadene/pretrained-models.pytorch | pretrainedmodels/datasets/utils.py | download_url | python | def download_url(url, destination=None, progress_bar=True):
def my_hook(t):
last_b = [0]
def inner(b=1, bsize=1, tsize=None):
if tsize is not None:
t.total = tsize
if b > 0:
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return inner
if progress_bar:
with tqdm(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t:
filename, _ = urlretrieve(url, filename=destination, reporthook=my_hook(t))
else:
filename, _ = urlretrieve(url, filename=destination) | Download a URL to a local file.
Parameters
----------
url : str
The URL to download.
destination : str, None
The destination of the file. If None is given the file is saved to a temporary directory.
progress_bar : bool
Whether to show a command-line progress bar while downloading.
Returns
-------
filename : str
The location of the downloaded file.
Notes
-----
Progress bar use/example adapted from tqdm documentation: https://github.com/tqdm/tqdm | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/datasets/utils.py#L45-L83 | [
"def my_hook(t):\n last_b = [0]\n\n def inner(b=1, bsize=1, tsize=None):\n if tsize is not None:\n t.total = tsize\n if b > 0:\n t.update((b - last_b[0]) * bsize)\n last_b[0] = b\n\n return inner\n"
] | from __future__ import print_function, division, absolute_import
import math
from six.moves.urllib.request import urlretrieve
import torch
from PIL import Image
from tqdm import tqdm
def load_imagenet_classes(path_synsets='data/imagenet_synsets.txt',
                          path_classes='data/imagenet_classes.txt'):
    """Return ImageNet class names ordered by class index.

    *path_synsets* lines are '<wnid> <human readable name>'; *path_classes*
    lists one wnid per line, in class-index order.  The result maps each
    class index to its readable name.
    """
    key_to_classname = {}
    with open(path_synsets, 'r') as f:
        for raw in f:
            parts = raw.strip().split(' ')
            key_to_classname[parts[0]] = ' '.join(parts[1:])

    with open(path_classes, 'r') as f:
        ordered_keys = [raw.strip() for raw in f]

    return [key_to_classname[key] for key in ordered_keys]
class Warp(object):
    """Transform that resizes a PIL image to a fixed square, ignoring aspect
    ratio ("warping" it)."""

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = int(size)
        self.interpolation = interpolation

    def __call__(self, img):
        target = (self.size, self.size)
        return img.resize(target, self.interpolation)

    def __str__(self):
        return '{} (size={}, interpolation={})'.format(
            self.__class__.__name__, self.size, self.interpolation)
class AveragePrecisionMeter(object):
    """
    The APMeter measures the average precision per class.
    The APMeter is designed to operate on `NxK` Tensors `output` and
    `target`, and optionally a `Nx1` Tensor weight where (1) the `output`
    contains model output scores for `N` examples and `K` classes that ought to
    be higher when the model is more convinced that the example should be
    positively labeled, and smaller when the model believes the example should
    be negatively labeled (for instance, the output of a sigmoid function); (2)
    the `target` contains only values 0 (for negative examples) and 1
    (for positive examples); and (3) the `weight` ( > 0) represents weight for
    each sample.
    """
    def __init__(self, difficult_examples=False):
        super(AveragePrecisionMeter, self).__init__()
        self.reset()
        # When True, examples whose target is 0 are skipped entirely in
        # average_precision (treated as "difficult"/ignored, not negative).
        self.difficult_examples = difficult_examples
    def reset(self):
        """Resets the meter with empty member variables"""
        # Backing tensors grow amortised via storage resizing in add().
        self.scores = torch.FloatTensor(torch.FloatStorage())
        self.targets = torch.LongTensor(torch.LongStorage())
    def add(self, output, target):
        """
        Args:
            output (Tensor): NxK tensor that for each of the N examples
                indicates the probability of the example belonging to each of
                the K classes, according to the model. The probabilities should
                sum to one over all classes
            target (Tensor): binary NxK tensor that encodes which of the K
                classes are associated with the N-th input
                (eg: a row [0, 1, 0, 1] indicates that the example is
                associated with classes 2 and 4)
            weight (optional, Tensor): Nx1 tensor representing the weight for
                each example (each weight > 0)
        """
        # Accept numpy arrays as well as tensors.
        if not torch.is_tensor(output):
            output = torch.from_numpy(output)
        if not torch.is_tensor(target):
            target = torch.from_numpy(target)
        # Normalise 1-D inputs to Nx1.
        if output.dim() == 1:
            output = output.view(-1, 1)
        else:
            assert output.dim() == 2, \
                'wrong output size (should be 1D or 2D with one column \
                per class)'
        if target.dim() == 1:
            target = target.view(-1, 1)
        else:
            assert target.dim() == 2, \
                'wrong target size (should be 1D or 2D with one column \
                per class)'
        if self.scores.numel() > 0:
            assert target.size(1) == self.targets.size(1), \
                'dimensions for output should match previously added examples.'
        # make sure storage is of sufficient size
        # (grow by 1.5x so repeated add() calls are amortised O(1) per element)
        if self.scores.storage().size() < self.scores.numel() + output.numel():
            new_size = math.ceil(self.scores.storage().size() * 1.5)
            self.scores.storage().resize_(int(new_size + output.numel()))
            self.targets.storage().resize_(int(new_size + output.numel()))
        # store scores and targets
        offset = self.scores.size(0) if self.scores.dim() > 0 else 0
        self.scores.resize_(offset + output.size(0), output.size(1))
        self.targets.resize_(offset + target.size(0), target.size(1))
        self.scores.narrow(0, offset, output.size(0)).copy_(output)
        self.targets.narrow(0, offset, target.size(0)).copy_(target)
    def value(self):
        """Returns the model's average precision for each class
        Return:
            ap (FloatTensor): 1xK tensor, with avg precision for each class k
        """
        if self.scores.numel() == 0:
            return 0
        ap = torch.zeros(self.scores.size(1))
        # NOTE(review): rg is computed but never used below — likely leftover.
        rg = torch.arange(1, self.scores.size(0)).float()
        # compute average precision for each class
        for k in range(self.scores.size(1)):
            # sort scores
            scores = self.scores[:, k]
            targets = self.targets[:, k]
            # compute average precision
            ap[k] = AveragePrecisionMeter.average_precision(scores, targets, self.difficult_examples)
        return ap
    @staticmethod
    def average_precision(output, target, difficult_examples=True):
        # Standard AP: iterate examples by descending score and average the
        # precision observed at each positive example.
        # sort examples
        sorted, indices = torch.sort(output, dim=0, descending=True)
        # Computes prec@i
        pos_count = 0.
        total_count = 0.
        precision_at_i = 0.
        for i in indices:
            label = target[i]
            # With difficult_examples, zero-labelled items are ignored
            # entirely (not counted as negatives).
            if difficult_examples and label == 0:
                continue
            if label == 1:
                pos_count += 1
            total_count += 1
            if label == 1:
                precision_at_i += pos_count / total_count
        # Divide by the number of positives; raises ZeroDivisionError if the
        # class has no positive examples — presumably callers guarantee >= 1.
        precision_at_i /= pos_count
        return precision_at_i
Cadene/pretrained-models.pytorch | pretrainedmodels/datasets/utils.py | AveragePrecisionMeter.add | python | def add(self, output, target):
if not torch.is_tensor(output):
output = torch.from_numpy(output)
if not torch.is_tensor(target):
target = torch.from_numpy(target)
if output.dim() == 1:
output = output.view(-1, 1)
else:
assert output.dim() == 2, \
'wrong output size (should be 1D or 2D with one column \
per class)'
if target.dim() == 1:
target = target.view(-1, 1)
else:
assert target.dim() == 2, \
'wrong target size (should be 1D or 2D with one column \
per class)'
if self.scores.numel() > 0:
assert target.size(1) == self.targets.size(1), \
'dimensions for output should match previously added examples.'
# make sure storage is of sufficient size
if self.scores.storage().size() < self.scores.numel() + output.numel():
new_size = math.ceil(self.scores.storage().size() * 1.5)
self.scores.storage().resize_(int(new_size + output.numel()))
self.targets.storage().resize_(int(new_size + output.numel()))
# store scores and targets
offset = self.scores.size(0) if self.scores.dim() > 0 else 0
self.scores.resize_(offset + output.size(0), output.size(1))
self.targets.resize_(offset + target.size(0), target.size(1))
self.scores.narrow(0, offset, output.size(0)).copy_(output)
self.targets.narrow(0, offset, target.size(0)).copy_(target) | Args:
output (Tensor): NxK tensor that for each of the N examples
indicates the probability of the example belonging to each of
the K classes, according to the model. The probabilities should
sum to one over all classes
target (Tensor): binary NxK tensor that encodes which of the K
classes are associated with the N-th input
(eg: a row [0, 1, 0, 1] indicates that the example is
associated with classes 2 and 4)
weight (optional, Tensor): Nx1 tensor representing the weight for
each example (each weight > 0) | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/datasets/utils.py#L110-L156 | null | class AveragePrecisionMeter(object):
"""
The APMeter measures the average precision per class.
The APMeter is designed to operate on `NxK` Tensors `output` and
`target`, and optionally a `Nx1` Tensor weight where (1) the `output`
contains model output scores for `N` examples and `K` classes that ought to
be higher when the model is more convinced that the example should be
positively labeled, and smaller when the model believes the example should
be negatively labeled (for instance, the output of a sigmoid function); (2)
the `target` contains only values 0 (for negative examples) and 1
(for positive examples); and (3) the `weight` ( > 0) represents weight for
each sample.
"""
def __init__(self, difficult_examples=False):
super(AveragePrecisionMeter, self).__init__()
self.reset()
self.difficult_examples = difficult_examples
def reset(self):
"""Resets the meter with empty member variables"""
self.scores = torch.FloatTensor(torch.FloatStorage())
self.targets = torch.LongTensor(torch.LongStorage())
def value(self):
"""Returns the model's average precision for each class
Return:
ap (FloatTensor): 1xK tensor, with avg precision for each class k
"""
if self.scores.numel() == 0:
return 0
ap = torch.zeros(self.scores.size(1))
rg = torch.arange(1, self.scores.size(0)).float()
# compute average precision for each class
for k in range(self.scores.size(1)):
# sort scores
scores = self.scores[:, k]
targets = self.targets[:, k]
# compute average precision
ap[k] = AveragePrecisionMeter.average_precision(scores, targets, self.difficult_examples)
return ap
@staticmethod
def average_precision(output, target, difficult_examples=True):
# sort examples
sorted, indices = torch.sort(output, dim=0, descending=True)
# Computes prec@i
pos_count = 0.
total_count = 0.
precision_at_i = 0.
for i in indices:
label = target[i]
if difficult_examples and label == 0:
continue
if label == 1:
pos_count += 1
total_count += 1
if label == 1:
precision_at_i += pos_count / total_count
precision_at_i /= pos_count
return precision_at_i |
Cadene/pretrained-models.pytorch | pretrainedmodels/datasets/utils.py | AveragePrecisionMeter.value | python | def value(self):
if self.scores.numel() == 0:
return 0
ap = torch.zeros(self.scores.size(1))
rg = torch.arange(1, self.scores.size(0)).float()
# compute average precision for each class
for k in range(self.scores.size(1)):
# sort scores
scores = self.scores[:, k]
targets = self.targets[:, k]
# compute average precision
ap[k] = AveragePrecisionMeter.average_precision(scores, targets, self.difficult_examples)
return ap | Returns the model's average precision for each class
Return:
ap (FloatTensor): 1xK tensor, with avg precision for each class k | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/datasets/utils.py#L158-L177 | [
"def average_precision(output, target, difficult_examples=True):\n\n # sort examples\n sorted, indices = torch.sort(output, dim=0, descending=True)\n\n # Computes prec@i\n pos_count = 0.\n total_count = 0.\n precision_at_i = 0.\n for i in indices:\n label = target[i]\n if difficult_examples and label == 0:\n continue\n if label == 1:\n pos_count += 1\n total_count += 1\n if label == 1:\n precision_at_i += pos_count / total_count\n precision_at_i /= pos_count\n return precision_at_i"
] | class AveragePrecisionMeter(object):
"""
The APMeter measures the average precision per class.
The APMeter is designed to operate on `NxK` Tensors `output` and
`target`, and optionally a `Nx1` Tensor weight where (1) the `output`
contains model output scores for `N` examples and `K` classes that ought to
be higher when the model is more convinced that the example should be
positively labeled, and smaller when the model believes the example should
be negatively labeled (for instance, the output of a sigmoid function); (2)
the `target` contains only values 0 (for negative examples) and 1
(for positive examples); and (3) the `weight` ( > 0) represents weight for
each sample.
"""
def __init__(self, difficult_examples=False):
super(AveragePrecisionMeter, self).__init__()
self.reset()
self.difficult_examples = difficult_examples
def reset(self):
"""Resets the meter with empty member variables"""
self.scores = torch.FloatTensor(torch.FloatStorage())
self.targets = torch.LongTensor(torch.LongStorage())
def add(self, output, target):
"""
Args:
output (Tensor): NxK tensor that for each of the N examples
indicates the probability of the example belonging to each of
the K classes, according to the model. The probabilities should
sum to one over all classes
target (Tensor): binary NxK tensort that encodes which of the K
classes are associated with the N-th input
(eg: a row [0, 1, 0, 1] indicates that the example is
associated with classes 2 and 4)
weight (optional, Tensor): Nx1 tensor representing the weight for
each example (each weight > 0)
"""
if not torch.is_tensor(output):
output = torch.from_numpy(output)
if not torch.is_tensor(target):
target = torch.from_numpy(target)
if output.dim() == 1:
output = output.view(-1, 1)
else:
assert output.dim() == 2, \
'wrong output size (should be 1D or 2D with one column \
per class)'
if target.dim() == 1:
target = target.view(-1, 1)
else:
assert target.dim() == 2, \
'wrong target size (should be 1D or 2D with one column \
per class)'
if self.scores.numel() > 0:
assert target.size(1) == self.targets.size(1), \
'dimensions for output should match previously added examples.'
# make sure storage is of sufficient size
if self.scores.storage().size() < self.scores.numel() + output.numel():
new_size = math.ceil(self.scores.storage().size() * 1.5)
self.scores.storage().resize_(int(new_size + output.numel()))
self.targets.storage().resize_(int(new_size + output.numel()))
# store scores and targets
offset = self.scores.size(0) if self.scores.dim() > 0 else 0
self.scores.resize_(offset + output.size(0), output.size(1))
self.targets.resize_(offset + target.size(0), target.size(1))
self.scores.narrow(0, offset, output.size(0)).copy_(output)
self.targets.narrow(0, offset, target.size(0)).copy_(target)
@staticmethod
def average_precision(output, target, difficult_examples=True):
# sort examples
sorted, indices = torch.sort(output, dim=0, descending=True)
# Computes prec@i
pos_count = 0.
total_count = 0.
precision_at_i = 0.
for i in indices:
label = target[i]
if difficult_examples and label == 0:
continue
if label == 1:
pos_count += 1
total_count += 1
if label == 1:
precision_at_i += pos_count / total_count
precision_at_i /= pos_count
return precision_at_i |
Cadene/pretrained-models.pytorch | pretrainedmodels/models/polynet.py | polynet | python | def polynet(num_classes=1000, pretrained='imagenet'):
if pretrained:
settings = pretrained_settings['polynet'][pretrained]
assert num_classes == settings['num_classes'], \
'num_classes should be {}, but is {}'.format(
settings['num_classes'], num_classes)
model = PolyNet(num_classes=num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
else:
model = PolyNet(num_classes=num_classes)
return model | PolyNet architecture from the paper
'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks'
https://arxiv.org/abs/1611.05725 | train | https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/polynet.py#L461-L480 | null | from __future__ import print_function, division, absolute_import
import torch
import torch.nn as nn
from torch.utils import model_zoo
__all__ = ['PolyNet', 'polynet']

# Checkpoint URL and preprocessing contract for the published PolyNet weights.
pretrained_settings = {
    'polynet': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/polynet-f71d82a5.pth',
            'input_space': 'RGB',
            'input_size': [3, 331, 331],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        },
    }
}
class BasicConv2d(nn.Module):
    """Conv2d (no bias) -> BatchNorm2d -> optional ReLU.

    Set ``output_relu=False`` to omit the activation (used where the output
    is added to a residual before activating).
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0,
                 output_relu=True):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(out_planes)
        if output_relu:
            self.relu = nn.ReLU()
        else:
            self.relu = None

    def forward(self, x):
        out = self.bn(self.conv(x))
        if self.relu is not None:
            out = self.relu(out)
        return out
class PolyConv2d(nn.Module):
    """A block that is used inside poly-N (poly-2, poly-3, and so on) modules.

    The Convolution layer is shared between all Inception blocks inside
    a poly-N module, while each block owns its own BatchNorm: there are
    ``num_blocks`` BatchNorm layers, selected by ``block_index`` in
    :meth:`forward`.
    """

    def __init__(self, in_planes, out_planes, kernel_size, num_blocks,
                 stride=1, padding=0):
        super(PolyConv2d, self).__init__()
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=False)
        self.bn_blocks = nn.ModuleList([
            nn.BatchNorm2d(out_planes) for _ in range(num_blocks)
        ])
        self.relu = nn.ReLU()

    def forward(self, x, block_index):
        out = self.conv(x)
        out = self.bn_blocks[block_index](out)
        return self.relu(out)
class Stem(nn.Module):
    """PolyNet input stem: three conv stages, each ending in a concat of a
    pooled branch and a conv branch (Inception-v4-style stem)."""
    def __init__(self):
        super(Stem, self).__init__()
        # Stage 1: 3 -> 64 channels, spatial /2.
        self.conv1 = nn.Sequential(
            BasicConv2d(3, 32, kernel_size=3, stride=2),
            BasicConv2d(32, 32, kernel_size=3),
            BasicConv2d(32, 64, kernel_size=3, padding=1),
        )
        # Parallel downsampling: max-pool branch (64ch) + conv branch (96ch)
        # -> 160 channels after concat.
        self.conv1_pool_branch = nn.MaxPool2d(3, stride=2)
        self.conv1_branch = BasicConv2d(64, 96, kernel_size=3, stride=2)
        # Stage 2: short (1x1 -> 3x3) and long (1x1 -> 7x1 -> 1x7 -> 3x3)
        # branches, each 96ch -> 192 channels after concat.
        self.conv2_short = nn.Sequential(
            BasicConv2d(160, 64, kernel_size=1),
            BasicConv2d(64, 96, kernel_size=3),
        )
        self.conv2_long = nn.Sequential(
            BasicConv2d(160, 64, kernel_size=1),
            BasicConv2d(64, 64, kernel_size=(7, 1), padding=(3, 0)),
            BasicConv2d(64, 64, kernel_size=(1, 7), padding=(0, 3)),
            BasicConv2d(64, 96, kernel_size=3),
        )
        # Stage 3: pool branch (192ch) + strided conv branch (192ch)
        # -> 384 channels output.
        self.conv2_pool_branch = nn.MaxPool2d(3, stride=2)
        self.conv2_branch = BasicConv2d(192, 192, kernel_size=3, stride=2)
    def forward(self, x):
        x = self.conv1(x)

        x0 = self.conv1_pool_branch(x)
        x1 = self.conv1_branch(x)
        x = torch.cat((x0, x1), 1)

        x0 = self.conv2_short(x)
        x1 = self.conv2_long(x)
        x = torch.cat((x0, x1), 1)

        x0 = self.conv2_pool_branch(x)
        x1 = self.conv2_branch(x)
        out = torch.cat((x0, x1), 1)
        return out
class BlockA(nn.Module):
    """Inception-ResNet-A block: three parallel branches concatenated, then a
    1x1 conv (no output ReLU) back to 384 channels."""

    def __init__(self):
        super(BlockA, self).__init__()
        self.path0 = nn.Sequential(
            BasicConv2d(384, 32, kernel_size=1),
            BasicConv2d(32, 48, kernel_size=3, padding=1),
            BasicConv2d(48, 64, kernel_size=3, padding=1),
        )
        self.path1 = nn.Sequential(
            BasicConv2d(384, 32, kernel_size=1),
            BasicConv2d(32, 32, kernel_size=3, padding=1),
        )
        self.path2 = BasicConv2d(384, 32, kernel_size=1)
        self.conv2d = BasicConv2d(128, 384, kernel_size=1, output_relu=False)

    def forward(self, x):
        branches = [self.path0(x), self.path1(x), self.path2(x)]
        merged = torch.cat(branches, 1)
        return self.conv2d(merged)
class BlockB(nn.Module):
    """Inception-ResNet-B block: two parallel branches concatenated, then a
    1x1 conv (no output ReLU) back to 1152 channels."""

    def __init__(self):
        super(BlockB, self).__init__()
        self.path0 = nn.Sequential(
            BasicConv2d(1152, 128, kernel_size=1),
            BasicConv2d(128, 160, kernel_size=(1, 7), padding=(0, 3)),
            BasicConv2d(160, 192, kernel_size=(7, 1), padding=(3, 0)),
        )
        self.path1 = BasicConv2d(1152, 192, kernel_size=1)
        self.conv2d = BasicConv2d(384, 1152, kernel_size=1, output_relu=False)

    def forward(self, x):
        branches = [self.path0(x), self.path1(x)]
        merged = torch.cat(branches, 1)
        return self.conv2d(merged)
class BlockC(nn.Module):
    """Inception-ResNet-C block: two parallel branches concatenated, then a
    1x1 conv (no output ReLU) back to 2048 channels."""

    def __init__(self):
        super(BlockC, self).__init__()
        self.path0 = nn.Sequential(
            BasicConv2d(2048, 192, kernel_size=1),
            BasicConv2d(192, 224, kernel_size=(1, 3), padding=(0, 1)),
            BasicConv2d(224, 256, kernel_size=(3, 1), padding=(1, 0)),
        )
        self.path1 = BasicConv2d(2048, 192, kernel_size=1)
        self.conv2d = BasicConv2d(448, 2048, kernel_size=1, output_relu=False)

    def forward(self, x):
        branches = [self.path0(x), self.path1(x)]
        merged = torch.cat(branches, 1)
        return self.conv2d(merged)
class ReductionA(nn.Module):
    """Downsampling block placed after the stage-A Inception-ResNet blocks.

    Three stride-2 branches (deep conv, single conv, max-pool) are
    concatenated along the channel dimension.
    """

    def __init__(self):
        super(ReductionA, self).__init__()
        self.path0 = nn.Sequential(
            BasicConv2d(384, 256, kernel_size=1),
            BasicConv2d(256, 256, kernel_size=3, padding=1),
            BasicConv2d(256, 384, kernel_size=3, stride=2),
        )
        self.path1 = BasicConv2d(384, 384, kernel_size=3, stride=2)
        self.path2 = nn.MaxPool2d(3, stride=2)

    def forward(self, x):
        branches = [self.path0(x), self.path1(x), self.path2(x)]
        return torch.cat(branches, 1)
class ReductionB(nn.Module):
    """A dimensionality reduction block that is placed after stage-b
    Inception-ResNet blocks.
    """

    def __init__(self):
        super().__init__()
        # Three strided convolution branches plus a strided max-pool branch;
        # all use stride 2 with no padding, halving the spatial resolution.
        self.path0 = nn.Sequential(
            BasicConv2d(1152, 256, kernel_size=1),
            BasicConv2d(256, 256, kernel_size=3, padding=1),
            BasicConv2d(256, 256, kernel_size=3, stride=2),
        )
        self.path1 = nn.Sequential(
            BasicConv2d(1152, 256, kernel_size=1),
            BasicConv2d(256, 256, kernel_size=3, stride=2),
        )
        self.path2 = nn.Sequential(
            BasicConv2d(1152, 256, kernel_size=1),
            BasicConv2d(256, 384, kernel_size=3, stride=2),
        )
        self.path3 = nn.MaxPool2d(3, stride=2)

    def forward(self, x):
        branches = (self.path0(x), self.path1(x), self.path2(x), self.path3(x))
        return torch.cat(branches, 1)
class InceptionResNetBPoly(nn.Module):
    """Base class for constructing poly-N Inception-ResNet-B modules.

    With ``num_blocks == 1`` this degenerates to a standard
    Inception-ResNet-B block; ``num_blocks == 2`` adds a second-order path
    (poly-2), and larger values keep adding higher-order paths.  The branch
    convolutions (``PolyConv2d``) are shared across orders, while the final
    1x1 projection is private to each order.
    """

    def __init__(self, scale, num_blocks):
        super().__init__()
        assert num_blocks >= 1, 'num_blocks should be greater or equal to 1'
        self.scale = scale
        self.num_blocks = num_blocks
        self.path0_1x1 = PolyConv2d(1152, 128, kernel_size=1,
                                    num_blocks=self.num_blocks)
        self.path0_1x7 = PolyConv2d(128, 160, kernel_size=(1, 7),
                                    num_blocks=self.num_blocks, padding=(0, 3))
        self.path0_7x1 = PolyConv2d(160, 192, kernel_size=(7, 1),
                                    num_blocks=self.num_blocks, padding=(3, 0))
        self.path1 = PolyConv2d(1152, 192, kernel_size=1,
                                num_blocks=self.num_blocks)
        # conv2d blocks are not shared between Inception-ResNet-B blocks
        self.conv2d_blocks = nn.ModuleList([
            BasicConv2d(384, 1152, kernel_size=1, output_relu=False)
            for _ in range(self.num_blocks)
        ])
        self.relu = nn.ReLU()

    def forward_block(self, x, block_index):
        # One polynomial order: factorized 7x7 branch + 1x1 branch,
        # concatenated and projected back to 1152 channels.
        left = self.path0_1x1(x, block_index)
        left = self.path0_1x7(left, block_index)
        left = self.path0_7x1(left, block_index)
        right = self.path1(x, block_index)
        merged = torch.cat((left, right), 1)
        return self.conv2d_blocks[block_index](merged)

    def forward(self, x):
        out = x
        for i in range(self.num_blocks):
            x = self.forward_block(x, i)
            out = out + x * self.scale  # accumulate the i-th order term
            x = self.relu(x)            # feed the next (higher) order
        return self.relu(out)
class InceptionResNetCPoly(nn.Module):
    """Base class for constructing poly-N Inception-ResNet-C modules.

    With ``num_blocks == 1`` this degenerates to a standard
    Inception-ResNet-C block; ``num_blocks == 2`` adds a second-order path
    (poly-2), and larger values keep adding higher-order paths.  The branch
    convolutions (``PolyConv2d``) are shared across orders, while the final
    1x1 projection is private to each order.
    """

    def __init__(self, scale, num_blocks):
        super().__init__()
        assert num_blocks >= 1, 'num_blocks should be greater or equal to 1'
        self.scale = scale
        self.num_blocks = num_blocks
        self.path0_1x1 = PolyConv2d(2048, 192, kernel_size=1,
                                    num_blocks=self.num_blocks)
        self.path0_1x3 = PolyConv2d(192, 224, kernel_size=(1, 3),
                                    num_blocks=self.num_blocks, padding=(0, 1))
        self.path0_3x1 = PolyConv2d(224, 256, kernel_size=(3, 1),
                                    num_blocks=self.num_blocks, padding=(1, 0))
        self.path1 = PolyConv2d(2048, 192, kernel_size=1,
                                num_blocks=self.num_blocks)
        # conv2d blocks are not shared between Inception-ResNet-C blocks
        self.conv2d_blocks = nn.ModuleList([
            BasicConv2d(448, 2048, kernel_size=1, output_relu=False)
            for _ in range(self.num_blocks)
        ])
        self.relu = nn.ReLU()

    def forward_block(self, x, block_index):
        # One polynomial order: factorized 3x3 branch + 1x1 branch,
        # concatenated and projected back to 2048 channels.
        left = self.path0_1x1(x, block_index)
        left = self.path0_1x3(left, block_index)
        left = self.path0_3x1(left, block_index)
        right = self.path1(x, block_index)
        merged = torch.cat((left, right), 1)
        return self.conv2d_blocks[block_index](merged)

    def forward(self, x):
        out = x
        for i in range(self.num_blocks):
            x = self.forward_block(x, i)
            out = out + x * self.scale  # accumulate the i-th order term
            x = self.relu(x)            # feed the next (higher) order
        return self.relu(out)
class MultiWay(nn.Module):
    """Base class for constructing N-way modules (2-way, 3-way, and so on).

    Applies ``num_blocks`` independent instances of ``block_cls`` to the
    same input, adds each scaled output onto the residual, then ReLUs.
    """

    def __init__(self, scale, block_cls, num_blocks):
        super().__init__()
        assert num_blocks >= 1, 'num_blocks should be greater or equal to 1'
        self.scale = scale
        self.blocks = nn.ModuleList([block_cls() for _ in range(num_blocks)])
        self.relu = nn.ReLU()

    def forward(self, x):
        out = x
        for blk in self.blocks:
            out = out + self.scale * blk(x)
        return self.relu(out)
# Some helper classes to simplify the construction of PolyNet
class InceptionResNetA2Way(MultiWay):
    """Two-way (two parallel first-order blocks) Inception-ResNet-A module."""

    def __init__(self, scale):
        super().__init__(scale, block_cls=BlockA, num_blocks=2)
class InceptionResNetB2Way(MultiWay):
    """Two-way (two parallel first-order blocks) Inception-ResNet-B module."""

    def __init__(self, scale):
        super().__init__(scale, block_cls=BlockB, num_blocks=2)
class InceptionResNetC2Way(MultiWay):
    """Two-way (two parallel first-order blocks) Inception-ResNet-C module."""

    def __init__(self, scale):
        super().__init__(scale, block_cls=BlockC, num_blocks=2)
class InceptionResNetBPoly3(InceptionResNetBPoly):
    """Third-order (poly-3) Inception-ResNet-B module."""

    def __init__(self, scale):
        super().__init__(scale, num_blocks=3)
class InceptionResNetCPoly3(InceptionResNetCPoly):
    """Third-order (poly-3) Inception-ResNet-C module."""

    def __init__(self, scale):
        super().__init__(scale, num_blocks=3)
class PolyNet(nn.Module):
    """PolyNet classifier: stem -> stages A/B/C with reductions -> head.

    Stage composition (40 residual modules total): 10 two-way A modules,
    20 alternating poly-3 / two-way B modules, and 10 alternating poly-3 /
    two-way C modules.  The residual ``scale`` given to each module decays
    linearly from 1.0 down to 0.7 across the 40 modules (step ~= 0.3/39).
    """

    def __init__(self, num_classes=1000):
        # num_classes: output size of the final linear classification layer.
        super(PolyNet, self).__init__()
        self.stem = Stem()
        # Stage A: ten 2-way Inception-ResNet-A modules.
        self.stage_a = nn.Sequential(
            InceptionResNetA2Way(scale=1),
            InceptionResNetA2Way(scale=0.992308),
            InceptionResNetA2Way(scale=0.984615),
            InceptionResNetA2Way(scale=0.976923),
            InceptionResNetA2Way(scale=0.969231),
            InceptionResNetA2Way(scale=0.961538),
            InceptionResNetA2Way(scale=0.953846),
            InceptionResNetA2Way(scale=0.946154),
            InceptionResNetA2Way(scale=0.938462),
            InceptionResNetA2Way(scale=0.930769),
        )
        self.reduction_a = ReductionA()
        # Stage B: twenty modules, alternating poly-3 and 2-way.
        self.stage_b = nn.Sequential(
            InceptionResNetBPoly3(scale=0.923077),
            InceptionResNetB2Way(scale=0.915385),
            InceptionResNetBPoly3(scale=0.907692),
            InceptionResNetB2Way(scale=0.9),
            InceptionResNetBPoly3(scale=0.892308),
            InceptionResNetB2Way(scale=0.884615),
            InceptionResNetBPoly3(scale=0.876923),
            InceptionResNetB2Way(scale=0.869231),
            InceptionResNetBPoly3(scale=0.861538),
            InceptionResNetB2Way(scale=0.853846),
            InceptionResNetBPoly3(scale=0.846154),
            InceptionResNetB2Way(scale=0.838462),
            InceptionResNetBPoly3(scale=0.830769),
            InceptionResNetB2Way(scale=0.823077),
            InceptionResNetBPoly3(scale=0.815385),
            InceptionResNetB2Way(scale=0.807692),
            InceptionResNetBPoly3(scale=0.8),
            InceptionResNetB2Way(scale=0.792308),
            InceptionResNetBPoly3(scale=0.784615),
            InceptionResNetB2Way(scale=0.776923),
        )
        self.reduction_b = ReductionB()
        # Stage C: ten modules, alternating poly-3 and 2-way.
        self.stage_c = nn.Sequential(
            InceptionResNetCPoly3(scale=0.769231),
            InceptionResNetC2Way(scale=0.761538),
            InceptionResNetCPoly3(scale=0.753846),
            InceptionResNetC2Way(scale=0.746154),
            InceptionResNetCPoly3(scale=0.738462),
            InceptionResNetC2Way(scale=0.730769),
            InceptionResNetCPoly3(scale=0.723077),
            InceptionResNetC2Way(scale=0.715385),
            InceptionResNetCPoly3(scale=0.707692),
            InceptionResNetC2Way(scale=0.7),
        )
        # Head: fixed 9x9 average pool, dropout, linear classifier.
        self.avg_pool = nn.AvgPool2d(9, stride=1)
        self.dropout = nn.Dropout(0.2)
        self.last_linear = nn.Linear(2048, num_classes)

    def features(self, x):
        """Run the convolutional backbone; returns a 2048-channel map."""
        x = self.stem(x)
        x = self.stage_a(x)
        x = self.reduction_a(x)
        x = self.stage_b(x)
        x = self.reduction_b(x)
        x = self.stage_c(x)
        return x

    def logits(self, x):
        """Classification head applied to the backbone feature map."""
        # NOTE(review): the fixed 9x9 pool implies a 9x9 spatial feature
        # map is expected here -- confirm against the intended input size.
        x = self.avg_pool(x)
        x = self.dropout(x)
        x = x.view(x.size(0), -1)  # flatten to (N, 2048)
        x = self.last_linear(x)
        return x

    def forward(self, x):
        """Full forward pass: features then logits."""
        x = self.features(x)
        x = self.logits(x)
        return x
|
fmfn/BayesianOptimization | examples/async_optimization.py | BayesianOptimizationHandler.post | python | def post(self):
body = tornado.escape.json_decode(self.request.body)
try:
self._bo.register(
params=body["params"],
target=body["target"],
)
print("BO has registered: {} points.".format(len(self._bo.space)), end="\n\n")
except KeyError:
pass
finally:
suggested_params = self._bo.suggest(self._uf)
self.write(json.dumps(suggested_params)) | Deal with incoming requests. | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/examples/async_optimization.py#L42-L57 | null | class BayesianOptimizationHandler(RequestHandler):
"""Basic functionality for NLP handlers."""
_bo = BayesianOptimization(
f=black_box_function,
pbounds={"x": (-4, 4), "y": (-3, 3)}
)
_uf = UtilityFunction(kind="ucb", kappa=3, xi=1)
|
fmfn/BayesianOptimization | bayes_opt/bayesian_optimization.py | BayesianOptimization.register | python | def register(self, params, target):
self._space.register(params, target)
self.dispatch(Events.OPTMIZATION_STEP) | Expect observation with known target | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/bayesian_optimization.py#L102-L105 | [
"def dispatch(self, event):\n for _, callback in self.get_subscribers(event).items():\n callback(event, self)\n",
"def register(self, params, target):\n \"\"\"\n Append a point and its target value to the known data.\n\n Parameters\n ----------\n x : ndarray\n a single point, with len(x) == self.dim\n\n y : float\n target function value\n\n Raises\n ------\n KeyError:\n if the point is not unique\n\n Notes\n -----\n runs in ammortized constant time\n\n Example\n -------\n >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}\n >>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)\n >>> len(space)\n 0\n >>> x = np.array([0, 0])\n >>> y = 1\n >>> space.add_observation(x, y)\n >>> len(space)\n 1\n \"\"\"\n x = self._as_array(params)\n if x in self:\n raise KeyError('Data point {} is not unique'.format(x))\n\n # Insert data into unique dictionary\n self._cache[_hashable(x.ravel())] = target\n\n self._params = np.concatenate([self._params, x.reshape(1, -1)])\n self._target = np.concatenate([self._target, [target]])\n"
] | class BayesianOptimization(Observable):
def __init__(self, f, pbounds, random_state=None, verbose=2):
""""""
self._random_state = ensure_rng(random_state)
# Data structure containing the function to be optimized, the bounds of
# its domain, and a record of the evaluations we have done so far
self._space = TargetSpace(f, pbounds, random_state)
# queue
self._queue = Queue()
# Internal GP regressor
self._gp = GaussianProcessRegressor(
kernel=Matern(nu=2.5),
alpha=1e-6,
normalize_y=True,
n_restarts_optimizer=25,
random_state=self._random_state,
)
self._verbose = verbose
super(BayesianOptimization, self).__init__(events=DEFAULT_EVENTS)
@property
def space(self):
return self._space
@property
def max(self):
return self._space.max()
@property
def res(self):
return self._space.res()
def probe(self, params, lazy=True):
"""Probe target of x"""
if lazy:
self._queue.add(params)
else:
self._space.probe(params)
self.dispatch(Events.OPTMIZATION_STEP)
def suggest(self, utility_function):
"""Most promissing point to probe next"""
if len(self._space) == 0:
return self._space.array_to_params(self._space.random_sample())
# Sklearn's GP throws a large number of warnings at times, but
# we don't really need to see them here.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self._gp.fit(self._space.params, self._space.target)
# Finding argmax of the acquisition function.
suggestion = acq_max(
ac=utility_function.utility,
gp=self._gp,
y_max=self._space.target.max(),
bounds=self._space.bounds,
random_state=self._random_state
)
return self._space.array_to_params(suggestion)
def _prime_queue(self, init_points):
"""Make sure there's something in the queue at the very beginning."""
if self._queue.empty and self._space.empty:
init_points = max(init_points, 1)
for _ in range(init_points):
self._queue.add(self._space.random_sample())
def _prime_subscriptions(self):
if not any([len(subs) for subs in self._events.values()]):
_logger = _get_default_logger(self._verbose)
self.subscribe(Events.OPTMIZATION_START, _logger)
self.subscribe(Events.OPTMIZATION_STEP, _logger)
self.subscribe(Events.OPTMIZATION_END, _logger)
def maximize(self,
init_points=5,
n_iter=25,
acq='ucb',
kappa=2.576,
xi=0.0,
**gp_params):
"""Mazimize your function"""
self._prime_subscriptions()
self.dispatch(Events.OPTMIZATION_START)
self._prime_queue(init_points)
self.set_gp_params(**gp_params)
util = UtilityFunction(kind=acq, kappa=kappa, xi=xi)
iteration = 0
while not self._queue.empty or iteration < n_iter:
try:
x_probe = next(self._queue)
except StopIteration:
x_probe = self.suggest(util)
iteration += 1
self.probe(x_probe, lazy=False)
self.dispatch(Events.OPTMIZATION_END)
def set_bounds(self, new_bounds):
"""
A method that allows changing the lower and upper searching bounds
Parameters
----------
new_bounds : dict
A dictionary with the parameter name and its new bounds
"""
self._space.set_bounds(new_bounds)
def set_gp_params(self, **params):
self._gp.set_params(**params)
|
fmfn/BayesianOptimization | bayes_opt/bayesian_optimization.py | BayesianOptimization.probe | python | def probe(self, params, lazy=True):
if lazy:
self._queue.add(params)
else:
self._space.probe(params)
self.dispatch(Events.OPTMIZATION_STEP) | Probe target of x | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/bayesian_optimization.py#L107-L113 | [
"def add(self, obj):\n \"\"\"Add object to end of queue.\"\"\"\n self._queue.append(obj)\n",
"def dispatch(self, event):\n for _, callback in self.get_subscribers(event).items():\n callback(event, self)\n",
"def probe(self, params):\n \"\"\"\n Evaulates a single point x, to obtain the value y and then records them\n as observations.\n\n Notes\n -----\n If x has been previously seen returns a cached value of y.\n\n Parameters\n ----------\n x : ndarray\n a single point, with len(x) == self.dim\n\n Returns\n -------\n y : float\n target function value.\n \"\"\"\n x = self._as_array(params)\n\n try:\n target = self._cache[_hashable(x)]\n except KeyError:\n params = dict(zip(self._keys, x))\n target = self.target_func(**params)\n self.register(x, target)\n return target\n"
] | class BayesianOptimization(Observable):
def __init__(self, f, pbounds, random_state=None, verbose=2):
""""""
self._random_state = ensure_rng(random_state)
# Data structure containing the function to be optimized, the bounds of
# its domain, and a record of the evaluations we have done so far
self._space = TargetSpace(f, pbounds, random_state)
# queue
self._queue = Queue()
# Internal GP regressor
self._gp = GaussianProcessRegressor(
kernel=Matern(nu=2.5),
alpha=1e-6,
normalize_y=True,
n_restarts_optimizer=25,
random_state=self._random_state,
)
self._verbose = verbose
super(BayesianOptimization, self).__init__(events=DEFAULT_EVENTS)
@property
def space(self):
return self._space
@property
def max(self):
return self._space.max()
@property
def res(self):
return self._space.res()
def register(self, params, target):
"""Expect observation with known target"""
self._space.register(params, target)
self.dispatch(Events.OPTMIZATION_STEP)
def suggest(self, utility_function):
"""Most promissing point to probe next"""
if len(self._space) == 0:
return self._space.array_to_params(self._space.random_sample())
# Sklearn's GP throws a large number of warnings at times, but
# we don't really need to see them here.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self._gp.fit(self._space.params, self._space.target)
# Finding argmax of the acquisition function.
suggestion = acq_max(
ac=utility_function.utility,
gp=self._gp,
y_max=self._space.target.max(),
bounds=self._space.bounds,
random_state=self._random_state
)
return self._space.array_to_params(suggestion)
def _prime_queue(self, init_points):
"""Make sure there's something in the queue at the very beginning."""
if self._queue.empty and self._space.empty:
init_points = max(init_points, 1)
for _ in range(init_points):
self._queue.add(self._space.random_sample())
def _prime_subscriptions(self):
if not any([len(subs) for subs in self._events.values()]):
_logger = _get_default_logger(self._verbose)
self.subscribe(Events.OPTMIZATION_START, _logger)
self.subscribe(Events.OPTMIZATION_STEP, _logger)
self.subscribe(Events.OPTMIZATION_END, _logger)
def maximize(self,
init_points=5,
n_iter=25,
acq='ucb',
kappa=2.576,
xi=0.0,
**gp_params):
"""Mazimize your function"""
self._prime_subscriptions()
self.dispatch(Events.OPTMIZATION_START)
self._prime_queue(init_points)
self.set_gp_params(**gp_params)
util = UtilityFunction(kind=acq, kappa=kappa, xi=xi)
iteration = 0
while not self._queue.empty or iteration < n_iter:
try:
x_probe = next(self._queue)
except StopIteration:
x_probe = self.suggest(util)
iteration += 1
self.probe(x_probe, lazy=False)
self.dispatch(Events.OPTMIZATION_END)
def set_bounds(self, new_bounds):
"""
A method that allows changing the lower and upper searching bounds
Parameters
----------
new_bounds : dict
A dictionary with the parameter name and its new bounds
"""
self._space.set_bounds(new_bounds)
def set_gp_params(self, **params):
self._gp.set_params(**params)
|
fmfn/BayesianOptimization | bayes_opt/bayesian_optimization.py | BayesianOptimization.suggest | python | def suggest(self, utility_function):
if len(self._space) == 0:
return self._space.array_to_params(self._space.random_sample())
# Sklearn's GP throws a large number of warnings at times, but
# we don't really need to see them here.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self._gp.fit(self._space.params, self._space.target)
# Finding argmax of the acquisition function.
suggestion = acq_max(
ac=utility_function.utility,
gp=self._gp,
y_max=self._space.target.max(),
bounds=self._space.bounds,
random_state=self._random_state
)
return self._space.array_to_params(suggestion) | Most promissing point to probe next | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/bayesian_optimization.py#L115-L135 | [
"def acq_max(ac, gp, y_max, bounds, random_state, n_warmup=100000, n_iter=250):\n \"\"\"\n A function to find the maximum of the acquisition function\n\n It uses a combination of random sampling (cheap) and the 'L-BFGS-B'\n optimization method. First by sampling `n_warmup` (1e5) points at random,\n and then running L-BFGS-B from `n_iter` (250) random starting points.\n\n Parameters\n ----------\n :param ac:\n The acquisition function object that return its point-wise value.\n\n :param gp:\n A gaussian process fitted to the relevant data.\n\n :param y_max:\n The current maximum known value of the target function.\n\n :param bounds:\n The variables bounds to limit the search of the acq max.\n\n :param random_state:\n instance of np.RandomState random number generator\n\n :param n_warmup:\n number of times to randomly sample the aquisition function\n\n :param n_iter:\n number of times to run scipy.minimize\n\n Returns\n -------\n :return: x_max, The arg max of the acquisition function.\n \"\"\"\n\n # Warm up with random points\n x_tries = random_state.uniform(bounds[:, 0], bounds[:, 1],\n size=(n_warmup, bounds.shape[0]))\n ys = ac(x_tries, gp=gp, y_max=y_max)\n x_max = x_tries[ys.argmax()]\n max_acq = ys.max()\n\n # Explore the parameter space more throughly\n x_seeds = random_state.uniform(bounds[:, 0], bounds[:, 1],\n size=(n_iter, bounds.shape[0]))\n for x_try in x_seeds:\n # Find the minimum of minus the acquisition function\n res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),\n x_try.reshape(1, -1),\n bounds=bounds,\n method=\"L-BFGS-B\")\n\n # See if success\n if not res.success:\n continue\n\n # Store it if better than previous minimum(maximum).\n if max_acq is None or -res.fun[0] >= max_acq:\n x_max = res.x\n max_acq = -res.fun[0]\n\n # Clip output to make sure it lies within the bounds. Due to floating\n # point technicalities this is not always the case.\n return np.clip(x_max, bounds[:, 0], bounds[:, 1])\n",
"def array_to_params(self, x):\n try:\n assert len(x) == len(self.keys)\n except AssertionError:\n raise ValueError(\n \"Size of array ({}) is different than the \".format(len(x)) +\n \"expected number of parameters ({}).\".format(len(self.keys))\n )\n return dict(zip(self.keys, x))\n",
"def random_sample(self):\n \"\"\"\n Creates random points within the bounds of the space.\n\n Returns\n ----------\n data: ndarray\n [num x dim] array points with dimensions corresponding to `self._keys`\n\n Example\n -------\n >>> target_func = lambda p1, p2: p1 + p2\n >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}\n >>> space = TargetSpace(target_func, pbounds, random_state=0)\n >>> space.random_points(1)\n array([[ 55.33253689, 0.54488318]])\n \"\"\"\n # TODO: support integer, category, and basic scipy.optimize constraints\n data = np.empty((1, self.dim))\n for col, (lower, upper) in enumerate(self._bounds):\n data.T[col] = self.random_state.uniform(lower, upper, size=1)\n return data.ravel()\n"
] | class BayesianOptimization(Observable):
def __init__(self, f, pbounds, random_state=None, verbose=2):
""""""
self._random_state = ensure_rng(random_state)
# Data structure containing the function to be optimized, the bounds of
# its domain, and a record of the evaluations we have done so far
self._space = TargetSpace(f, pbounds, random_state)
# queue
self._queue = Queue()
# Internal GP regressor
self._gp = GaussianProcessRegressor(
kernel=Matern(nu=2.5),
alpha=1e-6,
normalize_y=True,
n_restarts_optimizer=25,
random_state=self._random_state,
)
self._verbose = verbose
super(BayesianOptimization, self).__init__(events=DEFAULT_EVENTS)
@property
def space(self):
return self._space
@property
def max(self):
return self._space.max()
@property
def res(self):
return self._space.res()
def register(self, params, target):
"""Expect observation with known target"""
self._space.register(params, target)
self.dispatch(Events.OPTMIZATION_STEP)
def probe(self, params, lazy=True):
"""Probe target of x"""
if lazy:
self._queue.add(params)
else:
self._space.probe(params)
self.dispatch(Events.OPTMIZATION_STEP)
def _prime_queue(self, init_points):
"""Make sure there's something in the queue at the very beginning."""
if self._queue.empty and self._space.empty:
init_points = max(init_points, 1)
for _ in range(init_points):
self._queue.add(self._space.random_sample())
def _prime_subscriptions(self):
if not any([len(subs) for subs in self._events.values()]):
_logger = _get_default_logger(self._verbose)
self.subscribe(Events.OPTMIZATION_START, _logger)
self.subscribe(Events.OPTMIZATION_STEP, _logger)
self.subscribe(Events.OPTMIZATION_END, _logger)
def maximize(self,
init_points=5,
n_iter=25,
acq='ucb',
kappa=2.576,
xi=0.0,
**gp_params):
"""Mazimize your function"""
self._prime_subscriptions()
self.dispatch(Events.OPTMIZATION_START)
self._prime_queue(init_points)
self.set_gp_params(**gp_params)
util = UtilityFunction(kind=acq, kappa=kappa, xi=xi)
iteration = 0
while not self._queue.empty or iteration < n_iter:
try:
x_probe = next(self._queue)
except StopIteration:
x_probe = self.suggest(util)
iteration += 1
self.probe(x_probe, lazy=False)
self.dispatch(Events.OPTMIZATION_END)
def set_bounds(self, new_bounds):
"""
A method that allows changing the lower and upper searching bounds
Parameters
----------
new_bounds : dict
A dictionary with the parameter name and its new bounds
"""
self._space.set_bounds(new_bounds)
def set_gp_params(self, **params):
self._gp.set_params(**params)
|
fmfn/BayesianOptimization | bayes_opt/bayesian_optimization.py | BayesianOptimization._prime_queue | python | def _prime_queue(self, init_points):
if self._queue.empty and self._space.empty:
init_points = max(init_points, 1)
for _ in range(init_points):
self._queue.add(self._space.random_sample()) | Make sure there's something in the queue at the very beginning. | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/bayesian_optimization.py#L137-L143 | [
"def add(self, obj):\n \"\"\"Add object to end of queue.\"\"\"\n self._queue.append(obj)\n",
"def random_sample(self):\n \"\"\"\n Creates random points within the bounds of the space.\n\n Returns\n ----------\n data: ndarray\n [num x dim] array points with dimensions corresponding to `self._keys`\n\n Example\n -------\n >>> target_func = lambda p1, p2: p1 + p2\n >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}\n >>> space = TargetSpace(target_func, pbounds, random_state=0)\n >>> space.random_points(1)\n array([[ 55.33253689, 0.54488318]])\n \"\"\"\n # TODO: support integer, category, and basic scipy.optimize constraints\n data = np.empty((1, self.dim))\n for col, (lower, upper) in enumerate(self._bounds):\n data.T[col] = self.random_state.uniform(lower, upper, size=1)\n return data.ravel()\n"
] | class BayesianOptimization(Observable):
def __init__(self, f, pbounds, random_state=None, verbose=2):
""""""
self._random_state = ensure_rng(random_state)
# Data structure containing the function to be optimized, the bounds of
# its domain, and a record of the evaluations we have done so far
self._space = TargetSpace(f, pbounds, random_state)
# queue
self._queue = Queue()
# Internal GP regressor
self._gp = GaussianProcessRegressor(
kernel=Matern(nu=2.5),
alpha=1e-6,
normalize_y=True,
n_restarts_optimizer=25,
random_state=self._random_state,
)
self._verbose = verbose
super(BayesianOptimization, self).__init__(events=DEFAULT_EVENTS)
@property
def space(self):
return self._space
@property
def max(self):
return self._space.max()
@property
def res(self):
return self._space.res()
def register(self, params, target):
"""Expect observation with known target"""
self._space.register(params, target)
self.dispatch(Events.OPTMIZATION_STEP)
def probe(self, params, lazy=True):
"""Probe target of x"""
if lazy:
self._queue.add(params)
else:
self._space.probe(params)
self.dispatch(Events.OPTMIZATION_STEP)
def suggest(self, utility_function):
"""Most promissing point to probe next"""
if len(self._space) == 0:
return self._space.array_to_params(self._space.random_sample())
# Sklearn's GP throws a large number of warnings at times, but
# we don't really need to see them here.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self._gp.fit(self._space.params, self._space.target)
# Finding argmax of the acquisition function.
suggestion = acq_max(
ac=utility_function.utility,
gp=self._gp,
y_max=self._space.target.max(),
bounds=self._space.bounds,
random_state=self._random_state
)
return self._space.array_to_params(suggestion)
def _prime_subscriptions(self):
if not any([len(subs) for subs in self._events.values()]):
_logger = _get_default_logger(self._verbose)
self.subscribe(Events.OPTMIZATION_START, _logger)
self.subscribe(Events.OPTMIZATION_STEP, _logger)
self.subscribe(Events.OPTMIZATION_END, _logger)
def maximize(self,
init_points=5,
n_iter=25,
acq='ucb',
kappa=2.576,
xi=0.0,
**gp_params):
"""Mazimize your function"""
self._prime_subscriptions()
self.dispatch(Events.OPTMIZATION_START)
self._prime_queue(init_points)
self.set_gp_params(**gp_params)
util = UtilityFunction(kind=acq, kappa=kappa, xi=xi)
iteration = 0
while not self._queue.empty or iteration < n_iter:
try:
x_probe = next(self._queue)
except StopIteration:
x_probe = self.suggest(util)
iteration += 1
self.probe(x_probe, lazy=False)
self.dispatch(Events.OPTMIZATION_END)
def set_bounds(self, new_bounds):
"""
A method that allows changing the lower and upper searching bounds
Parameters
----------
new_bounds : dict
A dictionary with the parameter name and its new bounds
"""
self._space.set_bounds(new_bounds)
def set_gp_params(self, **params):
self._gp.set_params(**params)
|
fmfn/BayesianOptimization | bayes_opt/bayesian_optimization.py | BayesianOptimization.maximize | python | def maximize(self,
init_points=5,
n_iter=25,
acq='ucb',
kappa=2.576,
xi=0.0,
**gp_params):
self._prime_subscriptions()
self.dispatch(Events.OPTMIZATION_START)
self._prime_queue(init_points)
self.set_gp_params(**gp_params)
util = UtilityFunction(kind=acq, kappa=kappa, xi=xi)
iteration = 0
while not self._queue.empty or iteration < n_iter:
try:
x_probe = next(self._queue)
except StopIteration:
x_probe = self.suggest(util)
iteration += 1
self.probe(x_probe, lazy=False)
self.dispatch(Events.OPTMIZATION_END) | Mazimize your function | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/bayesian_optimization.py#L152-L176 | [
"def dispatch(self, event):\n for _, callback in self.get_subscribers(event).items():\n callback(event, self)\n",
"def probe(self, params, lazy=True):\n \"\"\"Probe target of x\"\"\"\n if lazy:\n self._queue.add(params)\n else:\n self._space.probe(params)\n self.dispatch(Events.OPTMIZATION_STEP)\n",
"def suggest(self, utility_function):\n \"\"\"Most promissing point to probe next\"\"\"\n if len(self._space) == 0:\n return self._space.array_to_params(self._space.random_sample())\n\n # Sklearn's GP throws a large number of warnings at times, but\n # we don't really need to see them here.\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n self._gp.fit(self._space.params, self._space.target)\n\n # Finding argmax of the acquisition function.\n suggestion = acq_max(\n ac=utility_function.utility,\n gp=self._gp,\n y_max=self._space.target.max(),\n bounds=self._space.bounds,\n random_state=self._random_state\n )\n\n return self._space.array_to_params(suggestion)\n",
"def _prime_queue(self, init_points):\n \"\"\"Make sure there's something in the queue at the very beginning.\"\"\"\n if self._queue.empty and self._space.empty:\n init_points = max(init_points, 1)\n\n for _ in range(init_points):\n self._queue.add(self._space.random_sample())\n",
"def _prime_subscriptions(self):\n if not any([len(subs) for subs in self._events.values()]):\n _logger = _get_default_logger(self._verbose)\n self.subscribe(Events.OPTMIZATION_START, _logger)\n self.subscribe(Events.OPTMIZATION_STEP, _logger)\n self.subscribe(Events.OPTMIZATION_END, _logger)\n",
"def set_gp_params(self, **params):\n self._gp.set_params(**params)\n"
] | class BayesianOptimization(Observable):
def __init__(self, f, pbounds, random_state=None, verbose=2):
""""""
self._random_state = ensure_rng(random_state)
# Data structure containing the function to be optimized, the bounds of
# its domain, and a record of the evaluations we have done so far
self._space = TargetSpace(f, pbounds, random_state)
# queue
self._queue = Queue()
# Internal GP regressor
self._gp = GaussianProcessRegressor(
kernel=Matern(nu=2.5),
alpha=1e-6,
normalize_y=True,
n_restarts_optimizer=25,
random_state=self._random_state,
)
self._verbose = verbose
super(BayesianOptimization, self).__init__(events=DEFAULT_EVENTS)
@property
def space(self):
return self._space
@property
def max(self):
return self._space.max()
@property
def res(self):
return self._space.res()
def register(self, params, target):
"""Expect observation with known target"""
self._space.register(params, target)
self.dispatch(Events.OPTMIZATION_STEP)
def probe(self, params, lazy=True):
"""Probe target of x"""
if lazy:
self._queue.add(params)
else:
self._space.probe(params)
self.dispatch(Events.OPTMIZATION_STEP)
def suggest(self, utility_function):
"""Most promissing point to probe next"""
if len(self._space) == 0:
return self._space.array_to_params(self._space.random_sample())
# Sklearn's GP throws a large number of warnings at times, but
# we don't really need to see them here.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self._gp.fit(self._space.params, self._space.target)
# Finding argmax of the acquisition function.
suggestion = acq_max(
ac=utility_function.utility,
gp=self._gp,
y_max=self._space.target.max(),
bounds=self._space.bounds,
random_state=self._random_state
)
return self._space.array_to_params(suggestion)
def _prime_queue(self, init_points):
"""Make sure there's something in the queue at the very beginning."""
if self._queue.empty and self._space.empty:
init_points = max(init_points, 1)
for _ in range(init_points):
self._queue.add(self._space.random_sample())
def _prime_subscriptions(self):
if not any([len(subs) for subs in self._events.values()]):
_logger = _get_default_logger(self._verbose)
self.subscribe(Events.OPTMIZATION_START, _logger)
self.subscribe(Events.OPTMIZATION_STEP, _logger)
self.subscribe(Events.OPTMIZATION_END, _logger)
def set_bounds(self, new_bounds):
"""
A method that allows changing the lower and upper searching bounds
Parameters
----------
new_bounds : dict
A dictionary with the parameter name and its new bounds
"""
self._space.set_bounds(new_bounds)
def set_gp_params(self, **params):
self._gp.set_params(**params)
|
fmfn/BayesianOptimization | bayes_opt/target_space.py | TargetSpace.register | python | def register(self, params, target):
x = self._as_array(params)
if x in self:
raise KeyError('Data point {} is not unique'.format(x))
# Insert data into unique dictionary
self._cache[_hashable(x.ravel())] = target
self._params = np.concatenate([self._params, x.reshape(1, -1)])
self._target = np.concatenate([self._target, [target]]) | Append a point and its target value to the known data.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
y : float
target function value
Raises
------
KeyError:
if the point is not unique
Notes
-----
runs in ammortized constant time
Example
-------
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)
>>> len(space)
0
>>> x = np.array([0, 0])
>>> y = 1
>>> space.add_observation(x, y)
>>> len(space)
1 | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/target_space.py#L126-L167 | [
"def _hashable(x):\n \"\"\" ensure that an point is hashable by a python dict \"\"\"\n return tuple(map(float, x))\n",
"def _as_array(self, x):\n try:\n x = np.asarray(x, dtype=float)\n except TypeError:\n x = self.params_to_array(x)\n\n x = x.ravel()\n try:\n assert x.size == self.dim\n except AssertionError:\n raise ValueError(\n \"Size of array ({}) is different than the \".format(len(x)) +\n \"expected number of parameters ({}).\".format(len(self.keys))\n )\n return x\n"
] | class TargetSpace(object):
"""
Holds the param-space coordinates (X) and target values (Y)
Allows for constant-time appends while ensuring no duplicates are added
Example
-------
>>> def target_func(p1, p2):
>>> return p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> x = space.random_points(1)[0]
>>> y = space.register_point(x)
>>> assert self.max_point()['max_val'] == y
"""
def __init__(self, target_func, pbounds, random_state=None):
"""
Parameters
----------
target_func : function
Function to be maximized.
pbounds : dict
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
random_state : int, RandomState, or None
optionally specify a seed for a random number generator
"""
self.random_state = ensure_rng(random_state)
# The function to be optimized
self.target_func = target_func
# Get the name of the parameters
self._keys = sorted(pbounds)
# Create an array with parameters bounds
self._bounds = np.array(
[item[1] for item in sorted(pbounds.items(), key=lambda x: x[0])],
dtype=np.float
)
# preallocated memory for X and Y points
self._params = np.empty(shape=(0, self.dim))
self._target = np.empty(shape=(0))
# keep track of unique points we have seen so far
self._cache = {}
def __contains__(self, x):
return _hashable(x) in self._cache
def __len__(self):
assert len(self._params) == len(self._target)
return len(self._target)
@property
def empty(self):
return len(self) == 0
@property
def params(self):
return self._params
@property
def target(self):
return self._target
@property
def dim(self):
return len(self._keys)
@property
def keys(self):
return self._keys
@property
def bounds(self):
return self._bounds
def params_to_array(self, params):
try:
assert set(params) == set(self.keys)
except AssertionError:
raise ValueError(
"Parameters' keys ({}) do ".format(sorted(params)) +
"not match the expected set of keys ({}).".format(self.keys)
)
return np.asarray([params[key] for key in self.keys])
def array_to_params(self, x):
try:
assert len(x) == len(self.keys)
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(len(self.keys))
)
return dict(zip(self.keys, x))
def _as_array(self, x):
try:
x = np.asarray(x, dtype=float)
except TypeError:
x = self.params_to_array(x)
x = x.ravel()
try:
assert x.size == self.dim
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(len(self.keys))
)
return x
def probe(self, params):
"""
Evaulates a single point x, to obtain the value y and then records them
as observations.
Notes
-----
If x has been previously seen returns a cached value of y.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
Returns
-------
y : float
target function value.
"""
x = self._as_array(params)
try:
target = self._cache[_hashable(x)]
except KeyError:
params = dict(zip(self._keys, x))
target = self.target_func(**params)
self.register(x, target)
return target
def random_sample(self):
"""
Creates random points within the bounds of the space.
Returns
----------
data: ndarray
[num x dim] array points with dimensions corresponding to `self._keys`
Example
-------
>>> target_func = lambda p1, p2: p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> space.random_points(1)
array([[ 55.33253689, 0.54488318]])
"""
# TODO: support integer, category, and basic scipy.optimize constraints
data = np.empty((1, self.dim))
for col, (lower, upper) in enumerate(self._bounds):
data.T[col] = self.random_state.uniform(lower, upper, size=1)
return data.ravel()
def max(self):
"""Get maximum target value found and corresponding parametes."""
try:
res = {
'target': self.target.max(),
'params': dict(
zip(self.keys, self.params[self.target.argmax()])
)
}
except ValueError:
res = {}
return res
def res(self):
"""Get all target values found and corresponding parametes."""
params = [dict(zip(self.keys, p)) for p in self.params]
return [
{"target": target, "params": param}
for target, param in zip(self.target, params)
]
def set_bounds(self, new_bounds):
"""
A method that allows changing the lower and upper searching bounds
Parameters
----------
new_bounds : dict
A dictionary with the parameter name and its new bounds
"""
for row, key in enumerate(self.keys):
if key in new_bounds:
self._bounds[row] = new_bounds[key]
|
fmfn/BayesianOptimization | bayes_opt/target_space.py | TargetSpace.probe | python | def probe(self, params):
x = self._as_array(params)
try:
target = self._cache[_hashable(x)]
except KeyError:
params = dict(zip(self._keys, x))
target = self.target_func(**params)
self.register(x, target)
return target | Evaulates a single point x, to obtain the value y and then records them
as observations.
Notes
-----
If x has been previously seen returns a cached value of y.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
Returns
-------
y : float
target function value. | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/target_space.py#L169-L196 | [
"def _hashable(x):\n \"\"\" ensure that an point is hashable by a python dict \"\"\"\n return tuple(map(float, x))\n",
"def target_func(**kwargs):\n # arbitrary target func\n return sum(kwargs.values())\n",
"def _as_array(self, x):\n try:\n x = np.asarray(x, dtype=float)\n except TypeError:\n x = self.params_to_array(x)\n\n x = x.ravel()\n try:\n assert x.size == self.dim\n except AssertionError:\n raise ValueError(\n \"Size of array ({}) is different than the \".format(len(x)) +\n \"expected number of parameters ({}).\".format(len(self.keys))\n )\n return x\n",
"def register(self, params, target):\n \"\"\"\n Append a point and its target value to the known data.\n\n Parameters\n ----------\n x : ndarray\n a single point, with len(x) == self.dim\n\n y : float\n target function value\n\n Raises\n ------\n KeyError:\n if the point is not unique\n\n Notes\n -----\n runs in ammortized constant time\n\n Example\n -------\n >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}\n >>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)\n >>> len(space)\n 0\n >>> x = np.array([0, 0])\n >>> y = 1\n >>> space.add_observation(x, y)\n >>> len(space)\n 1\n \"\"\"\n x = self._as_array(params)\n if x in self:\n raise KeyError('Data point {} is not unique'.format(x))\n\n # Insert data into unique dictionary\n self._cache[_hashable(x.ravel())] = target\n\n self._params = np.concatenate([self._params, x.reshape(1, -1)])\n self._target = np.concatenate([self._target, [target]])\n"
] | class TargetSpace(object):
"""
Holds the param-space coordinates (X) and target values (Y)
Allows for constant-time appends while ensuring no duplicates are added
Example
-------
>>> def target_func(p1, p2):
>>> return p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> x = space.random_points(1)[0]
>>> y = space.register_point(x)
>>> assert self.max_point()['max_val'] == y
"""
def __init__(self, target_func, pbounds, random_state=None):
"""
Parameters
----------
target_func : function
Function to be maximized.
pbounds : dict
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
random_state : int, RandomState, or None
optionally specify a seed for a random number generator
"""
self.random_state = ensure_rng(random_state)
# The function to be optimized
self.target_func = target_func
# Get the name of the parameters
self._keys = sorted(pbounds)
# Create an array with parameters bounds
self._bounds = np.array(
[item[1] for item in sorted(pbounds.items(), key=lambda x: x[0])],
dtype=np.float
)
# preallocated memory for X and Y points
self._params = np.empty(shape=(0, self.dim))
self._target = np.empty(shape=(0))
# keep track of unique points we have seen so far
self._cache = {}
def __contains__(self, x):
return _hashable(x) in self._cache
def __len__(self):
assert len(self._params) == len(self._target)
return len(self._target)
@property
def empty(self):
return len(self) == 0
@property
def params(self):
return self._params
@property
def target(self):
return self._target
@property
def dim(self):
return len(self._keys)
@property
def keys(self):
return self._keys
@property
def bounds(self):
return self._bounds
def params_to_array(self, params):
try:
assert set(params) == set(self.keys)
except AssertionError:
raise ValueError(
"Parameters' keys ({}) do ".format(sorted(params)) +
"not match the expected set of keys ({}).".format(self.keys)
)
return np.asarray([params[key] for key in self.keys])
def array_to_params(self, x):
try:
assert len(x) == len(self.keys)
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(len(self.keys))
)
return dict(zip(self.keys, x))
def _as_array(self, x):
try:
x = np.asarray(x, dtype=float)
except TypeError:
x = self.params_to_array(x)
x = x.ravel()
try:
assert x.size == self.dim
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(len(self.keys))
)
return x
def register(self, params, target):
"""
Append a point and its target value to the known data.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
y : float
target function value
Raises
------
KeyError:
if the point is not unique
Notes
-----
runs in ammortized constant time
Example
-------
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)
>>> len(space)
0
>>> x = np.array([0, 0])
>>> y = 1
>>> space.add_observation(x, y)
>>> len(space)
1
"""
x = self._as_array(params)
if x in self:
raise KeyError('Data point {} is not unique'.format(x))
# Insert data into unique dictionary
self._cache[_hashable(x.ravel())] = target
self._params = np.concatenate([self._params, x.reshape(1, -1)])
self._target = np.concatenate([self._target, [target]])
def random_sample(self):
"""
Creates random points within the bounds of the space.
Returns
----------
data: ndarray
[num x dim] array points with dimensions corresponding to `self._keys`
Example
-------
>>> target_func = lambda p1, p2: p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> space.random_points(1)
array([[ 55.33253689, 0.54488318]])
"""
# TODO: support integer, category, and basic scipy.optimize constraints
data = np.empty((1, self.dim))
for col, (lower, upper) in enumerate(self._bounds):
data.T[col] = self.random_state.uniform(lower, upper, size=1)
return data.ravel()
def max(self):
"""Get maximum target value found and corresponding parametes."""
try:
res = {
'target': self.target.max(),
'params': dict(
zip(self.keys, self.params[self.target.argmax()])
)
}
except ValueError:
res = {}
return res
def res(self):
"""Get all target values found and corresponding parametes."""
params = [dict(zip(self.keys, p)) for p in self.params]
return [
{"target": target, "params": param}
for target, param in zip(self.target, params)
]
def set_bounds(self, new_bounds):
"""
A method that allows changing the lower and upper searching bounds
Parameters
----------
new_bounds : dict
A dictionary with the parameter name and its new bounds
"""
for row, key in enumerate(self.keys):
if key in new_bounds:
self._bounds[row] = new_bounds[key]
|
fmfn/BayesianOptimization | bayes_opt/target_space.py | TargetSpace.random_sample | python | def random_sample(self):
# TODO: support integer, category, and basic scipy.optimize constraints
data = np.empty((1, self.dim))
for col, (lower, upper) in enumerate(self._bounds):
data.T[col] = self.random_state.uniform(lower, upper, size=1)
return data.ravel() | Creates random points within the bounds of the space.
Returns
----------
data: ndarray
[num x dim] array points with dimensions corresponding to `self._keys`
Example
-------
>>> target_func = lambda p1, p2: p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> space.random_points(1)
array([[ 55.33253689, 0.54488318]]) | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/target_space.py#L198-L219 | null | class TargetSpace(object):
"""
Holds the param-space coordinates (X) and target values (Y)
Allows for constant-time appends while ensuring no duplicates are added
Example
-------
>>> def target_func(p1, p2):
>>> return p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> x = space.random_points(1)[0]
>>> y = space.register_point(x)
>>> assert self.max_point()['max_val'] == y
"""
def __init__(self, target_func, pbounds, random_state=None):
"""
Parameters
----------
target_func : function
Function to be maximized.
pbounds : dict
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
random_state : int, RandomState, or None
optionally specify a seed for a random number generator
"""
self.random_state = ensure_rng(random_state)
# The function to be optimized
self.target_func = target_func
# Get the name of the parameters
self._keys = sorted(pbounds)
# Create an array with parameters bounds
self._bounds = np.array(
[item[1] for item in sorted(pbounds.items(), key=lambda x: x[0])],
dtype=np.float
)
# preallocated memory for X and Y points
self._params = np.empty(shape=(0, self.dim))
self._target = np.empty(shape=(0))
# keep track of unique points we have seen so far
self._cache = {}
def __contains__(self, x):
return _hashable(x) in self._cache
def __len__(self):
assert len(self._params) == len(self._target)
return len(self._target)
@property
def empty(self):
return len(self) == 0
@property
def params(self):
return self._params
@property
def target(self):
return self._target
@property
def dim(self):
return len(self._keys)
@property
def keys(self):
return self._keys
@property
def bounds(self):
return self._bounds
def params_to_array(self, params):
try:
assert set(params) == set(self.keys)
except AssertionError:
raise ValueError(
"Parameters' keys ({}) do ".format(sorted(params)) +
"not match the expected set of keys ({}).".format(self.keys)
)
return np.asarray([params[key] for key in self.keys])
def array_to_params(self, x):
try:
assert len(x) == len(self.keys)
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(len(self.keys))
)
return dict(zip(self.keys, x))
def _as_array(self, x):
try:
x = np.asarray(x, dtype=float)
except TypeError:
x = self.params_to_array(x)
x = x.ravel()
try:
assert x.size == self.dim
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(len(self.keys))
)
return x
def register(self, params, target):
"""
Append a point and its target value to the known data.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
y : float
target function value
Raises
------
KeyError:
if the point is not unique
Notes
-----
runs in ammortized constant time
Example
-------
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)
>>> len(space)
0
>>> x = np.array([0, 0])
>>> y = 1
>>> space.add_observation(x, y)
>>> len(space)
1
"""
x = self._as_array(params)
if x in self:
raise KeyError('Data point {} is not unique'.format(x))
# Insert data into unique dictionary
self._cache[_hashable(x.ravel())] = target
self._params = np.concatenate([self._params, x.reshape(1, -1)])
self._target = np.concatenate([self._target, [target]])
def probe(self, params):
"""
Evaulates a single point x, to obtain the value y and then records them
as observations.
Notes
-----
If x has been previously seen returns a cached value of y.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
Returns
-------
y : float
target function value.
"""
x = self._as_array(params)
try:
target = self._cache[_hashable(x)]
except KeyError:
params = dict(zip(self._keys, x))
target = self.target_func(**params)
self.register(x, target)
return target
def max(self):
"""Get maximum target value found and corresponding parametes."""
try:
res = {
'target': self.target.max(),
'params': dict(
zip(self.keys, self.params[self.target.argmax()])
)
}
except ValueError:
res = {}
return res
def res(self):
"""Get all target values found and corresponding parametes."""
params = [dict(zip(self.keys, p)) for p in self.params]
return [
{"target": target, "params": param}
for target, param in zip(self.target, params)
]
def set_bounds(self, new_bounds):
"""
A method that allows changing the lower and upper searching bounds
Parameters
----------
new_bounds : dict
A dictionary with the parameter name and its new bounds
"""
for row, key in enumerate(self.keys):
if key in new_bounds:
self._bounds[row] = new_bounds[key]
|
fmfn/BayesianOptimization | bayes_opt/target_space.py | TargetSpace.max | python | def max(self):
try:
res = {
'target': self.target.max(),
'params': dict(
zip(self.keys, self.params[self.target.argmax()])
)
}
except ValueError:
res = {}
return res | Get maximum target value found and corresponding parametes. | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/target_space.py#L221-L232 | null | class TargetSpace(object):
"""
Holds the param-space coordinates (X) and target values (Y)
Allows for constant-time appends while ensuring no duplicates are added
Example
-------
>>> def target_func(p1, p2):
>>> return p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> x = space.random_points(1)[0]
>>> y = space.register_point(x)
>>> assert self.max_point()['max_val'] == y
"""
def __init__(self, target_func, pbounds, random_state=None):
"""
Parameters
----------
target_func : function
Function to be maximized.
pbounds : dict
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
random_state : int, RandomState, or None
optionally specify a seed for a random number generator
"""
self.random_state = ensure_rng(random_state)
# The function to be optimized
self.target_func = target_func
# Get the name of the parameters
self._keys = sorted(pbounds)
# Create an array with parameters bounds
self._bounds = np.array(
[item[1] for item in sorted(pbounds.items(), key=lambda x: x[0])],
dtype=np.float
)
# preallocated memory for X and Y points
self._params = np.empty(shape=(0, self.dim))
self._target = np.empty(shape=(0))
# keep track of unique points we have seen so far
self._cache = {}
def __contains__(self, x):
return _hashable(x) in self._cache
def __len__(self):
assert len(self._params) == len(self._target)
return len(self._target)
@property
def empty(self):
return len(self) == 0
@property
def params(self):
return self._params
@property
def target(self):
return self._target
@property
def dim(self):
return len(self._keys)
@property
def keys(self):
return self._keys
@property
def bounds(self):
return self._bounds
def params_to_array(self, params):
try:
assert set(params) == set(self.keys)
except AssertionError:
raise ValueError(
"Parameters' keys ({}) do ".format(sorted(params)) +
"not match the expected set of keys ({}).".format(self.keys)
)
return np.asarray([params[key] for key in self.keys])
def array_to_params(self, x):
try:
assert len(x) == len(self.keys)
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(len(self.keys))
)
return dict(zip(self.keys, x))
def _as_array(self, x):
try:
x = np.asarray(x, dtype=float)
except TypeError:
x = self.params_to_array(x)
x = x.ravel()
try:
assert x.size == self.dim
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(len(self.keys))
)
return x
def register(self, params, target):
"""
Append a point and its target value to the known data.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
y : float
target function value
Raises
------
KeyError:
if the point is not unique
Notes
-----
runs in ammortized constant time
Example
-------
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)
>>> len(space)
0
>>> x = np.array([0, 0])
>>> y = 1
>>> space.add_observation(x, y)
>>> len(space)
1
"""
x = self._as_array(params)
if x in self:
raise KeyError('Data point {} is not unique'.format(x))
# Insert data into unique dictionary
self._cache[_hashable(x.ravel())] = target
self._params = np.concatenate([self._params, x.reshape(1, -1)])
self._target = np.concatenate([self._target, [target]])
def probe(self, params):
"""
Evaulates a single point x, to obtain the value y and then records them
as observations.
Notes
-----
If x has been previously seen returns a cached value of y.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
Returns
-------
y : float
target function value.
"""
x = self._as_array(params)
try:
target = self._cache[_hashable(x)]
except KeyError:
params = dict(zip(self._keys, x))
target = self.target_func(**params)
self.register(x, target)
return target
def random_sample(self):
"""
Creates random points within the bounds of the space.
Returns
----------
data: ndarray
[num x dim] array points with dimensions corresponding to `self._keys`
Example
-------
>>> target_func = lambda p1, p2: p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> space.random_points(1)
array([[ 55.33253689, 0.54488318]])
"""
# TODO: support integer, category, and basic scipy.optimize constraints
data = np.empty((1, self.dim))
for col, (lower, upper) in enumerate(self._bounds):
data.T[col] = self.random_state.uniform(lower, upper, size=1)
return data.ravel()
def res(self):
"""Get all target values found and corresponding parametes."""
params = [dict(zip(self.keys, p)) for p in self.params]
return [
{"target": target, "params": param}
for target, param in zip(self.target, params)
]
def set_bounds(self, new_bounds):
"""
A method that allows changing the lower and upper searching bounds
Parameters
----------
new_bounds : dict
A dictionary with the parameter name and its new bounds
"""
for row, key in enumerate(self.keys):
if key in new_bounds:
self._bounds[row] = new_bounds[key]
|
fmfn/BayesianOptimization | bayes_opt/target_space.py | TargetSpace.res | python | def res(self):
params = [dict(zip(self.keys, p)) for p in self.params]
return [
{"target": target, "params": param}
for target, param in zip(self.target, params)
] | Get all target values found and corresponding parametes. | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/target_space.py#L234-L241 | null | class TargetSpace(object):
"""
Holds the param-space coordinates (X) and target values (Y)
Allows for constant-time appends while ensuring no duplicates are added
Example
-------
>>> def target_func(p1, p2):
>>> return p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> x = space.random_points(1)[0]
>>> y = space.register_point(x)
>>> assert self.max_point()['max_val'] == y
"""
def __init__(self, target_func, pbounds, random_state=None):
"""
Parameters
----------
target_func : function
Function to be maximized.
pbounds : dict
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
random_state : int, RandomState, or None
optionally specify a seed for a random number generator
"""
self.random_state = ensure_rng(random_state)
# The function to be optimized
self.target_func = target_func
# Get the name of the parameters
self._keys = sorted(pbounds)
# Create an array with parameters bounds
self._bounds = np.array(
[item[1] for item in sorted(pbounds.items(), key=lambda x: x[0])],
dtype=np.float
)
# preallocated memory for X and Y points
self._params = np.empty(shape=(0, self.dim))
self._target = np.empty(shape=(0))
# keep track of unique points we have seen so far
self._cache = {}
def __contains__(self, x):
return _hashable(x) in self._cache
def __len__(self):
assert len(self._params) == len(self._target)
return len(self._target)
@property
def empty(self):
return len(self) == 0
@property
def params(self):
return self._params
@property
def target(self):
return self._target
@property
def dim(self):
return len(self._keys)
@property
def keys(self):
return self._keys
@property
def bounds(self):
return self._bounds
def params_to_array(self, params):
try:
assert set(params) == set(self.keys)
except AssertionError:
raise ValueError(
"Parameters' keys ({}) do ".format(sorted(params)) +
"not match the expected set of keys ({}).".format(self.keys)
)
return np.asarray([params[key] for key in self.keys])
def array_to_params(self, x):
try:
assert len(x) == len(self.keys)
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(len(self.keys))
)
return dict(zip(self.keys, x))
def _as_array(self, x):
try:
x = np.asarray(x, dtype=float)
except TypeError:
x = self.params_to_array(x)
x = x.ravel()
try:
assert x.size == self.dim
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(len(self.keys))
)
return x
def register(self, params, target):
"""
Append a point and its target value to the known data.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
y : float
target function value
Raises
------
KeyError:
if the point is not unique
Notes
-----
runs in ammortized constant time
Example
-------
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)
>>> len(space)
0
>>> x = np.array([0, 0])
>>> y = 1
>>> space.add_observation(x, y)
>>> len(space)
1
"""
x = self._as_array(params)
if x in self:
raise KeyError('Data point {} is not unique'.format(x))
# Insert data into unique dictionary
self._cache[_hashable(x.ravel())] = target
self._params = np.concatenate([self._params, x.reshape(1, -1)])
self._target = np.concatenate([self._target, [target]])
def probe(self, params):
"""
Evaulates a single point x, to obtain the value y and then records them
as observations.
Notes
-----
If x has been previously seen returns a cached value of y.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
Returns
-------
y : float
target function value.
"""
x = self._as_array(params)
try:
target = self._cache[_hashable(x)]
except KeyError:
params = dict(zip(self._keys, x))
target = self.target_func(**params)
self.register(x, target)
return target
def random_sample(self):
"""
Creates random points within the bounds of the space.
Returns
----------
data: ndarray
[num x dim] array points with dimensions corresponding to `self._keys`
Example
-------
>>> target_func = lambda p1, p2: p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> space.random_points(1)
array([[ 55.33253689, 0.54488318]])
"""
# TODO: support integer, category, and basic scipy.optimize constraints
data = np.empty((1, self.dim))
for col, (lower, upper) in enumerate(self._bounds):
data.T[col] = self.random_state.uniform(lower, upper, size=1)
return data.ravel()
def max(self):
"""Get maximum target value found and corresponding parametes."""
try:
res = {
'target': self.target.max(),
'params': dict(
zip(self.keys, self.params[self.target.argmax()])
)
}
except ValueError:
res = {}
return res
def set_bounds(self, new_bounds):
"""
A method that allows changing the lower and upper searching bounds
Parameters
----------
new_bounds : dict
A dictionary with the parameter name and its new bounds
"""
for row, key in enumerate(self.keys):
if key in new_bounds:
self._bounds[row] = new_bounds[key]
|
fmfn/BayesianOptimization | bayes_opt/target_space.py | TargetSpace.set_bounds | python | def set_bounds(self, new_bounds):
for row, key in enumerate(self.keys):
if key in new_bounds:
self._bounds[row] = new_bounds[key] | A method that allows changing the lower and upper searching bounds
Parameters
----------
new_bounds : dict
A dictionary with the parameter name and its new bounds | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/target_space.py#L243-L254 | null | class TargetSpace(object):
"""
Holds the param-space coordinates (X) and target values (Y)
Allows for constant-time appends while ensuring no duplicates are added
Example
-------
>>> def target_func(p1, p2):
>>> return p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> x = space.random_points(1)[0]
>>> y = space.register_point(x)
>>> assert self.max_point()['max_val'] == y
"""
def __init__(self, target_func, pbounds, random_state=None):
"""
Parameters
----------
target_func : function
Function to be maximized.
pbounds : dict
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
random_state : int, RandomState, or None
optionally specify a seed for a random number generator
"""
self.random_state = ensure_rng(random_state)
# The function to be optimized
self.target_func = target_func
# Get the name of the parameters
self._keys = sorted(pbounds)
# Create an array with parameters bounds
self._bounds = np.array(
[item[1] for item in sorted(pbounds.items(), key=lambda x: x[0])],
dtype=np.float
)
# preallocated memory for X and Y points
self._params = np.empty(shape=(0, self.dim))
self._target = np.empty(shape=(0))
# keep track of unique points we have seen so far
self._cache = {}
def __contains__(self, x):
return _hashable(x) in self._cache
def __len__(self):
assert len(self._params) == len(self._target)
return len(self._target)
@property
def empty(self):
return len(self) == 0
@property
def params(self):
return self._params
@property
def target(self):
return self._target
@property
def dim(self):
return len(self._keys)
@property
def keys(self):
return self._keys
@property
def bounds(self):
return self._bounds
def params_to_array(self, params):
try:
assert set(params) == set(self.keys)
except AssertionError:
raise ValueError(
"Parameters' keys ({}) do ".format(sorted(params)) +
"not match the expected set of keys ({}).".format(self.keys)
)
return np.asarray([params[key] for key in self.keys])
def array_to_params(self, x):
try:
assert len(x) == len(self.keys)
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(len(self.keys))
)
return dict(zip(self.keys, x))
def _as_array(self, x):
try:
x = np.asarray(x, dtype=float)
except TypeError:
x = self.params_to_array(x)
x = x.ravel()
try:
assert x.size == self.dim
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(len(self.keys))
)
return x
def register(self, params, target):
"""
Append a point and its target value to the known data.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
y : float
target function value
Raises
------
KeyError:
if the point is not unique
Notes
-----
runs in ammortized constant time
Example
-------
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)
>>> len(space)
0
>>> x = np.array([0, 0])
>>> y = 1
>>> space.add_observation(x, y)
>>> len(space)
1
"""
x = self._as_array(params)
if x in self:
raise KeyError('Data point {} is not unique'.format(x))
# Insert data into unique dictionary
self._cache[_hashable(x.ravel())] = target
self._params = np.concatenate([self._params, x.reshape(1, -1)])
self._target = np.concatenate([self._target, [target]])
def probe(self, params):
"""
Evaulates a single point x, to obtain the value y and then records them
as observations.
Notes
-----
If x has been previously seen returns a cached value of y.
Parameters
----------
x : ndarray
a single point, with len(x) == self.dim
Returns
-------
y : float
target function value.
"""
x = self._as_array(params)
try:
target = self._cache[_hashable(x)]
except KeyError:
params = dict(zip(self._keys, x))
target = self.target_func(**params)
self.register(x, target)
return target
def random_sample(self):
"""
Creates random points within the bounds of the space.
Returns
----------
data: ndarray
[num x dim] array points with dimensions corresponding to `self._keys`
Example
-------
>>> target_func = lambda p1, p2: p1 + p2
>>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
>>> space = TargetSpace(target_func, pbounds, random_state=0)
>>> space.random_points(1)
array([[ 55.33253689, 0.54488318]])
"""
# TODO: support integer, category, and basic scipy.optimize constraints
data = np.empty((1, self.dim))
for col, (lower, upper) in enumerate(self._bounds):
data.T[col] = self.random_state.uniform(lower, upper, size=1)
return data.ravel()
def max(self):
"""Get maximum target value found and corresponding parametes."""
try:
res = {
'target': self.target.max(),
'params': dict(
zip(self.keys, self.params[self.target.argmax()])
)
}
except ValueError:
res = {}
return res
def res(self):
"""Get all target values found and corresponding parametes."""
params = [dict(zip(self.keys, p)) for p in self.params]
return [
{"target": target, "params": param}
for target, param in zip(self.target, params)
]
|
fmfn/BayesianOptimization | examples/sklearn_example.py | get_data | python | def get_data():
data, targets = make_classification(
n_samples=1000,
n_features=45,
n_informative=12,
n_redundant=7,
random_state=134985745,
)
return data, targets | Synthetic binary classification dataset. | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/examples/sklearn_example.py#L9-L18 | null | from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.svm import SVC
from bayes_opt import BayesianOptimization
from bayes_opt.util import Colours
def svc_cv(C, gamma, data, targets):
"""SVC cross validation.
This function will instantiate a SVC classifier with parameters C and
gamma. Combined with data and targets this will in turn be used to perform
cross validation. The result of cross validation is returned.
Our goal is to find combinations of C and gamma that maximizes the roc_auc
metric.
"""
estimator = SVC(C=C, gamma=gamma, random_state=2)
cval = cross_val_score(estimator, data, targets, scoring='roc_auc', cv=4)
return cval.mean()
def rfc_cv(n_estimators, min_samples_split, max_features, data, targets):
"""Random Forest cross validation.
This function will instantiate a random forest classifier with parameters
n_estimators, min_samples_split, and max_features. Combined with data and
targets this will in turn be used to perform cross validation. The result
of cross validation is returned.
Our goal is to find combinations of n_estimators, min_samples_split, and
max_features that minimzes the log loss.
"""
estimator = RFC(
n_estimators=n_estimators,
min_samples_split=min_samples_split,
max_features=max_features,
random_state=2
)
cval = cross_val_score(estimator, data, targets,
scoring='neg_log_loss', cv=4)
return cval.mean()
def optimize_svc(data, targets):
"""Apply Bayesian Optimization to SVC parameters."""
def svc_crossval(expC, expGamma):
"""Wrapper of SVC cross validation.
Notice how we transform between regular and log scale. While this
is not technically necessary, it greatly improves the performance
of the optimizer.
"""
C = 10 ** expC
gamma = 10 ** expGamma
return svc_cv(C=C, gamma=gamma, data=data, targets=targets)
optimizer = BayesianOptimization(
f=svc_crossval,
pbounds={"expC": (-3, 2), "expGamma": (-4, -1)},
random_state=1234,
verbose=2
)
optimizer.maximize(n_iter=10)
print("Final result:", optimizer.max)
def optimize_rfc(data, targets):
"""Apply Bayesian Optimization to Random Forest parameters."""
def rfc_crossval(n_estimators, min_samples_split, max_features):
"""Wrapper of RandomForest cross validation.
Notice how we ensure n_estimators and min_samples_split are casted
to integer before we pass them along. Moreover, to avoid max_features
taking values outside the (0, 1) range, we also ensure it is capped
accordingly.
"""
return rfc_cv(
n_estimators=int(n_estimators),
min_samples_split=int(min_samples_split),
max_features=max(min(max_features, 0.999), 1e-3),
data=data,
targets=targets,
)
optimizer = BayesianOptimization(
f=rfc_crossval,
pbounds={
"n_estimators": (10, 250),
"min_samples_split": (2, 25),
"max_features": (0.1, 0.999),
},
random_state=1234,
verbose=2
)
optimizer.maximize(n_iter=10)
print("Final result:", optimizer.max)
if __name__ == "__main__":
data, targets = get_data()
print(Colours.yellow("--- Optimizing SVM ---"))
optimize_svc(data, targets)
print(Colours.green("--- Optimizing Random Forest ---"))
optimize_rfc(data, targets)
|
fmfn/BayesianOptimization | examples/sklearn_example.py | svc_cv | python | def svc_cv(C, gamma, data, targets):
estimator = SVC(C=C, gamma=gamma, random_state=2)
cval = cross_val_score(estimator, data, targets, scoring='roc_auc', cv=4)
return cval.mean() | SVC cross validation.
This function will instantiate a SVC classifier with parameters C and
gamma. Combined with data and targets this will in turn be used to perform
cross validation. The result of cross validation is returned.
Our goal is to find combinations of C and gamma that maximizes the roc_auc
metric. | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/examples/sklearn_example.py#L21-L33 | null | from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.svm import SVC
from bayes_opt import BayesianOptimization
from bayes_opt.util import Colours
def get_data():
"""Synthetic binary classification dataset."""
data, targets = make_classification(
n_samples=1000,
n_features=45,
n_informative=12,
n_redundant=7,
random_state=134985745,
)
return data, targets
def rfc_cv(n_estimators, min_samples_split, max_features, data, targets):
"""Random Forest cross validation.
This function will instantiate a random forest classifier with parameters
n_estimators, min_samples_split, and max_features. Combined with data and
targets this will in turn be used to perform cross validation. The result
of cross validation is returned.
Our goal is to find combinations of n_estimators, min_samples_split, and
max_features that minimzes the log loss.
"""
estimator = RFC(
n_estimators=n_estimators,
min_samples_split=min_samples_split,
max_features=max_features,
random_state=2
)
cval = cross_val_score(estimator, data, targets,
scoring='neg_log_loss', cv=4)
return cval.mean()
def optimize_svc(data, targets):
"""Apply Bayesian Optimization to SVC parameters."""
def svc_crossval(expC, expGamma):
"""Wrapper of SVC cross validation.
Notice how we transform between regular and log scale. While this
is not technically necessary, it greatly improves the performance
of the optimizer.
"""
C = 10 ** expC
gamma = 10 ** expGamma
return svc_cv(C=C, gamma=gamma, data=data, targets=targets)
optimizer = BayesianOptimization(
f=svc_crossval,
pbounds={"expC": (-3, 2), "expGamma": (-4, -1)},
random_state=1234,
verbose=2
)
optimizer.maximize(n_iter=10)
print("Final result:", optimizer.max)
def optimize_rfc(data, targets):
"""Apply Bayesian Optimization to Random Forest parameters."""
def rfc_crossval(n_estimators, min_samples_split, max_features):
"""Wrapper of RandomForest cross validation.
Notice how we ensure n_estimators and min_samples_split are casted
to integer before we pass them along. Moreover, to avoid max_features
taking values outside the (0, 1) range, we also ensure it is capped
accordingly.
"""
return rfc_cv(
n_estimators=int(n_estimators),
min_samples_split=int(min_samples_split),
max_features=max(min(max_features, 0.999), 1e-3),
data=data,
targets=targets,
)
optimizer = BayesianOptimization(
f=rfc_crossval,
pbounds={
"n_estimators": (10, 250),
"min_samples_split": (2, 25),
"max_features": (0.1, 0.999),
},
random_state=1234,
verbose=2
)
optimizer.maximize(n_iter=10)
print("Final result:", optimizer.max)
if __name__ == "__main__":
data, targets = get_data()
print(Colours.yellow("--- Optimizing SVM ---"))
optimize_svc(data, targets)
print(Colours.green("--- Optimizing Random Forest ---"))
optimize_rfc(data, targets)
|
fmfn/BayesianOptimization | examples/sklearn_example.py | rfc_cv | python | def rfc_cv(n_estimators, min_samples_split, max_features, data, targets):
estimator = RFC(
n_estimators=n_estimators,
min_samples_split=min_samples_split,
max_features=max_features,
random_state=2
)
cval = cross_val_score(estimator, data, targets,
scoring='neg_log_loss', cv=4)
return cval.mean() | Random Forest cross validation.
This function will instantiate a random forest classifier with parameters
n_estimators, min_samples_split, and max_features. Combined with data and
targets this will in turn be used to perform cross validation. The result
of cross validation is returned.
Our goal is to find combinations of n_estimators, min_samples_split, and
max_features that minimzes the log loss. | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/examples/sklearn_example.py#L36-L55 | null | from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.svm import SVC
from bayes_opt import BayesianOptimization
from bayes_opt.util import Colours
def get_data():
"""Synthetic binary classification dataset."""
data, targets = make_classification(
n_samples=1000,
n_features=45,
n_informative=12,
n_redundant=7,
random_state=134985745,
)
return data, targets
def svc_cv(C, gamma, data, targets):
"""SVC cross validation.
This function will instantiate a SVC classifier with parameters C and
gamma. Combined with data and targets this will in turn be used to perform
cross validation. The result of cross validation is returned.
Our goal is to find combinations of C and gamma that maximizes the roc_auc
metric.
"""
estimator = SVC(C=C, gamma=gamma, random_state=2)
cval = cross_val_score(estimator, data, targets, scoring='roc_auc', cv=4)
return cval.mean()
def optimize_svc(data, targets):
"""Apply Bayesian Optimization to SVC parameters."""
def svc_crossval(expC, expGamma):
"""Wrapper of SVC cross validation.
Notice how we transform between regular and log scale. While this
is not technically necessary, it greatly improves the performance
of the optimizer.
"""
C = 10 ** expC
gamma = 10 ** expGamma
return svc_cv(C=C, gamma=gamma, data=data, targets=targets)
optimizer = BayesianOptimization(
f=svc_crossval,
pbounds={"expC": (-3, 2), "expGamma": (-4, -1)},
random_state=1234,
verbose=2
)
optimizer.maximize(n_iter=10)
print("Final result:", optimizer.max)
def optimize_rfc(data, targets):
"""Apply Bayesian Optimization to Random Forest parameters."""
def rfc_crossval(n_estimators, min_samples_split, max_features):
"""Wrapper of RandomForest cross validation.
Notice how we ensure n_estimators and min_samples_split are casted
to integer before we pass them along. Moreover, to avoid max_features
taking values outside the (0, 1) range, we also ensure it is capped
accordingly.
"""
return rfc_cv(
n_estimators=int(n_estimators),
min_samples_split=int(min_samples_split),
max_features=max(min(max_features, 0.999), 1e-3),
data=data,
targets=targets,
)
optimizer = BayesianOptimization(
f=rfc_crossval,
pbounds={
"n_estimators": (10, 250),
"min_samples_split": (2, 25),
"max_features": (0.1, 0.999),
},
random_state=1234,
verbose=2
)
optimizer.maximize(n_iter=10)
print("Final result:", optimizer.max)
if __name__ == "__main__":
data, targets = get_data()
print(Colours.yellow("--- Optimizing SVM ---"))
optimize_svc(data, targets)
print(Colours.green("--- Optimizing Random Forest ---"))
optimize_rfc(data, targets)
|
fmfn/BayesianOptimization | examples/sklearn_example.py | optimize_svc | python | def optimize_svc(data, targets):
def svc_crossval(expC, expGamma):
"""Wrapper of SVC cross validation.
Notice how we transform between regular and log scale. While this
is not technically necessary, it greatly improves the performance
of the optimizer.
"""
C = 10 ** expC
gamma = 10 ** expGamma
return svc_cv(C=C, gamma=gamma, data=data, targets=targets)
optimizer = BayesianOptimization(
f=svc_crossval,
pbounds={"expC": (-3, 2), "expGamma": (-4, -1)},
random_state=1234,
verbose=2
)
optimizer.maximize(n_iter=10)
print("Final result:", optimizer.max) | Apply Bayesian Optimization to SVC parameters. | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/examples/sklearn_example.py#L58-L79 | [
"def maximize(self,\n init_points=5,\n n_iter=25,\n acq='ucb',\n kappa=2.576,\n xi=0.0,\n **gp_params):\n \"\"\"Mazimize your function\"\"\"\n self._prime_subscriptions()\n self.dispatch(Events.OPTMIZATION_START)\n self._prime_queue(init_points)\n self.set_gp_params(**gp_params)\n\n util = UtilityFunction(kind=acq, kappa=kappa, xi=xi)\n iteration = 0\n while not self._queue.empty or iteration < n_iter:\n try:\n x_probe = next(self._queue)\n except StopIteration:\n x_probe = self.suggest(util)\n iteration += 1\n\n self.probe(x_probe, lazy=False)\n\n self.dispatch(Events.OPTMIZATION_END)\n"
] | from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.svm import SVC
from bayes_opt import BayesianOptimization
from bayes_opt.util import Colours
def get_data():
"""Synthetic binary classification dataset."""
data, targets = make_classification(
n_samples=1000,
n_features=45,
n_informative=12,
n_redundant=7,
random_state=134985745,
)
return data, targets
def svc_cv(C, gamma, data, targets):
"""SVC cross validation.
This function will instantiate a SVC classifier with parameters C and
gamma. Combined with data and targets this will in turn be used to perform
cross validation. The result of cross validation is returned.
Our goal is to find combinations of C and gamma that maximizes the roc_auc
metric.
"""
estimator = SVC(C=C, gamma=gamma, random_state=2)
cval = cross_val_score(estimator, data, targets, scoring='roc_auc', cv=4)
return cval.mean()
def rfc_cv(n_estimators, min_samples_split, max_features, data, targets):
"""Random Forest cross validation.
This function will instantiate a random forest classifier with parameters
n_estimators, min_samples_split, and max_features. Combined with data and
targets this will in turn be used to perform cross validation. The result
of cross validation is returned.
Our goal is to find combinations of n_estimators, min_samples_split, and
max_features that minimzes the log loss.
"""
estimator = RFC(
n_estimators=n_estimators,
min_samples_split=min_samples_split,
max_features=max_features,
random_state=2
)
cval = cross_val_score(estimator, data, targets,
scoring='neg_log_loss', cv=4)
return cval.mean()
def optimize_rfc(data, targets):
"""Apply Bayesian Optimization to Random Forest parameters."""
def rfc_crossval(n_estimators, min_samples_split, max_features):
"""Wrapper of RandomForest cross validation.
Notice how we ensure n_estimators and min_samples_split are casted
to integer before we pass them along. Moreover, to avoid max_features
taking values outside the (0, 1) range, we also ensure it is capped
accordingly.
"""
return rfc_cv(
n_estimators=int(n_estimators),
min_samples_split=int(min_samples_split),
max_features=max(min(max_features, 0.999), 1e-3),
data=data,
targets=targets,
)
optimizer = BayesianOptimization(
f=rfc_crossval,
pbounds={
"n_estimators": (10, 250),
"min_samples_split": (2, 25),
"max_features": (0.1, 0.999),
},
random_state=1234,
verbose=2
)
optimizer.maximize(n_iter=10)
print("Final result:", optimizer.max)
if __name__ == "__main__":
data, targets = get_data()
print(Colours.yellow("--- Optimizing SVM ---"))
optimize_svc(data, targets)
print(Colours.green("--- Optimizing Random Forest ---"))
optimize_rfc(data, targets)
|
fmfn/BayesianOptimization | examples/sklearn_example.py | optimize_rfc | python | def optimize_rfc(data, targets):
def rfc_crossval(n_estimators, min_samples_split, max_features):
"""Wrapper of RandomForest cross validation.
Notice how we ensure n_estimators and min_samples_split are casted
to integer before we pass them along. Moreover, to avoid max_features
taking values outside the (0, 1) range, we also ensure it is capped
accordingly.
"""
return rfc_cv(
n_estimators=int(n_estimators),
min_samples_split=int(min_samples_split),
max_features=max(min(max_features, 0.999), 1e-3),
data=data,
targets=targets,
)
optimizer = BayesianOptimization(
f=rfc_crossval,
pbounds={
"n_estimators": (10, 250),
"min_samples_split": (2, 25),
"max_features": (0.1, 0.999),
},
random_state=1234,
verbose=2
)
optimizer.maximize(n_iter=10)
print("Final result:", optimizer.max) | Apply Bayesian Optimization to Random Forest parameters. | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/examples/sklearn_example.py#L82-L112 | [
"def maximize(self,\n init_points=5,\n n_iter=25,\n acq='ucb',\n kappa=2.576,\n xi=0.0,\n **gp_params):\n \"\"\"Mazimize your function\"\"\"\n self._prime_subscriptions()\n self.dispatch(Events.OPTMIZATION_START)\n self._prime_queue(init_points)\n self.set_gp_params(**gp_params)\n\n util = UtilityFunction(kind=acq, kappa=kappa, xi=xi)\n iteration = 0\n while not self._queue.empty or iteration < n_iter:\n try:\n x_probe = next(self._queue)\n except StopIteration:\n x_probe = self.suggest(util)\n iteration += 1\n\n self.probe(x_probe, lazy=False)\n\n self.dispatch(Events.OPTMIZATION_END)\n"
] | from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.svm import SVC
from bayes_opt import BayesianOptimization
from bayes_opt.util import Colours
def get_data():
"""Synthetic binary classification dataset."""
data, targets = make_classification(
n_samples=1000,
n_features=45,
n_informative=12,
n_redundant=7,
random_state=134985745,
)
return data, targets
def svc_cv(C, gamma, data, targets):
"""SVC cross validation.
This function will instantiate a SVC classifier with parameters C and
gamma. Combined with data and targets this will in turn be used to perform
cross validation. The result of cross validation is returned.
Our goal is to find combinations of C and gamma that maximizes the roc_auc
metric.
"""
estimator = SVC(C=C, gamma=gamma, random_state=2)
cval = cross_val_score(estimator, data, targets, scoring='roc_auc', cv=4)
return cval.mean()
def rfc_cv(n_estimators, min_samples_split, max_features, data, targets):
"""Random Forest cross validation.
This function will instantiate a random forest classifier with parameters
n_estimators, min_samples_split, and max_features. Combined with data and
targets this will in turn be used to perform cross validation. The result
of cross validation is returned.
Our goal is to find combinations of n_estimators, min_samples_split, and
max_features that minimzes the log loss.
"""
estimator = RFC(
n_estimators=n_estimators,
min_samples_split=min_samples_split,
max_features=max_features,
random_state=2
)
cval = cross_val_score(estimator, data, targets,
scoring='neg_log_loss', cv=4)
return cval.mean()
def optimize_svc(data, targets):
"""Apply Bayesian Optimization to SVC parameters."""
def svc_crossval(expC, expGamma):
"""Wrapper of SVC cross validation.
Notice how we transform between regular and log scale. While this
is not technically necessary, it greatly improves the performance
of the optimizer.
"""
C = 10 ** expC
gamma = 10 ** expGamma
return svc_cv(C=C, gamma=gamma, data=data, targets=targets)
optimizer = BayesianOptimization(
f=svc_crossval,
pbounds={"expC": (-3, 2), "expGamma": (-4, -1)},
random_state=1234,
verbose=2
)
optimizer.maximize(n_iter=10)
print("Final result:", optimizer.max)
if __name__ == "__main__":
data, targets = get_data()
print(Colours.yellow("--- Optimizing SVM ---"))
optimize_svc(data, targets)
print(Colours.green("--- Optimizing Random Forest ---"))
optimize_rfc(data, targets)
|
fmfn/BayesianOptimization | bayes_opt/util.py | acq_max | python | def acq_max(ac, gp, y_max, bounds, random_state, n_warmup=100000, n_iter=250):
# Warm up with random points
x_tries = random_state.uniform(bounds[:, 0], bounds[:, 1],
size=(n_warmup, bounds.shape[0]))
ys = ac(x_tries, gp=gp, y_max=y_max)
x_max = x_tries[ys.argmax()]
max_acq = ys.max()
# Explore the parameter space more throughly
x_seeds = random_state.uniform(bounds[:, 0], bounds[:, 1],
size=(n_iter, bounds.shape[0]))
for x_try in x_seeds:
# Find the minimum of minus the acquisition function
res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
x_try.reshape(1, -1),
bounds=bounds,
method="L-BFGS-B")
# See if success
if not res.success:
continue
# Store it if better than previous minimum(maximum).
if max_acq is None or -res.fun[0] >= max_acq:
x_max = res.x
max_acq = -res.fun[0]
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
return np.clip(x_max, bounds[:, 0], bounds[:, 1]) | A function to find the maximum of the acquisition function
It uses a combination of random sampling (cheap) and the 'L-BFGS-B'
optimization method. First by sampling `n_warmup` (1e5) points at random,
and then running L-BFGS-B from `n_iter` (250) random starting points.
Parameters
----------
:param ac:
The acquisition function object that return its point-wise value.
:param gp:
A gaussian process fitted to the relevant data.
:param y_max:
The current maximum known value of the target function.
:param bounds:
The variables bounds to limit the search of the acq max.
:param random_state:
instance of np.RandomState random number generator
:param n_warmup:
number of times to randomly sample the aquisition function
:param n_iter:
number of times to run scipy.minimize
Returns
-------
:return: x_max, The arg max of the acquisition function. | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/util.py#L7-L71 | [
"def utility(self, x, gp, y_max):\n if self.kind == 'ucb':\n return self._ucb(x, gp, self.kappa)\n if self.kind == 'ei':\n return self._ei(x, gp, y_max, self.xi)\n if self.kind == 'poi':\n return self._poi(x, gp, y_max, self.xi)\n"
] | import warnings
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize
class UtilityFunction(object):
"""
An object to compute the acquisition functions.
"""
def __init__(self, kind, kappa, xi):
"""
If UCB is to be used, a constant kappa is needed.
"""
self.kappa = kappa
self.xi = xi
if kind not in ['ucb', 'ei', 'poi']:
err = "The utility function " \
"{} has not been implemented, " \
"please choose one of ucb, ei, or poi.".format(kind)
raise NotImplementedError(err)
else:
self.kind = kind
def utility(self, x, gp, y_max):
if self.kind == 'ucb':
return self._ucb(x, gp, self.kappa)
if self.kind == 'ei':
return self._ei(x, gp, y_max, self.xi)
if self.kind == 'poi':
return self._poi(x, gp, y_max, self.xi)
@staticmethod
def _ucb(x, gp, kappa):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mean, std = gp.predict(x, return_std=True)
return mean + kappa * std
@staticmethod
def _ei(x, gp, y_max, xi):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mean, std = gp.predict(x, return_std=True)
z = (mean - y_max - xi)/std
return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z)
@staticmethod
def _poi(x, gp, y_max, xi):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mean, std = gp.predict(x, return_std=True)
z = (mean - y_max - xi)/std
return norm.cdf(z)
def load_logs(optimizer, logs):
"""Load previous ...
"""
import json
if isinstance(logs, str):
logs = [logs]
for log in logs:
with open(log, "r") as j:
while True:
try:
iteration = next(j)
except StopIteration:
break
iteration = json.loads(iteration)
try:
optimizer.register(
params=iteration["params"],
target=iteration["target"],
)
except KeyError:
pass
return optimizer
def ensure_rng(random_state=None):
"""
Creates a random number generator based on an optional seed. This can be
an integer or another random state for a seeded rng, or None for an
unseeded rng.
"""
if random_state is None:
random_state = np.random.RandomState()
elif isinstance(random_state, int):
random_state = np.random.RandomState(random_state)
else:
assert isinstance(random_state, np.random.RandomState)
return random_state
class Colours:
"""Print in nice colours."""
BLUE = '\033[94m'
BOLD = '\033[1m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
END = '\033[0m'
GREEN = '\033[92m'
PURPLE = '\033[95m'
RED = '\033[91m'
UNDERLINE = '\033[4m'
YELLOW = '\033[93m'
@classmethod
def _wrap_colour(cls, s, colour):
return colour + s + cls.END
@classmethod
def black(cls, s):
"""Wrap text in black."""
return cls._wrap_colour(s, cls.END)
@classmethod
def blue(cls, s):
"""Wrap text in blue."""
return cls._wrap_colour(s, cls.BLUE)
@classmethod
def bold(cls, s):
"""Wrap text in bold."""
return cls._wrap_colour(s, cls.BOLD)
@classmethod
def cyan(cls, s):
"""Wrap text in cyan."""
return cls._wrap_colour(s, cls.CYAN)
@classmethod
def darkcyan(cls, s):
"""Wrap text in darkcyan."""
return cls._wrap_colour(s, cls.DARKCYAN)
@classmethod
def green(cls, s):
"""Wrap text in green."""
return cls._wrap_colour(s, cls.GREEN)
@classmethod
def purple(cls, s):
"""Wrap text in purple."""
return cls._wrap_colour(s, cls.PURPLE)
@classmethod
def red(cls, s):
"""Wrap text in red."""
return cls._wrap_colour(s, cls.RED)
@classmethod
def underline(cls, s):
"""Wrap text in underline."""
return cls._wrap_colour(s, cls.UNDERLINE)
@classmethod
def yellow(cls, s):
"""Wrap text in yellow."""
return cls._wrap_colour(s, cls.YELLOW)
|
fmfn/BayesianOptimization | bayes_opt/util.py | load_logs | python | def load_logs(optimizer, logs):
import json
if isinstance(logs, str):
logs = [logs]
for log in logs:
with open(log, "r") as j:
while True:
try:
iteration = next(j)
except StopIteration:
break
iteration = json.loads(iteration)
try:
optimizer.register(
params=iteration["params"],
target=iteration["target"],
)
except KeyError:
pass
return optimizer | Load previous ... | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/util.py#L130-L156 | [
"def register(self, params, target):\n \"\"\"Expect observation with known target\"\"\"\n self._space.register(params, target)\n self.dispatch(Events.OPTMIZATION_STEP)\n"
] | import warnings
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize
def acq_max(ac, gp, y_max, bounds, random_state, n_warmup=100000, n_iter=250):
"""
A function to find the maximum of the acquisition function
It uses a combination of random sampling (cheap) and the 'L-BFGS-B'
optimization method. First by sampling `n_warmup` (1e5) points at random,
and then running L-BFGS-B from `n_iter` (250) random starting points.
Parameters
----------
:param ac:
The acquisition function object that return its point-wise value.
:param gp:
A gaussian process fitted to the relevant data.
:param y_max:
The current maximum known value of the target function.
:param bounds:
The variables bounds to limit the search of the acq max.
:param random_state:
instance of np.RandomState random number generator
:param n_warmup:
number of times to randomly sample the aquisition function
:param n_iter:
number of times to run scipy.minimize
Returns
-------
:return: x_max, The arg max of the acquisition function.
"""
# Warm up with random points
x_tries = random_state.uniform(bounds[:, 0], bounds[:, 1],
size=(n_warmup, bounds.shape[0]))
ys = ac(x_tries, gp=gp, y_max=y_max)
x_max = x_tries[ys.argmax()]
max_acq = ys.max()
# Explore the parameter space more throughly
x_seeds = random_state.uniform(bounds[:, 0], bounds[:, 1],
size=(n_iter, bounds.shape[0]))
for x_try in x_seeds:
# Find the minimum of minus the acquisition function
res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
x_try.reshape(1, -1),
bounds=bounds,
method="L-BFGS-B")
# See if success
if not res.success:
continue
# Store it if better than previous minimum(maximum).
if max_acq is None or -res.fun[0] >= max_acq:
x_max = res.x
max_acq = -res.fun[0]
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
return np.clip(x_max, bounds[:, 0], bounds[:, 1])
class UtilityFunction(object):
"""
An object to compute the acquisition functions.
"""
def __init__(self, kind, kappa, xi):
"""
If UCB is to be used, a constant kappa is needed.
"""
self.kappa = kappa
self.xi = xi
if kind not in ['ucb', 'ei', 'poi']:
err = "The utility function " \
"{} has not been implemented, " \
"please choose one of ucb, ei, or poi.".format(kind)
raise NotImplementedError(err)
else:
self.kind = kind
def utility(self, x, gp, y_max):
if self.kind == 'ucb':
return self._ucb(x, gp, self.kappa)
if self.kind == 'ei':
return self._ei(x, gp, y_max, self.xi)
if self.kind == 'poi':
return self._poi(x, gp, y_max, self.xi)
@staticmethod
def _ucb(x, gp, kappa):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mean, std = gp.predict(x, return_std=True)
return mean + kappa * std
@staticmethod
def _ei(x, gp, y_max, xi):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mean, std = gp.predict(x, return_std=True)
z = (mean - y_max - xi)/std
return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z)
@staticmethod
def _poi(x, gp, y_max, xi):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mean, std = gp.predict(x, return_std=True)
z = (mean - y_max - xi)/std
return norm.cdf(z)
def ensure_rng(random_state=None):
"""
Creates a random number generator based on an optional seed. This can be
an integer or another random state for a seeded rng, or None for an
unseeded rng.
"""
if random_state is None:
random_state = np.random.RandomState()
elif isinstance(random_state, int):
random_state = np.random.RandomState(random_state)
else:
assert isinstance(random_state, np.random.RandomState)
return random_state
class Colours:
"""Print in nice colours."""
BLUE = '\033[94m'
BOLD = '\033[1m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
END = '\033[0m'
GREEN = '\033[92m'
PURPLE = '\033[95m'
RED = '\033[91m'
UNDERLINE = '\033[4m'
YELLOW = '\033[93m'
@classmethod
def _wrap_colour(cls, s, colour):
return colour + s + cls.END
@classmethod
def black(cls, s):
"""Wrap text in black."""
return cls._wrap_colour(s, cls.END)
@classmethod
def blue(cls, s):
"""Wrap text in blue."""
return cls._wrap_colour(s, cls.BLUE)
@classmethod
def bold(cls, s):
"""Wrap text in bold."""
return cls._wrap_colour(s, cls.BOLD)
@classmethod
def cyan(cls, s):
"""Wrap text in cyan."""
return cls._wrap_colour(s, cls.CYAN)
@classmethod
def darkcyan(cls, s):
"""Wrap text in darkcyan."""
return cls._wrap_colour(s, cls.DARKCYAN)
@classmethod
def green(cls, s):
"""Wrap text in green."""
return cls._wrap_colour(s, cls.GREEN)
@classmethod
def purple(cls, s):
"""Wrap text in purple."""
return cls._wrap_colour(s, cls.PURPLE)
@classmethod
def red(cls, s):
"""Wrap text in red."""
return cls._wrap_colour(s, cls.RED)
@classmethod
def underline(cls, s):
"""Wrap text in underline."""
return cls._wrap_colour(s, cls.UNDERLINE)
@classmethod
def yellow(cls, s):
"""Wrap text in yellow."""
return cls._wrap_colour(s, cls.YELLOW)
|
fmfn/BayesianOptimization | bayes_opt/util.py | ensure_rng | python | def ensure_rng(random_state=None):
if random_state is None:
random_state = np.random.RandomState()
elif isinstance(random_state, int):
random_state = np.random.RandomState(random_state)
else:
assert isinstance(random_state, np.random.RandomState)
return random_state | Creates a random number generator based on an optional seed. This can be
an integer or another random state for a seeded rng, or None for an
unseeded rng. | train | https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/util.py#L159-L171 | null | import warnings
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize
def acq_max(ac, gp, y_max, bounds, random_state, n_warmup=100000, n_iter=250):
"""
A function to find the maximum of the acquisition function
It uses a combination of random sampling (cheap) and the 'L-BFGS-B'
optimization method. First by sampling `n_warmup` (1e5) points at random,
and then running L-BFGS-B from `n_iter` (250) random starting points.
Parameters
----------
:param ac:
The acquisition function object that return its point-wise value.
:param gp:
A gaussian process fitted to the relevant data.
:param y_max:
The current maximum known value of the target function.
:param bounds:
The variables bounds to limit the search of the acq max.
:param random_state:
instance of np.RandomState random number generator
:param n_warmup:
number of times to randomly sample the aquisition function
:param n_iter:
number of times to run scipy.minimize
Returns
-------
:return: x_max, The arg max of the acquisition function.
"""
# Warm up with random points
x_tries = random_state.uniform(bounds[:, 0], bounds[:, 1],
size=(n_warmup, bounds.shape[0]))
ys = ac(x_tries, gp=gp, y_max=y_max)
x_max = x_tries[ys.argmax()]
max_acq = ys.max()
# Explore the parameter space more throughly
x_seeds = random_state.uniform(bounds[:, 0], bounds[:, 1],
size=(n_iter, bounds.shape[0]))
for x_try in x_seeds:
# Find the minimum of minus the acquisition function
res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
x_try.reshape(1, -1),
bounds=bounds,
method="L-BFGS-B")
# See if success
if not res.success:
continue
# Store it if better than previous minimum(maximum).
if max_acq is None or -res.fun[0] >= max_acq:
x_max = res.x
max_acq = -res.fun[0]
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
return np.clip(x_max, bounds[:, 0], bounds[:, 1])
class UtilityFunction(object):
"""
An object to compute the acquisition functions.
"""
def __init__(self, kind, kappa, xi):
"""
If UCB is to be used, a constant kappa is needed.
"""
self.kappa = kappa
self.xi = xi
if kind not in ['ucb', 'ei', 'poi']:
err = "The utility function " \
"{} has not been implemented, " \
"please choose one of ucb, ei, or poi.".format(kind)
raise NotImplementedError(err)
else:
self.kind = kind
def utility(self, x, gp, y_max):
if self.kind == 'ucb':
return self._ucb(x, gp, self.kappa)
if self.kind == 'ei':
return self._ei(x, gp, y_max, self.xi)
if self.kind == 'poi':
return self._poi(x, gp, y_max, self.xi)
@staticmethod
def _ucb(x, gp, kappa):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mean, std = gp.predict(x, return_std=True)
return mean + kappa * std
@staticmethod
def _ei(x, gp, y_max, xi):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mean, std = gp.predict(x, return_std=True)
z = (mean - y_max - xi)/std
return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z)
@staticmethod
def _poi(x, gp, y_max, xi):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mean, std = gp.predict(x, return_std=True)
z = (mean - y_max - xi)/std
return norm.cdf(z)
def load_logs(optimizer, logs):
"""Load previous ...
"""
import json
if isinstance(logs, str):
logs = [logs]
for log in logs:
with open(log, "r") as j:
while True:
try:
iteration = next(j)
except StopIteration:
break
iteration = json.loads(iteration)
try:
optimizer.register(
params=iteration["params"],
target=iteration["target"],
)
except KeyError:
pass
return optimizer
class Colours:
"""Print in nice colours."""
BLUE = '\033[94m'
BOLD = '\033[1m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
END = '\033[0m'
GREEN = '\033[92m'
PURPLE = '\033[95m'
RED = '\033[91m'
UNDERLINE = '\033[4m'
YELLOW = '\033[93m'
@classmethod
def _wrap_colour(cls, s, colour):
return colour + s + cls.END
@classmethod
def black(cls, s):
"""Wrap text in black."""
return cls._wrap_colour(s, cls.END)
@classmethod
def blue(cls, s):
"""Wrap text in blue."""
return cls._wrap_colour(s, cls.BLUE)
@classmethod
def bold(cls, s):
"""Wrap text in bold."""
return cls._wrap_colour(s, cls.BOLD)
@classmethod
def cyan(cls, s):
"""Wrap text in cyan."""
return cls._wrap_colour(s, cls.CYAN)
@classmethod
def darkcyan(cls, s):
"""Wrap text in darkcyan."""
return cls._wrap_colour(s, cls.DARKCYAN)
@classmethod
def green(cls, s):
"""Wrap text in green."""
return cls._wrap_colour(s, cls.GREEN)
@classmethod
def purple(cls, s):
"""Wrap text in purple."""
return cls._wrap_colour(s, cls.PURPLE)
@classmethod
def red(cls, s):
"""Wrap text in red."""
return cls._wrap_colour(s, cls.RED)
@classmethod
def underline(cls, s):
"""Wrap text in underline."""
return cls._wrap_colour(s, cls.UNDERLINE)
@classmethod
def yellow(cls, s):
"""Wrap text in yellow."""
return cls._wrap_colour(s, cls.YELLOW)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient._get_key_file_path | python | def _get_key_file_path():
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME) | Return the key file path. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L39-L45 | null | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.load_key_file | python | def load_key_file(self):
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip] | Try to load the client key for the current ip. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L47-L66 | [
"def _get_key_file_path():\n \"\"\"Return the key file path.\"\"\"\n if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),\n os.W_OK):\n return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)\n\n return os.path.join(os.getcwd(), KEY_FILE_NAME)\n"
] | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.save_key_file | python | def save_key_file(self):
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict)) | Save the current client key. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L68-L89 | [
"def _get_key_file_path():\n \"\"\"Return the key file path.\"\"\"\n if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),\n os.W_OK):\n return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)\n\n return os.path.join(os.getcwd(), KEY_FILE_NAME)\n"
] | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient._send_register_payload | python | def _send_register_payload(self, websocket):
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file() | Send the register payload. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L92-L112 | null | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient._register | python | def _register(self):
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close() | Register wrapper. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L119-L137 | null | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.register | python | def register(self):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register()) | Pair client with tv. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L139-L143 | null | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient._command | python | def _command(self, msg):
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close() | Send a command to the tv. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L146-L172 | null | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.command | python | def command(self, request_type, uri, payload):
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close() | Build and send a command. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L174-L195 | null | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.send_message | python | def send_message(self, message, icon_path=None):
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
}) | Show a floating message. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L201-L215 | [
"def request(self, uri, payload=None):\n \"\"\"Send a request.\"\"\"\n self.command('request', uri, payload)\n"
] | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.get_apps | python | def get_apps(self):
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints') | Return all apps. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L218-L221 | [
"def request(self, uri, payload=None):\n \"\"\"Send a request.\"\"\"\n self.command('request', uri, payload)\n"
] | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.get_current_app | python | def get_current_app(self):
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId') | Get the current app id. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L223-L226 | [
"def request(self, uri, payload=None):\n \"\"\"Send a request.\"\"\"\n self.command('request', uri, payload)\n"
] | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.get_services | python | def get_services(self):
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services') | Get all services. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L255-L258 | [
"def request(self, uri, payload=None):\n \"\"\"Send a request.\"\"\"\n self.command('request', uri, payload)\n"
] | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.get_software_info | python | def get_software_info(self):
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload') | Return the current software status. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L260-L263 | [
"def request(self, uri, payload=None):\n \"\"\"Send a request.\"\"\"\n self.command('request', uri, payload)\n"
] | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.get_inputs | python | def get_inputs(self):
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices') | Get all inputs. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L283-L286 | [
"def request(self, uri, payload=None):\n \"\"\"Send a request.\"\"\"\n self.command('request', uri, payload)\n"
] | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.get_audio_status | python | def get_audio_status(self):
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload') | Get the current audio status | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L299-L302 | [
"def request(self, uri, payload=None):\n \"\"\"Send a request.\"\"\"\n self.command('request', uri, payload)\n"
] | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.get_volume | python | def get_volume(self):
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume') | Get the current volume. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L314-L317 | [
"def request(self, uri, payload=None):\n \"\"\"Send a request.\"\"\"\n self.command('request', uri, payload)\n"
] | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.set_volume | python | def set_volume(self, volume):
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
}) | Set volume. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L319-L324 | [
"def request(self, uri, payload=None):\n \"\"\"Send a request.\"\"\"\n self.command('request', uri, payload)\n"
] | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.get_channels | python | def get_channels(self):
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList') | Get all tv channels. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L343-L346 | [
"def request(self, uri, payload=None):\n \"\"\"Send a request.\"\"\"\n self.command('request', uri, payload)\n"
] | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_current_channel(self):
"""Get the current tv channel."""
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.get_current_channel | python | def get_current_channel(self):
self.request(EP_GET_CURRENT_CHANNEL)
return {} if self.last_response is None else self.last_response.get('payload') | Get the current tv channel. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L348-L351 | [
"def request(self, uri, payload=None):\n \"\"\"Send a request.\"\"\"\n self.command('request', uri, payload)\n"
] | class WebOsClient(object):
def __init__(self, ip, key_file_path=None, timeout_connect=2):
"""Initialize the client."""
self.ip = ip
self.port = 3000
self.key_file_path = key_file_path
self.client_key = None
self.web_socket = None
self.command_count = 0
self.last_response = None
self.timeout_connect = timeout_connect
self.load_key_file()
@staticmethod
def _get_key_file_path():
"""Return the key file path."""
if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
os.W_OK):
return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)
return os.path.join(os.getcwd(), KEY_FILE_NAME)
def load_key_file(self):
"""Try to load the client key for the current ip."""
self.client_key = None
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
key_dict = {}
logger.debug('load keyfile from %s', key_file_path);
if os.path.isfile(key_file_path):
with open(key_file_path, 'r') as f:
raw_data = f.read()
if raw_data:
key_dict = json.loads(raw_data)
logger.debug('getting client_key for %s from %s', self.ip, key_file_path);
if self.ip in key_dict:
self.client_key = key_dict[self.ip]
def save_key_file(self):
"""Save the current client key."""
if self.client_key is None:
return
if self.key_file_path:
key_file_path = self.key_file_path
else:
key_file_path = self._get_key_file_path()
logger.debug('save keyfile to %s', key_file_path);
with open(key_file_path, 'w+') as f:
raw_data = f.read()
key_dict = {}
if raw_data:
key_dict = json.loads(raw_data)
key_dict[self.ip] = self.client_key
f.write(json.dumps(key_dict))
@asyncio.coroutine
def _send_register_payload(self, websocket):
"""Send the register payload."""
file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)
data = codecs.open(file, 'r', 'utf-8')
raw_handshake = data.read()
handshake = json.loads(raw_handshake)
handshake['payload']['client-key'] = self.client_key
yield from websocket.send(json.dumps(handshake))
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'response' and \
response['payload']['pairingType'] == 'PROMPT':
raw_response = yield from websocket.recv()
response = json.loads(raw_response)
if response['type'] == 'registered':
self.client_key = response['payload']['client-key']
self.save_key_file()
def is_registered(self):
"""Paired with the tv."""
return self.client_key is not None
@asyncio.coroutine
def _register(self):
"""Register wrapper."""
logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
finally:
logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def register(self):
"""Pair client with tv."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(self._register())
@asyncio.coroutine
def _command(self, msg):
"""Send a command to the tv."""
logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port));
try:
websocket = yield from websockets.connect(
"ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
except:
logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port));
return False
logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port));
try:
yield from self._send_register_payload(websocket)
if not self.client_key:
raise PyLGTVPairException("Unable to pair")
yield from websocket.send(json.dumps(msg))
if msg['type'] == 'request':
raw_response = yield from websocket.recv()
self.last_response = json.loads(raw_response)
finally:
logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port));
yield from websocket.close()
def command(self, request_type, uri, payload):
"""Build and send a command."""
self.command_count += 1
if payload is None:
payload = {}
message = {
'id': "{}_{}".format(type, self.command_count),
'type': request_type,
'uri': "ssap://{}".format(uri),
'payload': payload,
}
self.last_response = None
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
finally:
loop.close()
def request(self, uri, payload=None):
"""Send a request."""
self.command('request', uri, payload)
def send_message(self, message, icon_path=None):
"""Show a floating message."""
icon_encoded_string = ''
icon_extension = ''
if icon_path is not None:
icon_extension = os.path.splitext(icon_path)[1][1:]
with open(icon_path, 'rb') as icon_file:
icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')
self.request(EP_SHOW_MESSAGE, {
'message': message,
'iconData': icon_encoded_string,
'iconExtension': icon_extension
})
# Apps
def get_apps(self):
"""Return all apps."""
self.request(EP_GET_APPS)
return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')
def get_current_app(self):
"""Get the current app id."""
self.request(EP_GET_CURRENT_APP_INFO)
return None if self.last_response is None else self.last_response.get('payload').get('appId')
def launch_app(self, app):
"""Launch an app."""
self.command('request', EP_LAUNCH, {
'id': app
})
def launch_app_with_params(self, app, params):
"""Launch an app with parameters."""
self.request(EP_LAUNCH, {
'id': app,
'params': params
})
def launch_app_with_content_id(self, app, contentId):
"""Launch an app with contentId."""
self.request(EP_LAUNCH, {
'id': app,
'contentId': contentId
})
def close_app(self, app):
"""Close the current app."""
self.request(EP_LAUNCHER_CLOSE, {
'id': app
})
# Services
def get_services(self):
"""Get all services."""
self.request(EP_GET_SERVICES)
return {} if self.last_response is None else self.last_response.get('payload').get('services')
def get_software_info(self):
"""Return the current software status."""
self.request(EP_GET_SOFTWARE_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def power_off(self):
"""Play media."""
self.request(EP_POWER_OFF)
def power_on(self):
"""Play media."""
self.request(EP_POWER_ON)
# 3D Mode
def turn_3d_on(self):
"""Turn 3D on."""
self.request(EP_3D_ON)
def turn_3d_off(self):
"""Turn 3D off."""
self.request(EP_3D_OFF)
# Inputs
def get_inputs(self):
"""Get all inputs."""
self.request(EP_GET_INPUTS)
return {} if self.last_response is None else self.last_response.get('payload').get('devices')
def get_input(self):
"""Get current input."""
return self.get_current_app()
def set_input(self, input):
"""Set the current input."""
self.request(EP_SET_INPUT, {
'inputId': input
})
# Audio
def get_audio_status(self):
"""Get the current audio status"""
self.request(EP_GET_AUDIO_STATUS)
return {} if self.last_response is None else self.last_response.get('payload')
def get_muted(self):
"""Get mute status."""
return self.get_audio_status().get('mute')
def set_mute(self, mute):
"""Set mute."""
self.request(EP_SET_MUTE, {
'mute': mute
})
def get_volume(self):
"""Get the current volume."""
self.request(EP_GET_VOLUME)
return 0 if self.last_response is None else self.last_response.get('payload').get('volume')
def set_volume(self, volume):
"""Set volume."""
volume = max(0, volume)
self.request(EP_SET_VOLUME, {
'volume': volume
})
def volume_up(self):
"""Volume up."""
self.request(EP_VOLUME_UP)
def volume_down(self):
"""Volume down."""
self.request(EP_VOLUME_DOWN)
# TV Channel
def channel_up(self):
"""Channel up."""
self.request(EP_TV_CHANNEL_UP)
def channel_down(self):
"""Channel down."""
self.request(EP_TV_CHANNEL_DOWN)
def get_channels(self):
"""Get all tv channels."""
self.request(EP_GET_TV_CHANNELS)
return {} if self.last_response is None else self.last_response.get('payload').get('channelList')
def get_channel_info(self):
"""Get the current channel info."""
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload')
def set_channel(self, channel):
"""Set the current channel."""
self.request(EP_SET_CHANNEL, {
'channelId': channel
})
# Media control
def play(self):
"""Play media."""
self.request(EP_MEDIA_PLAY)
def pause(self):
"""Pause media."""
self.request(EP_MEDIA_PAUSE)
def stop(self):
"""Stop media."""
self.request(EP_MEDIA_STOP)
def close(self):
"""Close media."""
self.request(EP_MEDIA_CLOSE)
def rewind(self):
"""Rewind media."""
self.request(EP_MEDIA_REWIND)
def fast_forward(self):
"""Fast Forward media."""
self.request(EP_MEDIA_FAST_FORWARD)
# Keys
def send_enter_key(self):
"""Send enter key."""
self.request(EP_SEND_ENTER)
def send_delete_key(self):
"""Send delete key."""
self.request(EP_SEND_DELETE)
# Web
def open_url(self, url):
"""Open URL."""
self.request(EP_OPEN, {
'target': url
})
def close_web(self):
"""Close web app."""
self.request(EP_CLOSE_WEB_APP)
|
TheRealLink/pylgtv | pylgtv/webos_client.py | WebOsClient.get_channel_info | python | def get_channel_info(self):
self.request(EP_GET_CHANNEL_INFO)
return {} if self.last_response is None else self.last_response.get('payload') | Get the current channel info. | train | https://github.com/TheRealLink/pylgtv/blob/a7d9ad87ce47e77180fe9262da785465219f4ed6/pylgtv/webos_client.py#L353-L356 | [
"def request(self, uri, payload=None):\n \"\"\"Send a request.\"\"\"\n self.command('request', uri, payload)\n"
class WebOsClient(object):
    """Client for an LG webOS TV's ssap:// websocket API.

    Handles pairing (client-key storage on disk) and sends one-shot
    commands, opening a fresh websocket connection per command.
    """

    def __init__(self, ip, key_file_path=None, timeout_connect=2):
        """Initialize the client.

        Args:
            ip: IP address of the TV.
            key_file_path: optional explicit path of the JSON key store.
            timeout_connect: websocket connect timeout in seconds.
        """
        self.ip = ip
        self.port = 3000
        self.key_file_path = key_file_path
        self.client_key = None
        self.web_socket = None
        self.command_count = 0
        self.last_response = None
        self.timeout_connect = timeout_connect

        self.load_key_file()

    @staticmethod
    def _get_key_file_path():
        """Return the key file path (home directory if writable, else cwd)."""
        if os.getenv(USER_HOME) is not None and os.access(os.getenv(USER_HOME),
                                                          os.W_OK):
            return os.path.join(os.getenv(USER_HOME), KEY_FILE_NAME)

        return os.path.join(os.getcwd(), KEY_FILE_NAME)

    def load_key_file(self):
        """Try to load the client key for the current ip."""
        self.client_key = None
        if self.key_file_path:
            key_file_path = self.key_file_path
        else:
            key_file_path = self._get_key_file_path()
        key_dict = {}

        logger.debug('load keyfile from %s', key_file_path)
        if os.path.isfile(key_file_path):
            with open(key_file_path, 'r') as f:
                raw_data = f.read()
                if raw_data:
                    key_dict = json.loads(raw_data)

        logger.debug('getting client_key for %s from %s', self.ip, key_file_path)
        if self.ip in key_dict:
            self.client_key = key_dict[self.ip]

    def save_key_file(self):
        """Save the current client key, keeping keys stored for other TVs.

        BUG FIX: the original opened the file with mode 'w+' and then read
        it, but 'w+' truncates on open, so the read always returned '' and
        previously stored keys of other TVs were silently dropped.
        Read the existing store first, then rewrite the merged dict.
        """
        if self.client_key is None:
            return

        if self.key_file_path:
            key_file_path = self.key_file_path
        else:
            key_file_path = self._get_key_file_path()

        logger.debug('save keyfile to %s', key_file_path)

        key_dict = {}
        if os.path.isfile(key_file_path):
            with open(key_file_path, 'r') as f:
                raw_data = f.read()
                if raw_data:
                    key_dict = json.loads(raw_data)

        key_dict[self.ip] = self.client_key
        with open(key_file_path, 'w') as f:
            f.write(json.dumps(key_dict))

    @asyncio.coroutine
    def _send_register_payload(self, websocket):
        """Send the register payload.

        On a PROMPT pairing response the user must confirm on the TV;
        wait for the 'registered' message and persist the client key.
        """
        file = os.path.join(os.path.dirname(__file__), HANDSHAKE_FILE_NAME)

        data = codecs.open(file, 'r', 'utf-8')
        raw_handshake = data.read()

        handshake = json.loads(raw_handshake)
        handshake['payload']['client-key'] = self.client_key

        yield from websocket.send(json.dumps(handshake))
        raw_response = yield from websocket.recv()
        response = json.loads(raw_response)

        if response['type'] == 'response' and \
                response['payload']['pairingType'] == 'PROMPT':
            raw_response = yield from websocket.recv()
            response = json.loads(raw_response)
            if response['type'] == 'registered':
                self.client_key = response['payload']['client-key']
                self.save_key_file()

    def is_registered(self):
        """Paired with the tv."""
        return self.client_key is not None

    @asyncio.coroutine
    def _register(self):
        """Register wrapper."""
        logger.debug('register on %s', "ws://{}:{}".format(self.ip, self.port))
        try:
            websocket = yield from websockets.connect(
                "ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
        except Exception:  # narrowed from a bare except; keep best-effort semantics
            logger.error('register failed to connect to %s', "ws://{}:{}".format(self.ip, self.port))
            return False
        logger.debug('register websocket connected to %s', "ws://{}:{}".format(self.ip, self.port))

        try:
            yield from self._send_register_payload(websocket)
        finally:
            logger.debug('close register connection to %s', "ws://{}:{}".format(self.ip, self.port))
            yield from websocket.close()

    def register(self):
        """Pair client with tv."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(self._register())
        finally:
            # The original leaked the loop; close it like command() does.
            loop.close()

    @asyncio.coroutine
    def _command(self, msg):
        """Send a command to the tv."""
        logger.debug('send command to %s', "ws://{}:{}".format(self.ip, self.port))
        try:
            websocket = yield from websockets.connect(
                "ws://{}:{}".format(self.ip, self.port), timeout=self.timeout_connect)
        except Exception:  # narrowed from a bare except; keep best-effort semantics
            logger.debug('command failed to connect to %s', "ws://{}:{}".format(self.ip, self.port))
            return False
        logger.debug('command websocket connected to %s', "ws://{}:{}".format(self.ip, self.port))

        try:
            yield from self._send_register_payload(websocket)

            if not self.client_key:
                raise PyLGTVPairException("Unable to pair")

            yield from websocket.send(json.dumps(msg))

            if msg['type'] == 'request':
                raw_response = yield from websocket.recv()
                self.last_response = json.loads(raw_response)
        finally:
            logger.debug('close command connection to %s', "ws://{}:{}".format(self.ip, self.port))
            yield from websocket.close()

    def command(self, request_type, uri, payload):
        """Build and send a command.

        Args:
            request_type: ssap message type, e.g. 'request'.
            uri: endpoint below the ssap:// scheme.
            payload: dict payload, or None for an empty payload.
        """
        self.command_count += 1

        if payload is None:
            payload = {}

        message = {
            # BUG FIX: the id was built with the builtin ``type`` (yielding
            # ids like "<class 'type'>_1") instead of the request_type arg.
            'id': "{}_{}".format(request_type, self.command_count),
            'type': request_type,
            'uri': "ssap://{}".format(uri),
            'payload': payload,
        }

        self.last_response = None
        # Create the loop before the try block so the ``finally`` can never
        # reference an unbound ``loop`` if loop creation itself fails.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(asyncio.wait_for(self._command(message), self.timeout_connect, loop=loop))
        finally:
            loop.close()

    def request(self, uri, payload=None):
        """Send a request."""
        self.command('request', uri, payload)

    def send_message(self, message, icon_path=None):
        """Show a floating message, optionally with an icon file."""
        icon_encoded_string = ''
        icon_extension = ''

        if icon_path is not None:
            icon_extension = os.path.splitext(icon_path)[1][1:]
            with open(icon_path, 'rb') as icon_file:
                icon_encoded_string = base64.b64encode(icon_file.read()).decode('ascii')

        self.request(EP_SHOW_MESSAGE, {
            'message': message,
            'iconData': icon_encoded_string,
            'iconExtension': icon_extension
        })

    # Apps
    def get_apps(self):
        """Return all apps."""
        self.request(EP_GET_APPS)
        return {} if self.last_response is None else self.last_response.get('payload').get('launchPoints')

    def get_current_app(self):
        """Get the current app id."""
        self.request(EP_GET_CURRENT_APP_INFO)
        return None if self.last_response is None else self.last_response.get('payload').get('appId')

    def launch_app(self, app):
        """Launch an app."""
        self.command('request', EP_LAUNCH, {
            'id': app
        })

    def launch_app_with_params(self, app, params):
        """Launch an app with parameters."""
        self.request(EP_LAUNCH, {
            'id': app,
            'params': params
        })

    def launch_app_with_content_id(self, app, contentId):
        """Launch an app with contentId."""
        self.request(EP_LAUNCH, {
            'id': app,
            'contentId': contentId
        })

    def close_app(self, app):
        """Close the current app."""
        self.request(EP_LAUNCHER_CLOSE, {
            'id': app
        })

    # Services
    def get_services(self):
        """Get all services."""
        self.request(EP_GET_SERVICES)
        return {} if self.last_response is None else self.last_response.get('payload').get('services')

    def get_software_info(self):
        """Return the current software status."""
        self.request(EP_GET_SOFTWARE_INFO)
        return {} if self.last_response is None else self.last_response.get('payload')

    def power_off(self):
        """Power off the tv (original docstring wrongly said 'Play media.')."""
        self.request(EP_POWER_OFF)

    def power_on(self):
        """Power on the tv (original docstring wrongly said 'Play media.')."""
        self.request(EP_POWER_ON)

    # 3D Mode
    def turn_3d_on(self):
        """Turn 3D on."""
        self.request(EP_3D_ON)

    def turn_3d_off(self):
        """Turn 3D off."""
        self.request(EP_3D_OFF)

    # Inputs
    def get_inputs(self):
        """Get all inputs."""
        self.request(EP_GET_INPUTS)
        return {} if self.last_response is None else self.last_response.get('payload').get('devices')

    def get_input(self):
        """Get current input."""
        return self.get_current_app()

    def set_input(self, input):
        """Set the current input."""
        self.request(EP_SET_INPUT, {
            'inputId': input
        })

    # Audio
    def get_audio_status(self):
        """Get the current audio status"""
        self.request(EP_GET_AUDIO_STATUS)
        return {} if self.last_response is None else self.last_response.get('payload')

    def get_muted(self):
        """Get mute status."""
        return self.get_audio_status().get('mute')

    def set_mute(self, mute):
        """Set mute."""
        self.request(EP_SET_MUTE, {
            'mute': mute
        })

    def get_volume(self):
        """Get the current volume."""
        self.request(EP_GET_VOLUME)
        return 0 if self.last_response is None else self.last_response.get('payload').get('volume')

    def set_volume(self, volume):
        """Set volume; negative values are clamped to 0."""
        volume = max(0, volume)
        self.request(EP_SET_VOLUME, {
            'volume': volume
        })

    def volume_up(self):
        """Volume up."""
        self.request(EP_VOLUME_UP)

    def volume_down(self):
        """Volume down."""
        self.request(EP_VOLUME_DOWN)

    # TV Channel
    def channel_up(self):
        """Channel up."""
        self.request(EP_TV_CHANNEL_UP)

    def channel_down(self):
        """Channel down."""
        self.request(EP_TV_CHANNEL_DOWN)

    def get_channels(self):
        """Get all tv channels."""
        self.request(EP_GET_TV_CHANNELS)
        return {} if self.last_response is None else self.last_response.get('payload').get('channelList')

    def get_current_channel(self):
        """Get the current tv channel."""
        self.request(EP_GET_CURRENT_CHANNEL)
        return {} if self.last_response is None else self.last_response.get('payload')

    def set_channel(self, channel):
        """Set the current channel."""
        self.request(EP_SET_CHANNEL, {
            'channelId': channel
        })

    # Media control
    def play(self):
        """Play media."""
        self.request(EP_MEDIA_PLAY)

    def pause(self):
        """Pause media."""
        self.request(EP_MEDIA_PAUSE)

    def stop(self):
        """Stop media."""
        self.request(EP_MEDIA_STOP)

    def close(self):
        """Close media."""
        self.request(EP_MEDIA_CLOSE)

    def rewind(self):
        """Rewind media."""
        self.request(EP_MEDIA_REWIND)

    def fast_forward(self):
        """Fast Forward media."""
        self.request(EP_MEDIA_FAST_FORWARD)

    # Keys
    def send_enter_key(self):
        """Send enter key."""
        self.request(EP_SEND_ENTER)

    def send_delete_key(self):
        """Send delete key."""
        self.request(EP_SEND_DELETE)

    # Web
    def open_url(self, url):
        """Open URL."""
        self.request(EP_OPEN, {
            'target': url
        })

    def close_web(self):
        """Close web app."""
        self.request(EP_CLOSE_WEB_APP)
|
def parse_cron_line(self, line):
    """Extract the schedule part (first five fields) from a crontab line.

    Args:
        line: one raw line from a crontab file.
    Returns:
        The whitespace-normalized time specification, or None for blank
        lines, comments and lines that do not match ``self.rex``.
    """
    candidate = line.strip()
    if not candidate or candidate.startswith('#'):
        return None
    match = self.rex.search(candidate)
    if match is None:
        return None
    return ' '.join(match.group(1).split())
Args:
line: crontab line
Returns:
class CrontabReader(object):
    """Minimal example that reads /etc/crontab-style files and prints a
    human readable description for every entry."""

    # Captures the first five whitespace-separated schedule fields; note
    # each field is limited to 1-3 non-space characters by \S{1,3}.
    rex = re.compile(r"^(\S{1,3}\s+\S{1,3}\s+\S{1,3}\s+\S{1,3}\s+\S{1,3}).+$")

    def __init__(self, cronfile):
        """Initialize CrontabReader and print a description per entry.

        Args:
            cronfile: Path to cronfile
        Returns:
            None
        """
        options = Options()
        options.day_of_week_start_index_zero = False
        options.use_24hour_time_format = True
        with open(cronfile) as handle:
            for raw_line in handle.readlines():
                schedule = self.parse_cron_line(raw_line)
                if schedule:
                    print("{} -> {}".format(schedule, ExpressionDescriptor(schedule, options)))
|
def parse(self):
    """Parses the cron expression string

    Returns:
        A 7 part string array, one part for each component of the cron
        expression (seconds, minutes, etc.)
    Raises:
        MissingFieldException: if _expression is empty or None
        FormatException: if _expression has wrong format
    """
    if not self._expression:
        raise MissingFieldException("ExpressionDescriptor.expression")

    fields = self._expression.split()
    count = len(fields)
    # One slot per component: sec, min, hour, DOM, month, DOW, year.
    parsed = [''] * 7

    if count < 5:
        raise FormatException(
            "Error: Expression only has {0} parts. At least 5 part are required.".format(
                count))
    elif count == 5:
        # 5 part cron: no seconds field, shift everything right by one.
        parsed[1:6] = fields
    elif count == 6:
        # A trailing 4-digit field is a year (and no seconds were given);
        # otherwise the extra leading field is the seconds component.
        if re.search(r"\d{4}$", fields[5]) is not None:
            parsed[1:7] = fields
        else:
            parsed[0:6] = fields
    elif count == 7:
        parsed = fields
    else:
        raise FormatException(
            "Error: Expression has too many parts ({0}). Expression must not have more than 7 parts.".format(
                count))

    self.normalize_expression(parsed)

    return parsed
Returns:
A 7 part string array, one part for each component of the cron expression (seconds, minutes, etc.)
Raises:
MissingFieldException: if _expression is empty or None
FormatException: if _expression has wrong format | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionParser.py#L72-L114 | [
"def normalize_expression(self, expression_parts):\n \"\"\"Converts cron expression components into consistent, predictable formats.\n Args:\n expression_parts: A 7 part string array, one part for each component of the cron expression\n Returns:\n None\n \"\"\"\n # convert ? to * only for DOM and DOW\n expression_parts[3] = expression_parts[3].replace(\"?\", \"*\")\n expression_parts[5] = expression_parts[5].replace(\"?\", \"*\")\n\n # convert 0/, 1/ to */\n if expression_parts[0].startswith(\"0/\"):\n expression_parts[0] = expression_parts[\n 0].replace(\"0/\", \"*/\") # seconds\n\n if expression_parts[1].startswith(\"0/\"):\n expression_parts[1] = expression_parts[\n 1].replace(\"0/\", \"*/\") # minutes\n\n if expression_parts[2].startswith(\"0/\"):\n expression_parts[2] = expression_parts[\n 2].replace(\"0/\", \"*/\") # hours\n\n if expression_parts[3].startswith(\"1/\"):\n expression_parts[3] = expression_parts[3].replace(\"1/\", \"*/\") # DOM\n\n if expression_parts[4].startswith(\"1/\"):\n expression_parts[4] = expression_parts[\n 4].replace(\"1/\", \"*/\") # Month\n\n if expression_parts[5].startswith(\"1/\"):\n expression_parts[5] = expression_parts[5].replace(\"1/\", \"*/\") # DOW\n\n if expression_parts[6].startswith(\"1/\"):\n expression_parts[6] = expression_parts[6].replace(\"1/\", \"*/\")\n\n # handle DayOfWeekStartIndexZero option where SUN=1 rather than SUN=0\n if self._options.day_of_week_start_index_zero is False:\n expression_parts[5] = self.decrease_days_of_week(expression_parts[5])\n\n if expression_parts[3] == \"?\":\n expression_parts[3] = \"*\"\n\n # convert SUN-SAT format to 0-6 format\n for day_number in self._cron_days:\n expression_parts[5] = expression_parts[5].upper().replace(self._cron_days[day_number], str(day_number))\n\n # convert JAN-DEC format to 1-12 format\n for month_number in self._cron_months:\n expression_parts[4] = expression_parts[4].upper().replace(\n self._cron_months[month_number], str(month_number))\n\n # convert 0 
second to (empty)\n if expression_parts[0] == \"0\":\n expression_parts[0] = ''\n\n # Loop through all parts and apply global normalization\n length = len(expression_parts)\n for i in range(length):\n\n # convert all '*/1' to '*'\n if expression_parts[i] == \"*/1\":\n expression_parts[i] = \"*\"\n\n \"\"\"\n Convert Month,DOW,Year step values with a starting value (i.e. not '*') to between expressions.\n This allows us to reuse the between expression handling for step values.\n\n For Example:\n - month part '3/2' will be converted to '3-12/2' (every 2 months between March and December)\n - DOW part '3/2' will be converted to '3-6/2' (every 2 days between Tuesday and Saturday)\n \"\"\"\n\n if \"/\" in expression_parts[i] and any(exp in expression_parts[i] for exp in ['*', '-', ',']) is False:\n choices = {\n 4: \"12\",\n 5: \"6\",\n 6: \"9999\"\n }\n\n step_range_through = choices.get(i)\n\n if step_range_through is not None:\n parts = expression_parts[i].split('/')\n expression_parts[i] = \"{0}-{1}/{2}\".format(parts[0], step_range_through, parts[1])\n"
] | class ExpressionParser(object):
    """
    Parses and validates a Cron Expression into list of fixed len()
    """

    # Instance state; class-level defaults overwritten per instance in __init__.
    _expression = ''
    _options = None

    # Canonical 3-letter day abbreviations keyed by zero-based cron index.
    _cron_days = {
        0: 'SUN',
        1: 'MON',
        2: 'TUE',
        3: 'WED',
        4: 'THU',
        5: 'FRI',
        6: 'SAT'
    }

    # Canonical 3-letter month abbreviations keyed by one-based cron index.
    _cron_months = {
        1: 'JAN',
        2: 'FEB',
        3: 'MAR',
        4: 'APR',
        5: 'MAY',
        6: 'JUN',
        7: 'JUL',
        8: 'AUG',
        9: 'SEP',
        10: 'OCT',
        11: 'NOV',
        12: 'DEC'
    }

    def __init__(self, expression, options):
        """Initializes a new instance of the ExpressionParser class

        Args:
            expression: The cron expression string
            options: Parsing options
        """
        self._expression = expression
        self._options = options

    """
    @param:
    """

    def normalize_expression(self, expression_parts):
        """Converts cron expression components into consistent, predictable formats.

        Mutates *expression_parts* in place; the order of the rewrite steps
        below matters (e.g. day-name decrement happens before name-to-number
        conversion, so it only touches numeric tokens).

        Args:
            expression_parts: A 7 part string array, one part for each component of the cron expression
        Returns:
            None
        """
        # convert ? to * only for DOM and DOW
        expression_parts[3] = expression_parts[3].replace("?", "*")
        expression_parts[5] = expression_parts[5].replace("?", "*")

        # convert 0/, 1/ to */  (a step starting at the field minimum
        # is the same as 'every n')
        if expression_parts[0].startswith("0/"):
            expression_parts[0] = expression_parts[
                0].replace("0/", "*/")  # seconds

        if expression_parts[1].startswith("0/"):
            expression_parts[1] = expression_parts[
                1].replace("0/", "*/")  # minutes

        if expression_parts[2].startswith("0/"):
            expression_parts[2] = expression_parts[
                2].replace("0/", "*/")  # hours

        if expression_parts[3].startswith("1/"):
            expression_parts[3] = expression_parts[3].replace("1/", "*/")  # DOM

        if expression_parts[4].startswith("1/"):
            expression_parts[4] = expression_parts[
                4].replace("1/", "*/")  # Month

        if expression_parts[5].startswith("1/"):
            expression_parts[5] = expression_parts[5].replace("1/", "*/")  # DOW

        if expression_parts[6].startswith("1/"):
            expression_parts[6] = expression_parts[6].replace("1/", "*/")

        # handle DayOfWeekStartIndexZero option where SUN=1 rather than SUN=0
        if self._options.day_of_week_start_index_zero is False:
            expression_parts[5] = self.decrease_days_of_week(expression_parts[5])

        if expression_parts[3] == "?":
            expression_parts[3] = "*"

        # convert SUN-SAT format to 0-6 format
        for day_number in self._cron_days:
            expression_parts[5] = expression_parts[5].upper().replace(self._cron_days[day_number], str(day_number))

        # convert JAN-DEC format to 1-12 format
        for month_number in self._cron_months:
            expression_parts[4] = expression_parts[4].upper().replace(
                self._cron_months[month_number], str(month_number))

        # convert 0 second to (empty)
        if expression_parts[0] == "0":
            expression_parts[0] = ''

        # Loop through all parts and apply global normalization
        length = len(expression_parts)
        for i in range(length):

            # convert all '*/1' to '*'
            if expression_parts[i] == "*/1":
                expression_parts[i] = "*"

            """
            Convert Month,DOW,Year step values with a starting value (i.e. not '*') to between expressions.
            This allows us to reuse the between expression handling for step values.

            For Example:
            - month part '3/2' will be converted to '3-12/2' (every 2 months between March and December)
            - DOW part '3/2' will be converted to '3-6/2' (every 2 days between Tuesday and Saturday)
            """

            if "/" in expression_parts[i] and any(exp in expression_parts[i] for exp in ['*', '-', ',']) is False:
                choices = {
                    4: "12",
                    5: "6",
                    6: "9999"
                }

                step_range_through = choices.get(i)

                if step_range_through is not None:
                    parts = expression_parts[i].split('/')
                    expression_parts[i] = "{0}-{1}/{2}".format(parts[0], step_range_through, parts[1])

    def decrease_days_of_week(self, day_of_week_expression_part):
        """Shift every numeric day-of-week token down by one, mapping a
        SUN=1 input convention onto the internal SUN=0 indexing.

        Digits directly preceded by '#' or '/' (nth-weekday counts and step
        values) are left untouched.

        NOTE(review): a '0' token becomes '-' here because str(0 - 1)[0]
        is '-'; input is presumably 1-7 when this path runs -- confirm
        with the day_of_week_start_index_zero callers.
        """
        dow_chars = list(day_of_week_expression_part)
        for i, dow_char in enumerate(dow_chars):
            # i == 0 has no predecessor; otherwise skip digits after '#'/'/'
            if i == 0 or dow_chars[i - 1] != '#' and dow_chars[i - 1] != '/':
                try:
                    char_numeric = int(dow_char)
                    dow_chars[i] = str(char_numeric - 1)[0]
                except ValueError:
                    pass
        return ''.join(dow_chars)
|
def normalize_expression(self, expression_parts):
    """Converts cron expression components into consistent, predictable formats.

    Args:
        expression_parts: A 7 part string array, one part for each component of the cron expression
    Returns:
        None
    """
    # '?' is an alias for '*' in the day-of-month and day-of-week fields.
    expression_parts[3] = expression_parts[3].replace("?", "*")
    expression_parts[5] = expression_parts[5].replace("?", "*")

    # A step anchored at the field's minimum ('0/n' for sec/min/hour,
    # '1/n' for DOM/month/DOW/year) means the same as 'every n' ('*/n').
    for idx in (0, 1, 2):
        if expression_parts[idx].startswith("0/"):
            expression_parts[idx] = expression_parts[idx].replace("0/", "*/")
    for idx in (3, 4, 5, 6):
        if expression_parts[idx].startswith("1/"):
            expression_parts[idx] = expression_parts[idx].replace("1/", "*/")

    # handle DayOfWeekStartIndexZero option where SUN=1 rather than SUN=0
    if self._options.day_of_week_start_index_zero is False:
        expression_parts[5] = self.decrease_days_of_week(expression_parts[5])

    if expression_parts[3] == "?":
        expression_parts[3] = "*"

    # Convert SUN-SAT names to 0-6 and JAN-DEC names to 1-12.
    for number in self._cron_days:
        expression_parts[5] = expression_parts[5].upper().replace(
            self._cron_days[number], str(number))
    for number in self._cron_months:
        expression_parts[4] = expression_parts[4].upper().replace(
            self._cron_months[number], str(number))

    # A seconds field of exactly '0' carries no information -> empty.
    if expression_parts[0] == "0":
        expression_parts[0] = ''

    # Upper bounds used to turn 'start/step' into 'start-max/step' below
    # (month, DOW and year fields only).
    step_maximums = {
        4: "12",
        5: "6",
        6: "9999"
    }

    for idx in range(len(expression_parts)):
        # '*/1' is simply '*'
        if expression_parts[idx] == "*/1":
            expression_parts[idx] = "*"

        # Month/DOW/Year step values with an explicit start (no '*', '-'
        # or ',') become between-expressions, e.g. month '3/2' -> '3-12/2',
        # so the between handling can be reused for step values.
        part = expression_parts[idx]
        if "/" in part and not any(token in part for token in ('*', '-', ',')):
            step_max = step_maximums.get(idx)
            if step_max is not None:
                pieces = part.split('/')
                expression_parts[idx] = "{0}-{1}/{2}".format(pieces[0], step_max, pieces[1])
Args:
expression_parts: A 7 part string array, one part for each component of the cron expression
Returns:
None | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionParser.py#L121-L206 | [
"def decrease_days_of_week(self, day_of_week_expression_part):\n dow_chars = list(day_of_week_expression_part)\n for i, dow_char in enumerate(dow_chars):\n if i == 0 or dow_chars[i - 1] != '#' and dow_chars[i - 1] != '/':\n try:\n char_numeric = int(dow_char)\n dow_chars[i] = str(char_numeric - 1)[0]\n except ValueError:\n pass\n return ''.join(dow_chars)\n"
] | class ExpressionParser(object):
"""
Parses and validates a Cron Expression into list of fixed len()
"""
_expression = ''
_options = None
_cron_days = {
0: 'SUN',
1: 'MON',
2: 'TUE',
3: 'WED',
4: 'THU',
5: 'FRI',
6: 'SAT'
}
_cron_months = {
1: 'JAN',
2: 'FEB',
3: 'MAR',
4: 'APR',
5: 'MAY',
6: 'JUN',
7: 'JUL',
8: 'AUG',
9: 'SEP',
10: 'OCT',
11: 'NOV',
12: 'DEC'
}
def __init__(self, expression, options):
"""Initializes a new instance of the ExpressionParser class
Args:
expression: The cron expression string
options: Parsing options
"""
self._expression = expression
self._options = options
def parse(self):
"""Parses the cron expression string
Returns:
A 7 part string array, one part for each component of the cron expression (seconds, minutes, etc.)
Raises:
MissingFieldException: if _expression is empty or None
FormatException: if _expression has wrong format
"""
# Initialize all elements of parsed array to empty strings
parsed = ['', '', '', '', '', '', '']
if self._expression is None or len(self._expression) == 0:
raise MissingFieldException("ExpressionDescriptor.expression")
else:
expression_parts_temp = self._expression.split()
expression_parts_temp_length = len(expression_parts_temp)
if expression_parts_temp_length < 5:
raise FormatException(
"Error: Expression only has {0} parts. At least 5 part are required.".format(
expression_parts_temp_length))
elif expression_parts_temp_length == 5:
# 5 part cron so shift array past seconds element
for i, expression_part_temp in enumerate(expression_parts_temp):
parsed[i + 1] = expression_part_temp
elif expression_parts_temp_length == 6:
# If last element ends with 4 digits, a year element has been
# supplied and no seconds element
year_regex = re.compile(r"\d{4}$")
if year_regex.search(expression_parts_temp[5]) is not None:
for i, expression_part_temp in enumerate(expression_parts_temp):
parsed[i + 1] = expression_part_temp
else:
for i, expression_part_temp in enumerate(expression_parts_temp):
parsed[i] = expression_part_temp
elif expression_parts_temp_length == 7:
parsed = expression_parts_temp
else:
raise FormatException(
"Error: Expression has too many parts ({0}). Expression must not have more than 7 parts.".format(
expression_parts_temp_length))
self.normalize_expression(parsed)
return parsed
"""
@param:
"""
def decrease_days_of_week(self, day_of_week_expression_part):
dow_chars = list(day_of_week_expression_part)
for i, dow_char in enumerate(dow_chars):
if i == 0 or dow_chars[i - 1] != '#' and dow_chars[i - 1] != '/':
try:
char_numeric = int(dow_char)
dow_chars[i] = str(char_numeric - 1)[0]
except ValueError:
pass
return ''.join(dow_chars)
|
def get_description(expression, options=None):
    """Generates a human readable string for the Cron Expression

    Args:
        expression: The cron expression string
        options: Options to control the output description
    Returns:
        The cron expression description
    """
    return ExpressionDescriptor(expression, options).get_description(DescriptionTypeEnum.FULL)
Args:
expression: The cron expression string
options: Options to control the output description
Returns:
The cron expression description | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L605-L615 | [
"def get_description(self, description_type=DescriptionTypeEnum.FULL):\n \"\"\"Generates a human readable string for the Cron Expression\n\n Args:\n description_type: Which part(s) of the expression to describe\n Returns:\n The cron expression description\n Raises:\n Exception: if throw_exception_on_parse_error is True\n\n \"\"\"\n try:\n if self._parsed is False:\n parser = ExpressionParser(self._expression, self._options)\n self._expression_parts = parser.parse()\n self._parsed = True\n\n choices = {\n DescriptionTypeEnum.FULL: self.get_full_description,\n DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,\n DescriptionTypeEnum.HOURS: self.get_hours_description,\n DescriptionTypeEnum.MINUTES: self.get_minutes_description,\n DescriptionTypeEnum.SECONDS: self.get_seconds_description,\n DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,\n DescriptionTypeEnum.MONTH: self.get_month_description,\n DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,\n DescriptionTypeEnum.YEAR: self.get_year_description,\n }\n\n description = choices.get(description_type, self.get_seconds_description)()\n\n except Exception as ex:\n if self._options.throw_exception_on_parse_error:\n raise\n else:\n description = str(ex)\n return description\n"
] | # The MIT License (MIT)
#
# Copyright (c) 2016 Adam Schubert
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import datetime
import calendar
from .GetText import GetText
from .CasingTypeEnum import CasingTypeEnum
from .DescriptionTypeEnum import DescriptionTypeEnum
from .ExpressionParser import ExpressionParser
from .Options import Options
from .StringBuilder import StringBuilder
from .Exception import FormatException, WrongArgumentException
class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.get_description | python | def get_description(self, description_type=DescriptionTypeEnum.FULL):
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description | Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L76-L112 | [
"def parse(self):\n \"\"\"Parses the cron expression string\n Returns:\n A 7 part string array, one part for each component of the cron expression (seconds, minutes, etc.)\n Raises:\n MissingFieldException: if _expression is empty or None\n FormatException: if _expression has wrong format\n \"\"\"\n # Initialize all elements of parsed array to empty strings\n parsed = ['', '', '', '', '', '', '']\n\n if self._expression is None or len(self._expression) == 0:\n raise MissingFieldException(\"ExpressionDescriptor.expression\")\n else:\n expression_parts_temp = self._expression.split()\n expression_parts_temp_length = len(expression_parts_temp)\n if expression_parts_temp_length < 5:\n raise FormatException(\n \"Error: Expression only has {0} parts. At least 5 part are required.\".format(\n expression_parts_temp_length))\n elif expression_parts_temp_length == 5:\n # 5 part cron so shift array past seconds element\n for i, expression_part_temp in enumerate(expression_parts_temp):\n parsed[i + 1] = expression_part_temp\n elif expression_parts_temp_length == 6:\n # If last element ends with 4 digits, a year element has been\n # supplied and no seconds element\n year_regex = re.compile(r\"\\d{4}$\")\n if year_regex.search(expression_parts_temp[5]) is not None:\n for i, expression_part_temp in enumerate(expression_parts_temp):\n parsed[i + 1] = expression_part_temp\n else:\n for i, expression_part_temp in enumerate(expression_parts_temp):\n parsed[i] = expression_part_temp\n elif expression_parts_temp_length == 7:\n parsed = expression_parts_temp\n else:\n raise FormatException(\n \"Error: Expression has too many parts ({0}). Expression must not have more than 7 parts.\".format(\n expression_parts_temp_length))\n self.normalize_expression(parsed)\n\n return parsed\n"
] | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.get_full_description | python | def get_full_description(self):
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description | Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L114-L149 | [
"def get_time_of_day_description(self):\n \"\"\"Generates a description for only the TIMEOFDAY portion of the expression\n\n Returns:\n The TIMEOFDAY description\n\n \"\"\"\n seconds_expression = self._expression_parts[0]\n minute_expression = self._expression_parts[1]\n hour_expression = self._expression_parts[2]\n\n description = StringBuilder()\n\n # handle special cases first\n if any(exp in minute_expression for exp in self._special_characters) is False and \\\n any(exp in hour_expression for exp in self._special_characters) is False and \\\n any(exp in seconds_expression for exp in self._special_characters) is False:\n # specific time of day (i.e. 10 14)\n description.append(_(\"At \"))\n description.append(\n self.format_time(\n hour_expression,\n minute_expression,\n seconds_expression))\n elif \"-\" in minute_expression and \\\n \",\" not in minute_expression and \\\n any(exp in hour_expression for exp in self._special_characters) is False:\n # minute range in single hour (i.e. 0-10 11)\n minute_parts = minute_expression.split('-')\n description.append(_(\"Every minute between {0} and {1}\").format(\n self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))\n elif \",\" in hour_expression and \"-\" not in hour_expression and \\\n any(exp in minute_expression for exp in self._special_characters) is False:\n # hours list with single minute (o.e. 
30 6,14,16)\n hour_parts = hour_expression.split(',')\n description.append(_(\"At\"))\n for i, hour_part in enumerate(hour_parts):\n description.append(\" \")\n description.append(\n self.format_time(hour_part, minute_expression))\n\n if i < (len(hour_parts) - 2):\n description.append(\",\")\n\n if i == len(hour_parts) - 2:\n description.append(_(\" and\"))\n else:\n # default time description\n seconds_description = self.get_seconds_description()\n minutes_description = self.get_minutes_description()\n hours_description = self.get_hours_description()\n\n description.append(seconds_description)\n\n if description:\n description.append(\", \")\n\n description.append(minutes_description)\n\n if description:\n description.append(\", \")\n\n description.append(hours_description)\n return str(description)\n",
"def get_day_of_week_description(self):\n \"\"\"Generates a description for only the DAYOFWEEK portion of the expression\n\n Returns:\n The DAYOFWEEK description\n\n \"\"\"\n\n if self._expression_parts[5] == \"*\" and self._expression_parts[3] != \"*\":\n # DOM is specified and DOW is * so to prevent contradiction like \"on day 1 of the month, every day\"\n # we will not specified a DOW description.\n return \"\"\n\n def get_day_name(s):\n exp = s\n if \"#\" in s:\n exp, useless = s.split(\"#\", 2)\n elif \"L\" in s:\n exp = exp.replace(\"L\", '')\n return self.number_to_day(int(exp))\n\n def get_format(s):\n if \"#\" in s:\n day_of_week_of_month = s[s.find(\"#\") + 1:]\n\n try:\n day_of_week_of_month_number = int(day_of_week_of_month)\n choices = {\n 1: _(\"first\"),\n 2: _(\"second\"),\n 3: _(\"third\"),\n 4: _(\"forth\"),\n 5: _(\"fifth\"),\n }\n day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')\n except ValueError:\n day_of_week_of_month_description = ''\n\n formated = \"{}{}{}\".format(_(\", on the \"),\n day_of_week_of_month_description, _(\" {0} of the month\"))\n elif \"L\" in s:\n formated = _(\", on the last {0} of the month\")\n else:\n formated = _(\", only on {0}\")\n\n return formated\n\n return self.get_segment_description(\n self._expression_parts[5],\n _(\", every day\"),\n lambda s: get_day_name(s),\n lambda s: _(\", every {0} days of the week\").format(s),\n lambda s: _(\", {0} through {1}\"),\n lambda s: get_format(s)\n )\n",
"def get_month_description(self):\n \"\"\"Generates a description for only the MONTH portion of the expression\n\n Returns:\n The MONTH description\n\n \"\"\"\n return self.get_segment_description(\n self._expression_parts[4],\n '',\n lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime(\"%B\"),\n lambda s: _(\", every {0} months\").format(s),\n lambda s: _(\", {0} through {1}\"),\n lambda s: _(\", only in {0}\")\n )\n",
"def get_day_of_month_description(self):\n \"\"\"Generates a description for only the DAYOFMONTH portion of the expression\n\n Returns:\n The DAYOFMONTH description\n\n \"\"\"\n expression = self._expression_parts[3]\n expression = expression.replace(\"?\", \"*\")\n\n if expression == \"L\":\n description = _(\", on the last day of the month\")\n elif expression == \"LW\" or expression == \"WL\":\n description = _(\", on the last weekday of the month\")\n else:\n regex = re.compile(\"(\\\\d{1,2}W)|(W\\\\d{1,2})\")\n if regex.match(expression):\n m = regex.match(expression)\n day_number = int(m.group().replace(\"W\", \"\"))\n\n day_string = _(\"first weekday\") if day_number == 1 else _(\"weekday nearest day {0}\").format(\n day_number)\n description = _(\", on the {0} of the month\").format(\n day_string)\n else:\n description = self.get_segment_description(\n expression,\n _(\", every day\"),\n lambda s: s,\n lambda s: _(\", every day\") if s == \"1\" else _(\", every {0} days\"),\n lambda s: _(\", between day {0} and {1} of the month\"),\n lambda s: _(\", on day {0} of the month\")\n )\n\n return description\n",
"def get_year_description(self):\n \"\"\"Generates a description for only the YEAR portion of the expression\n\n Returns:\n The YEAR description\n\n \"\"\"\n\n def format_year(s):\n regex = re.compile(r\"^\\d+$\")\n if regex.match(s):\n year_int = int(s)\n if year_int < 1900:\n return year_int\n return datetime.date(year_int, 1, 1).strftime(\"%Y\")\n else:\n return s\n\n return self.get_segment_description(\n self._expression_parts[6],\n '',\n lambda s: format_year(s),\n lambda s: _(\", every {0} years\").format(s),\n lambda s: _(\", {0} through {1}\"),\n lambda s: _(\", only in {0}\")\n )\n",
"def transform_verbosity(self, description, use_verbose_format):\n \"\"\"Transforms the verbosity of the expression description by stripping verbosity from original description\n Args:\n description: The description to transform\n use_verbose_format: If True, will leave description as it, if False, will strip verbose parts\n second_expression: Seconds part\n Returns:\n The transformed description with proper verbosity\n\n \"\"\"\n if use_verbose_format is False:\n description = description.replace(\n _(\", every minute\"), '')\n description = description.replace(_(\", every hour\"), '')\n description = description.replace(_(\", every day\"), '')\n return description\n",
"def transform_case(self, description, case_type):\n \"\"\"Transforms the case of the expression description, based on options\n Args:\n description: The description to transform\n case_type: The casing type that controls the output casing\n second_expression: Seconds part\n Returns:\n The transformed description with proper casing\n\n \"\"\"\n if case_type == CasingTypeEnum.Sentence:\n description = \"{}{}\".format(\n description[0].upper(),\n description[1:])\n elif case_type == CasingTypeEnum.Title:\n description = description.title()\n else:\n description = description.lower()\n return description\n"
] | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.get_time_of_day_description | python | def get_time_of_day_description(self):
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description) | Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L151-L214 | [
"def append(self, string):\n \"\"\"Appends non empty string\n\n Args:\n string: String to append\n Returns:\n None\n \"\"\"\n if string:\n self.string.append(string)\n"
] | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.get_seconds_description | python | def get_seconds_description(self):
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
) | Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L216-L231 | [
"def get_segment_description(\n self,\n expression,\n all_description,\n get_single_item_description,\n get_interval_description_format,\n get_between_description_format,\n get_description_format\n):\n \"\"\"Returns segment description\n Args:\n expression: Segment to descript\n all_description: *\n get_single_item_description: 1\n get_interval_description_format: 1/2\n get_between_description_format: 1-2\n get_description_format: format get_single_item_description\n Returns:\n segment description\n\n \"\"\"\n description = None\n if expression is None or expression == '':\n description = ''\n elif expression == \"*\":\n description = all_description\n elif any(ext in expression for ext in ['/', '-', ',']) is False:\n description = get_description_format(expression).format(\n get_single_item_description(expression))\n elif \"/\" in expression:\n segments = expression.split('/')\n description = get_interval_description_format(\n segments[1]).format(get_single_item_description(segments[1]))\n\n # interval contains 'between' piece (i.e. 
2-59/3 )\n if \"-\" in segments[0]:\n between_segment_description = self.generate_between_segment_description(\n segments[0], get_between_description_format, get_single_item_description)\n if not between_segment_description.startswith(\", \"):\n description += \", \"\n description += between_segment_description\n elif any(ext in segments[0] for ext in ['*', ',']) is False:\n range_item_description = get_description_format(segments[0]).format(\n get_single_item_description(segments[0])\n )\n range_item_description = range_item_description.replace(\", \", \"\")\n\n description += _(\", starting {0}\").format(range_item_description)\n elif \",\" in expression:\n segments = expression.split(',')\n\n description_content = ''\n for i, segment in enumerate(segments):\n if i > 0 and len(segments) > 2:\n description_content += \",\"\n\n if i < len(segments) - 1:\n description_content += \" \"\n\n if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):\n description_content += _(\" and \")\n\n if \"-\" in segment:\n between_description = self.generate_between_segment_description(\n segment,\n lambda s: _(\", {0} through {1}\"),\n get_single_item_description\n )\n\n between_description = between_description.replace(\", \", \"\")\n\n description_content += between_description\n else:\n description_content += get_single_item_description(segment)\n\n description = get_description_format(\n expression).format(\n description_content)\n elif \"-\" in expression:\n description = self.generate_between_segment_description(\n expression, get_between_description_format, get_single_item_description)\n\n return description\n"
] | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.get_minutes_description | python | def get_minutes_description(self):
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
) | Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L233-L248 | [
"def get_segment_description(\n self,\n expression,\n all_description,\n get_single_item_description,\n get_interval_description_format,\n get_between_description_format,\n get_description_format\n):\n \"\"\"Returns segment description\n Args:\n expression: Segment to descript\n all_description: *\n get_single_item_description: 1\n get_interval_description_format: 1/2\n get_between_description_format: 1-2\n get_description_format: format get_single_item_description\n Returns:\n segment description\n\n \"\"\"\n description = None\n if expression is None or expression == '':\n description = ''\n elif expression == \"*\":\n description = all_description\n elif any(ext in expression for ext in ['/', '-', ',']) is False:\n description = get_description_format(expression).format(\n get_single_item_description(expression))\n elif \"/\" in expression:\n segments = expression.split('/')\n description = get_interval_description_format(\n segments[1]).format(get_single_item_description(segments[1]))\n\n # interval contains 'between' piece (i.e. 
2-59/3 )\n if \"-\" in segments[0]:\n between_segment_description = self.generate_between_segment_description(\n segments[0], get_between_description_format, get_single_item_description)\n if not between_segment_description.startswith(\", \"):\n description += \", \"\n description += between_segment_description\n elif any(ext in segments[0] for ext in ['*', ',']) is False:\n range_item_description = get_description_format(segments[0]).format(\n get_single_item_description(segments[0])\n )\n range_item_description = range_item_description.replace(\", \", \"\")\n\n description += _(\", starting {0}\").format(range_item_description)\n elif \",\" in expression:\n segments = expression.split(',')\n\n description_content = ''\n for i, segment in enumerate(segments):\n if i > 0 and len(segments) > 2:\n description_content += \",\"\n\n if i < len(segments) - 1:\n description_content += \" \"\n\n if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):\n description_content += _(\" and \")\n\n if \"-\" in segment:\n between_description = self.generate_between_segment_description(\n segment,\n lambda s: _(\", {0} through {1}\"),\n get_single_item_description\n )\n\n between_description = between_description.replace(\", \", \"\")\n\n description_content += between_description\n else:\n description_content += get_single_item_description(segment)\n\n description = get_description_format(\n expression).format(\n description_content)\n elif \"-\" in expression:\n description = self.generate_between_segment_description(\n expression, get_between_description_format, get_single_item_description)\n\n return description\n"
] | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.get_hours_description | python | def get_hours_description(self):
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
) | Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L250-L265 | [
"def get_segment_description(\n self,\n expression,\n all_description,\n get_single_item_description,\n get_interval_description_format,\n get_between_description_format,\n get_description_format\n):\n \"\"\"Returns segment description\n Args:\n expression: Segment to descript\n all_description: *\n get_single_item_description: 1\n get_interval_description_format: 1/2\n get_between_description_format: 1-2\n get_description_format: format get_single_item_description\n Returns:\n segment description\n\n \"\"\"\n description = None\n if expression is None or expression == '':\n description = ''\n elif expression == \"*\":\n description = all_description\n elif any(ext in expression for ext in ['/', '-', ',']) is False:\n description = get_description_format(expression).format(\n get_single_item_description(expression))\n elif \"/\" in expression:\n segments = expression.split('/')\n description = get_interval_description_format(\n segments[1]).format(get_single_item_description(segments[1]))\n\n # interval contains 'between' piece (i.e. 
2-59/3 )\n if \"-\" in segments[0]:\n between_segment_description = self.generate_between_segment_description(\n segments[0], get_between_description_format, get_single_item_description)\n if not between_segment_description.startswith(\", \"):\n description += \", \"\n description += between_segment_description\n elif any(ext in segments[0] for ext in ['*', ',']) is False:\n range_item_description = get_description_format(segments[0]).format(\n get_single_item_description(segments[0])\n )\n range_item_description = range_item_description.replace(\", \", \"\")\n\n description += _(\", starting {0}\").format(range_item_description)\n elif \",\" in expression:\n segments = expression.split(',')\n\n description_content = ''\n for i, segment in enumerate(segments):\n if i > 0 and len(segments) > 2:\n description_content += \",\"\n\n if i < len(segments) - 1:\n description_content += \" \"\n\n if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):\n description_content += _(\" and \")\n\n if \"-\" in segment:\n between_description = self.generate_between_segment_description(\n segment,\n lambda s: _(\", {0} through {1}\"),\n get_single_item_description\n )\n\n between_description = between_description.replace(\", \", \"\")\n\n description_content += between_description\n else:\n description_content += get_single_item_description(segment)\n\n description = get_description_format(\n expression).format(\n description_content)\n elif \"-\" in expression:\n description = self.generate_between_segment_description(\n expression, get_between_description_format, get_single_item_description)\n\n return description\n"
] | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.get_day_of_week_description | python | def get_day_of_week_description(self):
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
) | Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L267-L321 | [
"def get_segment_description(\n self,\n expression,\n all_description,\n get_single_item_description,\n get_interval_description_format,\n get_between_description_format,\n get_description_format\n):\n \"\"\"Returns segment description\n Args:\n expression: Segment to descript\n all_description: *\n get_single_item_description: 1\n get_interval_description_format: 1/2\n get_between_description_format: 1-2\n get_description_format: format get_single_item_description\n Returns:\n segment description\n\n \"\"\"\n description = None\n if expression is None or expression == '':\n description = ''\n elif expression == \"*\":\n description = all_description\n elif any(ext in expression for ext in ['/', '-', ',']) is False:\n description = get_description_format(expression).format(\n get_single_item_description(expression))\n elif \"/\" in expression:\n segments = expression.split('/')\n description = get_interval_description_format(\n segments[1]).format(get_single_item_description(segments[1]))\n\n # interval contains 'between' piece (i.e. 
2-59/3 )\n if \"-\" in segments[0]:\n between_segment_description = self.generate_between_segment_description(\n segments[0], get_between_description_format, get_single_item_description)\n if not between_segment_description.startswith(\", \"):\n description += \", \"\n description += between_segment_description\n elif any(ext in segments[0] for ext in ['*', ',']) is False:\n range_item_description = get_description_format(segments[0]).format(\n get_single_item_description(segments[0])\n )\n range_item_description = range_item_description.replace(\", \", \"\")\n\n description += _(\", starting {0}\").format(range_item_description)\n elif \",\" in expression:\n segments = expression.split(',')\n\n description_content = ''\n for i, segment in enumerate(segments):\n if i > 0 and len(segments) > 2:\n description_content += \",\"\n\n if i < len(segments) - 1:\n description_content += \" \"\n\n if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):\n description_content += _(\" and \")\n\n if \"-\" in segment:\n between_description = self.generate_between_segment_description(\n segment,\n lambda s: _(\", {0} through {1}\"),\n get_single_item_description\n )\n\n between_description = between_description.replace(\", \", \"\")\n\n description_content += between_description\n else:\n description_content += get_single_item_description(segment)\n\n description = get_description_format(\n expression).format(\n description_content)\n elif \"-\" in expression:\n description = self.generate_between_segment_description(\n expression, get_between_description_format, get_single_item_description)\n\n return description\n"
] | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.get_month_description | python | def get_month_description(self):
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
) | Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L323-L337 | [
"def get_segment_description(\n self,\n expression,\n all_description,\n get_single_item_description,\n get_interval_description_format,\n get_between_description_format,\n get_description_format\n):\n \"\"\"Returns segment description\n Args:\n expression: Segment to descript\n all_description: *\n get_single_item_description: 1\n get_interval_description_format: 1/2\n get_between_description_format: 1-2\n get_description_format: format get_single_item_description\n Returns:\n segment description\n\n \"\"\"\n description = None\n if expression is None or expression == '':\n description = ''\n elif expression == \"*\":\n description = all_description\n elif any(ext in expression for ext in ['/', '-', ',']) is False:\n description = get_description_format(expression).format(\n get_single_item_description(expression))\n elif \"/\" in expression:\n segments = expression.split('/')\n description = get_interval_description_format(\n segments[1]).format(get_single_item_description(segments[1]))\n\n # interval contains 'between' piece (i.e. 
2-59/3 )\n if \"-\" in segments[0]:\n between_segment_description = self.generate_between_segment_description(\n segments[0], get_between_description_format, get_single_item_description)\n if not between_segment_description.startswith(\", \"):\n description += \", \"\n description += between_segment_description\n elif any(ext in segments[0] for ext in ['*', ',']) is False:\n range_item_description = get_description_format(segments[0]).format(\n get_single_item_description(segments[0])\n )\n range_item_description = range_item_description.replace(\", \", \"\")\n\n description += _(\", starting {0}\").format(range_item_description)\n elif \",\" in expression:\n segments = expression.split(',')\n\n description_content = ''\n for i, segment in enumerate(segments):\n if i > 0 and len(segments) > 2:\n description_content += \",\"\n\n if i < len(segments) - 1:\n description_content += \" \"\n\n if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):\n description_content += _(\" and \")\n\n if \"-\" in segment:\n between_description = self.generate_between_segment_description(\n segment,\n lambda s: _(\", {0} through {1}\"),\n get_single_item_description\n )\n\n between_description = between_description.replace(\", \", \"\")\n\n description_content += between_description\n else:\n description_content += get_single_item_description(segment)\n\n description = get_description_format(\n expression).format(\n description_content)\n elif \"-\" in expression:\n description = self.generate_between_segment_description(\n expression, get_between_description_format, get_single_item_description)\n\n return description\n"
] | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.get_day_of_month_description | python | def get_day_of_month_description(self):
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description | Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L339-L373 | [
"def get_segment_description(\n self,\n expression,\n all_description,\n get_single_item_description,\n get_interval_description_format,\n get_between_description_format,\n get_description_format\n):\n \"\"\"Returns segment description\n Args:\n expression: Segment to descript\n all_description: *\n get_single_item_description: 1\n get_interval_description_format: 1/2\n get_between_description_format: 1-2\n get_description_format: format get_single_item_description\n Returns:\n segment description\n\n \"\"\"\n description = None\n if expression is None or expression == '':\n description = ''\n elif expression == \"*\":\n description = all_description\n elif any(ext in expression for ext in ['/', '-', ',']) is False:\n description = get_description_format(expression).format(\n get_single_item_description(expression))\n elif \"/\" in expression:\n segments = expression.split('/')\n description = get_interval_description_format(\n segments[1]).format(get_single_item_description(segments[1]))\n\n # interval contains 'between' piece (i.e. 
2-59/3 )\n if \"-\" in segments[0]:\n between_segment_description = self.generate_between_segment_description(\n segments[0], get_between_description_format, get_single_item_description)\n if not between_segment_description.startswith(\", \"):\n description += \", \"\n description += between_segment_description\n elif any(ext in segments[0] for ext in ['*', ',']) is False:\n range_item_description = get_description_format(segments[0]).format(\n get_single_item_description(segments[0])\n )\n range_item_description = range_item_description.replace(\", \", \"\")\n\n description += _(\", starting {0}\").format(range_item_description)\n elif \",\" in expression:\n segments = expression.split(',')\n\n description_content = ''\n for i, segment in enumerate(segments):\n if i > 0 and len(segments) > 2:\n description_content += \",\"\n\n if i < len(segments) - 1:\n description_content += \" \"\n\n if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):\n description_content += _(\" and \")\n\n if \"-\" in segment:\n between_description = self.generate_between_segment_description(\n segment,\n lambda s: _(\", {0} through {1}\"),\n get_single_item_description\n )\n\n between_description = between_description.replace(\", \", \"\")\n\n description_content += between_description\n else:\n description_content += get_single_item_description(segment)\n\n description = get_description_format(\n expression).format(\n description_content)\n elif \"-\" in expression:\n description = self.generate_between_segment_description(\n expression, get_between_description_format, get_single_item_description)\n\n return description\n"
] | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.get_year_description | python | def get_year_description(self):
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
) | Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L375-L400 | [
"def get_segment_description(\n self,\n expression,\n all_description,\n get_single_item_description,\n get_interval_description_format,\n get_between_description_format,\n get_description_format\n):\n \"\"\"Returns segment description\n Args:\n expression: Segment to descript\n all_description: *\n get_single_item_description: 1\n get_interval_description_format: 1/2\n get_between_description_format: 1-2\n get_description_format: format get_single_item_description\n Returns:\n segment description\n\n \"\"\"\n description = None\n if expression is None or expression == '':\n description = ''\n elif expression == \"*\":\n description = all_description\n elif any(ext in expression for ext in ['/', '-', ',']) is False:\n description = get_description_format(expression).format(\n get_single_item_description(expression))\n elif \"/\" in expression:\n segments = expression.split('/')\n description = get_interval_description_format(\n segments[1]).format(get_single_item_description(segments[1]))\n\n # interval contains 'between' piece (i.e. 
2-59/3 )\n if \"-\" in segments[0]:\n between_segment_description = self.generate_between_segment_description(\n segments[0], get_between_description_format, get_single_item_description)\n if not between_segment_description.startswith(\", \"):\n description += \", \"\n description += between_segment_description\n elif any(ext in segments[0] for ext in ['*', ',']) is False:\n range_item_description = get_description_format(segments[0]).format(\n get_single_item_description(segments[0])\n )\n range_item_description = range_item_description.replace(\", \", \"\")\n\n description += _(\", starting {0}\").format(range_item_description)\n elif \",\" in expression:\n segments = expression.split(',')\n\n description_content = ''\n for i, segment in enumerate(segments):\n if i > 0 and len(segments) > 2:\n description_content += \",\"\n\n if i < len(segments) - 1:\n description_content += \" \"\n\n if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):\n description_content += _(\" and \")\n\n if \"-\" in segment:\n between_description = self.generate_between_segment_description(\n segment,\n lambda s: _(\", {0} through {1}\"),\n get_single_item_description\n )\n\n between_description = between_description.replace(\", \", \"\")\n\n description_content += between_description\n else:\n description_content += get_single_item_description(segment)\n\n description = get_description_format(\n expression).format(\n description_content)\n elif \"-\" in expression:\n description = self.generate_between_segment_description(\n expression, get_between_description_format, get_single_item_description)\n\n return description\n"
] | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
    """Return the localized day name for a CRON day-of-week number.

    CRON numbers days 0-6 starting on Sunday, while Python's calendar
    module numbers them 0-6 starting on Monday, hence the re-ordering.

    Args:
        day_number: CRON number of a day (0 = Sunday .. 6 = Saturday).

    Returns:
        Localized day name corresponding to day_number.

    Raises:
        IndexError: when day_number is out of range.
    """
    return [
        calendar.day_name[6],
        calendar.day_name[0],
        calendar.day_name[1],
        calendar.day_name[2],
        calendar.day_name[3],
        calendar.day_name[4],
        calendar.day_name[5]
    ][day_number]
def __str__(self):
    """Return the full human-readable description of the expression."""
    return self.get_description()
def __repr__(self):
    """Return the full human-readable description of the expression."""
    return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.get_segment_description | python | def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description | Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L402-L484 | [
"def generate_between_segment_description(\n self,\n between_expression,\n get_between_description_format,\n get_single_item_description\n):\n \"\"\"\n Generates the between segment description\n :param between_expression:\n :param get_between_description_format:\n :param get_single_item_description:\n :return: The between segment description\n \"\"\"\n description = \"\"\n between_segments = between_expression.split('-')\n between_segment_1_description = get_single_item_description(between_segments[0])\n between_segment_2_description = get_single_item_description(between_segments[1])\n between_segment_2_description = between_segment_2_description.replace(\n \":00\", \":59\")\n\n between_description_format = get_between_description_format(between_expression)\n description += between_description_format.format(between_segment_1_description, between_segment_2_description)\n\n return description\n",
"lambda s: s,\n",
"lambda s: _(\"every {0} seconds\").format(s),\n",
"lambda s: _(\"at {0} seconds past the minute\")\n",
"lambda s: s,\n",
"lambda s: _(\"every {0} minutes\").format(s),\n",
"lambda s: '' if s == \"0\" else _(\"at {0} minutes past the hour\")\n",
"lambda s: self.format_time(s, \"0\"),\n",
"lambda s: _(\"every {0} hours\").format(s),\n",
"lambda s: _(\"at {0}\")\n",
"lambda s: get_day_name(s),\n",
"lambda s: _(\", every {0} days of the week\").format(s),\n",
"lambda s: get_format(s)\n",
"lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime(\"%B\"),\n",
"lambda s: _(\", every {0} months\").format(s),\n",
"lambda s: _(\", only in {0}\")\n",
"lambda s: format_year(s),\n",
"lambda s: _(\", every {0} years\").format(s),\n",
"lambda s: _(\", only in {0}\")\n"
] | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
    """Initialize a new instance of the ExpressionDescriptor class.

    Args:
        expression: The cron expression string.
        options: Options to control the output description.

    Raises:
        WrongArgumentException: if a keyword argument is unknown.
    """
    if options is None:
        options = Options()
    self._expression = expression
    self._options = options
    self._expression_parts = []
    self._parsed = False

    # Keyword arguments may override Options attributes; anything that is
    # not a known Options attribute is rejected loudly rather than being
    # silently ignored.
    for kwarg in kwargs:
        if hasattr(self._options, kwarg):
            setattr(self._options, kwarg, kwargs[kwarg])
        else:
            raise WrongArgumentException(
                "Unknown {} configuration argument".format(kwarg))

    # Initializes localization (installs gettext translations for the locale).
    GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
    """Generate a human-readable string for the cron expression.

    Args:
        description_type: Which part(s) of the expression to describe.

    Returns:
        The cron expression description.

    Raises:
        Exception: parse errors are re-raised when
            throw_exception_on_parse_error is True.
    """
    try:
        # Parse once and cache the parts for subsequent calls.
        if self._parsed is False:
            parser = ExpressionParser(self._expression, self._options)
            self._expression_parts = parser.parse()
            self._parsed = True

        # Dispatch table: maps each description type to its generator;
        # unknown types fall back to the seconds description.
        choices = {
            DescriptionTypeEnum.FULL: self.get_full_description,
            DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
            DescriptionTypeEnum.HOURS: self.get_hours_description,
            DescriptionTypeEnum.MINUTES: self.get_minutes_description,
            DescriptionTypeEnum.SECONDS: self.get_seconds_description,
            DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
            DescriptionTypeEnum.MONTH: self.get_month_description,
            DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
            DescriptionTypeEnum.YEAR: self.get_year_description,
        }
        description = choices.get(description_type, self.get_seconds_description)()
    except Exception as ex:
        if self._options.throw_exception_on_parse_error:
            raise
        else:
            # Best-effort mode: return the error message as the description.
            description = str(ex)
    return description
def get_full_description(self):
    """Generate the FULL description.

    Returns:
        The FULL description (time, day-of-month, day-of-week, month, year).

    Raises:
        FormatException: if formatting fails and
            throw_exception_on_parse_error is True.
    """
    try:
        time_segment = self.get_time_of_day_description()
        day_of_month_desc = self.get_day_of_month_description()
        month_desc = self.get_month_description()
        day_of_week_desc = self.get_day_of_week_description()
        year_desc = self.get_year_description()
        description = "{0}{1}{2}{3}{4}".format(
            time_segment,
            day_of_month_desc,
            day_of_week_desc,
            month_desc,
            year_desc)
        description = self.transform_verbosity(
            description, self._options.verbose)
        description = self.transform_case(
            description,
            self._options.casing_type)
    except Exception:
        # NOTE(review): "occured" is misspelled, but the string is a gettext
        # msgid -- correcting it would break existing .po translations, so
        # it is kept verbatim.
        description = _(
            "An error occured when generating the expression description. Check the cron expression syntax.")
        if self._options.throw_exception_on_parse_error:
            raise FormatException(description)
    return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def generate_between_segment_description(
    self,
    between_expression,
    get_between_description_format,
    get_single_item_description
):
    """Generate the description for a "between" (a-b) segment.

    Args:
        between_expression: Segment of the form "a-b".
        get_between_description_format: Callable mapping the expression to a
            two-placeholder format string.
        get_single_item_description: Callable describing a single item.

    Returns:
        The between segment description.
    """
    description = ""
    between_segments = between_expression.split('-')
    between_segment_1_description = get_single_item_description(between_segments[0])
    between_segment_2_description = get_single_item_description(between_segments[1])
    # An upper bound rendered as a whole hour means "up to the end of that
    # hour", so present :00 as :59 (presumably only hour ranges hit this).
    between_segment_2_description = between_segment_2_description.replace(
        ":00", ":59")

    between_description_format = get_between_description_format(between_expression)
    description += between_description_format.format(
        between_segment_1_description, between_segment_2_description)

    return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.generate_between_segment_description | python | def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description | Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L486-L509 | null | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.format_time | python | def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period) | Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L511-L539 | null | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.transform_verbosity | python | def transform_verbosity(self, description, use_verbose_format):
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description | Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L541-L556 | null | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.transform_case | python | def transform_case(self, description, case_type):
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description | Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L558-L576 | null | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def number_to_day(self, day_number):
"""Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found
"""
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number]
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
Salamek/cron-descriptor | cron_descriptor/ExpressionDescriptor.py | ExpressionDescriptor.number_to_day | python | def number_to_day(self, day_number):
return [
calendar.day_name[6],
calendar.day_name[0],
calendar.day_name[1],
calendar.day_name[2],
calendar.day_name[3],
calendar.day_name[4],
calendar.day_name[5]
][day_number] | Returns localized day name by its CRON number
Args:
day_number: Number of a day
Returns:
Day corresponding to day_number
Raises:
IndexError: When day_number is not found | train | https://github.com/Salamek/cron-descriptor/blob/fafe86b33e190caf205836fa1c719d27c7b408c7/cron_descriptor/ExpressionDescriptor.py#L578-L596 | null | class ExpressionDescriptor(object):
"""
Converts a Cron Expression into a human readable string
"""
_special_characters = ['/', '-', ',', '*']
_expression = ''
_options = None
_expression_parts = []
_parsed = False
def __init__(self, expression, options=None, **kwargs):
"""Initializes a new instance of the ExpressionDescriptorclass
Args:
expression: The cron expression string
options: Options to control the output description
Raises:
WrongArgumentException: if kwarg is unknow
"""
if options is None:
options = Options()
self._expression = expression
self._options = options
self._expression_parts = []
self._parsed = False
# if kwargs in _options, overwrite it, if not raise exeption
for kwarg in kwargs:
if hasattr(self._options, kwarg):
setattr(self._options, kwarg, kwargs[kwarg])
else:
raise WrongArgumentException(
"Unknow {} configuration argument".format(kwarg))
# Initializes localization
GetText(options.locale_code)
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description
def get_full_description(self):
"""Generates the FULL description
Returns:
The FULL description
Raises:
FormatException: if formating fails and throw_exception_on_parse_error is True
"""
try:
time_segment = self.get_time_of_day_description()
day_of_month_desc = self.get_day_of_month_description()
month_desc = self.get_month_description()
day_of_week_desc = self.get_day_of_week_description()
year_desc = self.get_year_description()
description = "{0}{1}{2}{3}{4}".format(
time_segment,
day_of_month_desc,
day_of_week_desc,
month_desc,
year_desc)
description = self.transform_verbosity(
description, self._options.verbose)
description = self.transform_case(
description,
self._options.casing_type)
except Exception:
description = _(
"An error occured when generating the expression description. Check the cron expression syntax.")
if self._options.throw_exception_on_parse_error:
raise FormatException(description)
return description
def get_time_of_day_description(self):
"""Generates a description for only the TIMEOFDAY portion of the expression
Returns:
The TIMEOFDAY description
"""
seconds_expression = self._expression_parts[0]
minute_expression = self._expression_parts[1]
hour_expression = self._expression_parts[2]
description = StringBuilder()
# handle special cases first
if any(exp in minute_expression for exp in self._special_characters) is False and \
any(exp in hour_expression for exp in self._special_characters) is False and \
any(exp in seconds_expression for exp in self._special_characters) is False:
# specific time of day (i.e. 10 14)
description.append(_("At "))
description.append(
self.format_time(
hour_expression,
minute_expression,
seconds_expression))
elif "-" in minute_expression and \
"," not in minute_expression and \
any(exp in hour_expression for exp in self._special_characters) is False:
# minute range in single hour (i.e. 0-10 11)
minute_parts = minute_expression.split('-')
description.append(_("Every minute between {0} and {1}").format(
self.format_time(hour_expression, minute_parts[0]), self.format_time(hour_expression, minute_parts[1])))
elif "," in hour_expression and "-" not in hour_expression and \
any(exp in minute_expression for exp in self._special_characters) is False:
# hours list with single minute (o.e. 30 6,14,16)
hour_parts = hour_expression.split(',')
description.append(_("At"))
for i, hour_part in enumerate(hour_parts):
description.append(" ")
description.append(
self.format_time(hour_part, minute_expression))
if i < (len(hour_parts) - 2):
description.append(",")
if i == len(hour_parts) - 2:
description.append(_(" and"))
else:
# default time description
seconds_description = self.get_seconds_description()
minutes_description = self.get_minutes_description()
hours_description = self.get_hours_description()
description.append(seconds_description)
if description:
description.append(", ")
description.append(minutes_description)
if description:
description.append(", ")
description.append(hours_description)
return str(description)
def get_seconds_description(self):
"""Generates a description for only the SECONDS portion of the expression
Returns:
The SECONDS description
"""
return self.get_segment_description(
self._expression_parts[0],
_("every second"),
lambda s: s,
lambda s: _("every {0} seconds").format(s),
lambda s: _("seconds {0} through {1} past the minute"),
lambda s: _("at {0} seconds past the minute")
)
def get_minutes_description(self):
"""Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
"""
return self.get_segment_description(
self._expression_parts[1],
_("every minute"),
lambda s: s,
lambda s: _("every {0} minutes").format(s),
lambda s: _("minutes {0} through {1} past the hour"),
lambda s: '' if s == "0" else _("at {0} minutes past the hour")
)
def get_hours_description(self):
"""Generates a description for only the HOUR portion of the expression
Returns:
The HOUR description
"""
expression = self._expression_parts[2]
return self.get_segment_description(
expression,
_("every hour"),
lambda s: self.format_time(s, "0"),
lambda s: _("every {0} hours").format(s),
lambda s: _("between {0} and {1}"),
lambda s: _("at {0}")
)
def get_day_of_week_description(self):
"""Generates a description for only the DAYOFWEEK portion of the expression
Returns:
The DAYOFWEEK description
"""
if self._expression_parts[5] == "*" and self._expression_parts[3] != "*":
# DOM is specified and DOW is * so to prevent contradiction like "on day 1 of the month, every day"
# we will not specified a DOW description.
return ""
def get_day_name(s):
exp = s
if "#" in s:
exp, useless = s.split("#", 2)
elif "L" in s:
exp = exp.replace("L", '')
return self.number_to_day(int(exp))
def get_format(s):
if "#" in s:
day_of_week_of_month = s[s.find("#") + 1:]
try:
day_of_week_of_month_number = int(day_of_week_of_month)
choices = {
1: _("first"),
2: _("second"),
3: _("third"),
4: _("forth"),
5: _("fifth"),
}
day_of_week_of_month_description = choices.get(day_of_week_of_month_number, '')
except ValueError:
day_of_week_of_month_description = ''
formated = "{}{}{}".format(_(", on the "),
day_of_week_of_month_description, _(" {0} of the month"))
elif "L" in s:
formated = _(", on the last {0} of the month")
else:
formated = _(", only on {0}")
return formated
return self.get_segment_description(
self._expression_parts[5],
_(", every day"),
lambda s: get_day_name(s),
lambda s: _(", every {0} days of the week").format(s),
lambda s: _(", {0} through {1}"),
lambda s: get_format(s)
)
def get_month_description(self):
"""Generates a description for only the MONTH portion of the expression
Returns:
The MONTH description
"""
return self.get_segment_description(
self._expression_parts[4],
'',
lambda s: datetime.date(datetime.date.today().year, int(s), 1).strftime("%B"),
lambda s: _(", every {0} months").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_day_of_month_description(self):
"""Generates a description for only the DAYOFMONTH portion of the expression
Returns:
The DAYOFMONTH description
"""
expression = self._expression_parts[3]
expression = expression.replace("?", "*")
if expression == "L":
description = _(", on the last day of the month")
elif expression == "LW" or expression == "WL":
description = _(", on the last weekday of the month")
else:
regex = re.compile("(\\d{1,2}W)|(W\\d{1,2})")
if regex.match(expression):
m = regex.match(expression)
day_number = int(m.group().replace("W", ""))
day_string = _("first weekday") if day_number == 1 else _("weekday nearest day {0}").format(
day_number)
description = _(", on the {0} of the month").format(
day_string)
else:
description = self.get_segment_description(
expression,
_(", every day"),
lambda s: s,
lambda s: _(", every day") if s == "1" else _(", every {0} days"),
lambda s: _(", between day {0} and {1} of the month"),
lambda s: _(", on day {0} of the month")
)
return description
def get_year_description(self):
"""Generates a description for only the YEAR portion of the expression
Returns:
The YEAR description
"""
def format_year(s):
regex = re.compile(r"^\d+$")
if regex.match(s):
year_int = int(s)
if year_int < 1900:
return year_int
return datetime.date(year_int, 1, 1).strftime("%Y")
else:
return s
return self.get_segment_description(
self._expression_parts[6],
'',
lambda s: format_year(s),
lambda s: _(", every {0} years").format(s),
lambda s: _(", {0} through {1}"),
lambda s: _(", only in {0}")
)
def get_segment_description(
self,
expression,
all_description,
get_single_item_description,
get_interval_description_format,
get_between_description_format,
get_description_format
):
"""Returns segment description
Args:
expression: Segment to descript
all_description: *
get_single_item_description: 1
get_interval_description_format: 1/2
get_between_description_format: 1-2
get_description_format: format get_single_item_description
Returns:
segment description
"""
description = None
if expression is None or expression == '':
description = ''
elif expression == "*":
description = all_description
elif any(ext in expression for ext in ['/', '-', ',']) is False:
description = get_description_format(expression).format(
get_single_item_description(expression))
elif "/" in expression:
segments = expression.split('/')
description = get_interval_description_format(
segments[1]).format(get_single_item_description(segments[1]))
# interval contains 'between' piece (i.e. 2-59/3 )
if "-" in segments[0]:
between_segment_description = self.generate_between_segment_description(
segments[0], get_between_description_format, get_single_item_description)
if not between_segment_description.startswith(", "):
description += ", "
description += between_segment_description
elif any(ext in segments[0] for ext in ['*', ',']) is False:
range_item_description = get_description_format(segments[0]).format(
get_single_item_description(segments[0])
)
range_item_description = range_item_description.replace(", ", "")
description += _(", starting {0}").format(range_item_description)
elif "," in expression:
segments = expression.split(',')
description_content = ''
for i, segment in enumerate(segments):
if i > 0 and len(segments) > 2:
description_content += ","
if i < len(segments) - 1:
description_content += " "
if i > 0 and len(segments) > 1 and (i == len(segments) - 1 or len(segments) == 2):
description_content += _(" and ")
if "-" in segment:
between_description = self.generate_between_segment_description(
segment,
lambda s: _(", {0} through {1}"),
get_single_item_description
)
between_description = between_description.replace(", ", "")
description_content += between_description
else:
description_content += get_single_item_description(segment)
description = get_description_format(
expression).format(
description_content)
elif "-" in expression:
description = self.generate_between_segment_description(
expression, get_between_description_format, get_single_item_description)
return description
def generate_between_segment_description(
self,
between_expression,
get_between_description_format,
get_single_item_description
):
"""
Generates the between segment description
:param between_expression:
:param get_between_description_format:
:param get_single_item_description:
:return: The between segment description
"""
description = ""
between_segments = between_expression.split('-')
between_segment_1_description = get_single_item_description(between_segments[0])
between_segment_2_description = get_single_item_description(between_segments[1])
between_segment_2_description = between_segment_2_description.replace(
":00", ":59")
between_description_format = get_between_description_format(between_expression)
description += between_description_format.format(between_segment_1_description, between_segment_2_description)
return description
def format_time(
self,
hour_expression,
minute_expression,
second_expression=''
):
"""Given time parts, will contruct a formatted time description
Args:
hour_expression: Hours part
minute_expression: Minutes part
second_expression: Seconds part
Returns:
Formatted time description
"""
hour = int(hour_expression)
period = ''
if self._options.use_24hour_time_format is False:
period = " PM" if (hour >= 12) else " AM"
if hour > 12:
hour -= 12
minute = str(int(minute_expression)) # !FIXME WUT ???
second = ''
if second_expression is not None and second_expression:
second = "{}{}".format(":", str(int(second_expression)).zfill(2))
return "{0}:{1}{2}{3}".format(str(hour).zfill(2), minute.zfill(2), second, period)
def transform_verbosity(self, description, use_verbose_format):
"""Transforms the verbosity of the expression description by stripping verbosity from original description
Args:
description: The description to transform
use_verbose_format: If True, will leave description as it, if False, will strip verbose parts
second_expression: Seconds part
Returns:
The transformed description with proper verbosity
"""
if use_verbose_format is False:
description = description.replace(
_(", every minute"), '')
description = description.replace(_(", every hour"), '')
description = description.replace(_(", every day"), '')
return description
def transform_case(self, description, case_type):
"""Transforms the case of the expression description, based on options
Args:
description: The description to transform
case_type: The casing type that controls the output casing
second_expression: Seconds part
Returns:
The transformed description with proper casing
"""
if case_type == CasingTypeEnum.Sentence:
description = "{}{}".format(
description[0].upper(),
description[1:])
elif case_type == CasingTypeEnum.Title:
description = description.title()
else:
description = description.lower()
return description
def __str__(self):
return self.get_description()
def __repr__(self):
return self.get_description()
|
dnephin/PyStaticConfiguration | staticconf/schema.py | SchemaMeta.build_attributes | python | def build_attributes(cls, attributes, namespace):
config_path = attributes.get('config_path')
tokens = {}
def build_config_key(value_def, config_key):
key = value_def.config_key or config_key
return '%s.%s' % (config_path, key) if config_path else key
def build_token(name, value_def):
config_key = build_config_key(value_def, name)
value_token = ValueToken.from_definition(
value_def, namespace, config_key)
getters.register_value_proxy(namespace, value_token, value_def.help)
tokens[name] = value_token
return name, build_property(value_token)
def build_attr(name, attribute):
if not isinstance(attribute, ValueTypeDefinition):
return name, attribute
return build_token(name, attribute)
attributes = dict(build_attr(*item)
for item in six.iteritems(attributes))
attributes['_tokens'] = tokens
return attributes | Return an attributes dictionary with ValueTokens replaced by a
property which returns the config value. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/schema.py#L166-L193 | null | class SchemaMeta(type):
"""Metaclass to construct config schema object."""
def __new__(mcs, name, bases, attributes):
namespace = mcs.get_namespace(attributes)
attributes = mcs.build_attributes(attributes, namespace)
return super(SchemaMeta, mcs).__new__(mcs, name, bases, attributes)
@classmethod
def get_namespace(cls, attributes):
if 'namespace' not in attributes:
raise errors.ConfigurationError("ConfigSchema requires a namespace.")
return config.get_namespace(attributes['namespace'])
@classmethod
|
dnephin/PyStaticConfiguration | staticconf/proxy.py | cache_as_field | python | def cache_as_field(cache_name):
def cache_wrapper(func):
@functools.wraps(func)
def inner_wrapper(self, *args, **kwargs):
value = getattr(self, cache_name, UndefToken)
if value != UndefToken:
return value
ret = func(self, *args, **kwargs)
setattr(self, cache_name, ret)
return ret
return inner_wrapper
return cache_wrapper | Cache a functions return value as the field 'cache_name'. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/proxy.py#L74-L87 | null | """
Proxy a configuration value. Defers the lookup until the value is used, so that
values can be read statically at import time.
"""
import functools
import operator
from staticconf import errors
import six
class UndefToken(object):
"""A token to represent an undefined value, so that None can be used
as a default value.
"""
def __repr__(self):
return "<Undefined>"
UndefToken = UndefToken()
_special_names = [
'__abs__', '__add__', '__and__', '__bool__', '__call__', '__cmp__',
'__coerce__',
'__contains__', '__delitem__', '__delslice__', '__div__', '__divmod__',
'__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__',
'__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__',
'__idiv__', '__idivmod__', '__ifloordiv__', '__ilshift__', '__imod__',
'__imul__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
'__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__',
'__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__',
'__neg__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__',
'__rand__', '__rdiv__', '__rdivmod__',
'__repr__', '__reversed__', '__rfloorfiv__', '__rlshift__', '__rmod__',
'__rmul__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
'__rtruediv__', '__rxor__', '__setitem__', '__setslice__', '__sub__',
'__truediv__', '__xor__', 'next', '__nonzero__', '__str__', '__unicode__',
'__index__', '__fspath__',
]
def identity(x):
return x
unary_funcs = {
'__unicode__': six.text_type,
'__str__': str,
'__fspath__': identity, # python3.6+ os.PathLike interface
'__repr__': repr,
'__nonzero__': bool, # Python2 bool
'__bool__': bool, # Python3 bool
'__hash__': hash,
}
def build_class_def(cls):
def build_method(name):
def method(self, *args, **kwargs):
if name in unary_funcs:
return unary_funcs[name](self.value)
if hasattr(operator, name):
return getattr(operator, name)(self.value, *args)
return getattr(self.value, name)(*args, **kwargs)
return method
namespace = dict((name, build_method(name)) for name in _special_names)
return type(cls.__name__, (cls,), namespace)
def extract_value(proxy):
"""Given a value proxy type, Retrieve a value from a namespace, raising
exception if no value is found, or the value does not validate.
"""
value = proxy.namespace.get(proxy.config_key, proxy.default)
if value is UndefToken:
raise errors.ConfigurationError("%s is missing value for: %s" %
(proxy.namespace, proxy.config_key))
try:
return proxy.validator(value)
except errors.ValidationError as e:
raise errors.ConfigurationError("%s failed to validate %s: %s" %
(proxy.namespace, proxy.config_key, e))
class ValueProxy(object):
"""Proxy a configuration value so it can be loaded after import time."""
__slots__ = [
'validator',
'config_key',
'default',
'_value',
'namespace',
'__weakref__'
]
@classmethod
@cache_as_field('_class_def')
def get_class_def(cls):
return build_class_def(cls)
def __new__(cls, *args, **kwargs):
"""Create instances of this class with proxied special names."""
klass = cls.get_class_def()
instance = object.__new__(klass)
klass.__init__(instance, *args, **kwargs)
return instance
def __init__(self, validator, namespace, key, default=UndefToken):
self.validator = validator
self.config_key = key
self.default = default
self.namespace = namespace
self._value = UndefToken
@cache_as_field('_value')
def get_value(self):
return extract_value(self)
value = property(get_value)
def __getattr__(self, item):
return getattr(self.value, item)
def reset(self):
"""Clear the cached value so that configuration can be reloaded."""
self._value = UndefToken
|
dnephin/PyStaticConfiguration | staticconf/proxy.py | extract_value | python | def extract_value(proxy):
value = proxy.namespace.get(proxy.config_key, proxy.default)
if value is UndefToken:
raise errors.ConfigurationError("%s is missing value for: %s" %
(proxy.namespace, proxy.config_key))
try:
return proxy.validator(value)
except errors.ValidationError as e:
raise errors.ConfigurationError("%s failed to validate %s: %s" %
(proxy.namespace, proxy.config_key, e)) | Given a value proxy type, Retrieve a value from a namespace, raising
exception if no value is found, or the value does not validate. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/proxy.py#L90-L103 | null | """
Proxy a configuration value. Defers the lookup until the value is used, so that
values can be read statically at import time.
"""
import functools
import operator
from staticconf import errors
import six
class UndefToken(object):
"""A token to represent an undefined value, so that None can be used
as a default value.
"""
def __repr__(self):
return "<Undefined>"
UndefToken = UndefToken()
_special_names = [
'__abs__', '__add__', '__and__', '__bool__', '__call__', '__cmp__',
'__coerce__',
'__contains__', '__delitem__', '__delslice__', '__div__', '__divmod__',
'__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__',
'__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__',
'__idiv__', '__idivmod__', '__ifloordiv__', '__ilshift__', '__imod__',
'__imul__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
'__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__',
'__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__',
'__neg__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__',
'__rand__', '__rdiv__', '__rdivmod__',
'__repr__', '__reversed__', '__rfloorfiv__', '__rlshift__', '__rmod__',
'__rmul__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
'__rtruediv__', '__rxor__', '__setitem__', '__setslice__', '__sub__',
'__truediv__', '__xor__', 'next', '__nonzero__', '__str__', '__unicode__',
'__index__', '__fspath__',
]
def identity(x):
return x
unary_funcs = {
'__unicode__': six.text_type,
'__str__': str,
'__fspath__': identity, # python3.6+ os.PathLike interface
'__repr__': repr,
'__nonzero__': bool, # Python2 bool
'__bool__': bool, # Python3 bool
'__hash__': hash,
}
def build_class_def(cls):
def build_method(name):
def method(self, *args, **kwargs):
if name in unary_funcs:
return unary_funcs[name](self.value)
if hasattr(operator, name):
return getattr(operator, name)(self.value, *args)
return getattr(self.value, name)(*args, **kwargs)
return method
namespace = dict((name, build_method(name)) for name in _special_names)
return type(cls.__name__, (cls,), namespace)
def cache_as_field(cache_name):
"""Cache a functions return value as the field 'cache_name'."""
def cache_wrapper(func):
@functools.wraps(func)
def inner_wrapper(self, *args, **kwargs):
value = getattr(self, cache_name, UndefToken)
if value != UndefToken:
return value
ret = func(self, *args, **kwargs)
setattr(self, cache_name, ret)
return ret
return inner_wrapper
return cache_wrapper
class ValueProxy(object):
"""Proxy a configuration value so it can be loaded after import time."""
__slots__ = [
'validator',
'config_key',
'default',
'_value',
'namespace',
'__weakref__'
]
@classmethod
@cache_as_field('_class_def')
def get_class_def(cls):
return build_class_def(cls)
def __new__(cls, *args, **kwargs):
"""Create instances of this class with proxied special names."""
klass = cls.get_class_def()
instance = object.__new__(klass)
klass.__init__(instance, *args, **kwargs)
return instance
def __init__(self, validator, namespace, key, default=UndefToken):
self.validator = validator
self.config_key = key
self.default = default
self.namespace = namespace
self._value = UndefToken
@cache_as_field('_value')
def get_value(self):
return extract_value(self)
value = property(get_value)
def __getattr__(self, item):
return getattr(self.value, item)
def reset(self):
"""Clear the cached value so that configuration can be reloaded."""
self._value = UndefToken
|
dnephin/PyStaticConfiguration | staticconf/readers.py | build_reader | python | def build_reader(validator, reader_namespace=config.DEFAULT):
def reader(config_key, default=UndefToken, namespace=None):
config_namespace = config.get_namespace(namespace or reader_namespace)
return validator(_read_config(config_key, config_namespace, default))
return reader | A factory method for creating a custom config reader from a validation
function.
:param validator: a validation function which acceptance one argument (the
configuration value), and returns that value casted to
the appropriate type.
:param reader_namespace: the default namespace to use. Defaults to
`DEFAULT`. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/readers.py#L103-L116 | null | """
Functions to read values directly from a
:class:`staticconf.config.ConfigNamespace`. Values will be validated and
cast to the requested type.
Examples
--------
.. code-block:: python
import staticconf
# read an int
max_cycles = staticconf.read_int('max_cycles')
start_id = staticconf.read_int('poller.init.start_id', default=0)
# start_date will be a datetime.date
start_date = staticconf.read_date('start_date')
# matcher will be a regex object
matcher = staticconf.read_regex('matcher_pattern')
# Read a value from a different namespace
intervals = staticconf.read_float('intervals', namespace='something')
Readers can be attached to a namespace using a :class:`NamespaceReaders`
object.
.. code-block:: python
import staticconf
bling_reader = staticconf.NamespaceReaders('bling')
# These values are read from the `bling` ConfigNamespace
currency = bling_reader.read_string('currency')
value = bling_reader.read_float('value')
Arguments
---------
Readers accept the following kwargs:
config_key
string configuration key using dotted notation
default
if no `default` is given, the key must be present in the configuration.
If the key is missing a :class:`staticconf.errors.ConfigurationError`
is raised.
namespace
get the value from this namespace instead of DEFAULT.
Building custom readers
-----------------------
:func:`build_reader` is a factory function which can be used for creating
custom readers from a validation function. A validation function should handle
all exceptions and raise a :class:`staticconf.errors.ValidationError` if there
is a problem.
First create a validation function
.. code-block:: python
def validate_currency(value):
try:
# Assume a tuple or a list
name, decimal_points = value
return Currency(name, decimal_points)
except Exception, e:
raise ValidationErrror(...)
Example of a custom reader:
.. code-block:: python
from staticconf import readers
read_currency = readers.build_reader(validate_currency)
# Returns a Currency object using the data from the config namespace
# at they key `currencies.usd`.
usd_currency = read_currency('currencies.usd')
"""
from staticconf import validation, config, errors
from staticconf.proxy import UndefToken
def _read_config(config_key, config_namespace, default):
value = config_namespace.get(config_key, default=default)
if value is UndefToken:
msg = '%s missing value for %s' % (config_namespace, config_key)
raise errors.ConfigurationError(msg)
return value
class ReaderNameFactory(object):
@staticmethod
def get_name(name):
return 'read_%s' % name if name else 'read'
@staticmethod
def get_list_of_name(name):
return 'read_list_of_%s' % name
def get_all_accessors(name_factory):
for name, validator in validation.get_validators():
yield name_factory.get_name(name), validator
yield (name_factory.get_list_of_name(name),
validation.build_list_type_validator(validator))
class NamespaceAccessor(object):
def __init__(self, name, accessor_map, builder):
self.accessor_map = accessor_map
self.builder = builder
self.namespace = name
def __getattr__(self, item):
if item not in self.accessor_map:
raise AttributeError(item)
return self.builder(self.accessor_map[item], self.namespace)
def get_methods(self):
return dict((name, getattr(self, name)) for name in self.accessor_map)
def build_accessor_type(name_factory, builder):
accessor_map = dict(get_all_accessors(name_factory))
return lambda name: NamespaceAccessor(name, accessor_map, builder)
NamespaceReaders = build_accessor_type(ReaderNameFactory, build_reader)
"""An object with all reader functions which retrieve configuration from
a named namespace, instead of `DEFAULT`.
"""
default_readers = NamespaceReaders(config.DEFAULT)
globals().update(default_readers.get_methods())
__all__ = ['NamespaceReaders'] + list(default_readers.get_methods())
|
dnephin/PyStaticConfiguration | staticconf/config.py | get_namespaces_from_names | python | def get_namespaces_from_names(name, all_names):
names = configuration_namespaces.keys() if all_names else [name]
for name in names:
yield get_namespace(name) | Return a generator which yields namespace objects. | train | https://github.com/dnephin/PyStaticConfiguration/blob/229733270bc0dc0d9690ba850dbfb470e535c212/staticconf/config.py#L181-L185 | [
"def get_namespace(name):\n \"\"\"Return a :class:`ConfigNamespace` by name, creating the\n namespace if it does not exist.\n \"\"\"\n if name not in configuration_namespaces:\n configuration_namespaces[name] = ConfigNamespace(name)\n return configuration_namespaces[name]\n"
] | """
Store configuration in :class:`ConfigNamespace` objects and provide tools
for reloading, and displaying help messages.
Configuration Reloading
-----------------------
Configuration reloading is supported using a :class:`ConfigFacade`, which
composes a :class:`ConfigurationWatcher` and a :class:`ReloadCallbackChain`.
These classes provide a way of reloading configuration when the file is
modified.
"""
from collections import namedtuple
import hashlib
import logging
import os
import time
import weakref
import six
from staticconf import errors
log = logging.getLogger(__name__)
# Name for the default namespace
DEFAULT = 'DEFAULT'
def remove_by_keys(dictionary, keys):
keys = set(keys)
def filter_by_keys(item):
k, _ = item
return k not in keys
return list(filter(filter_by_keys, six.iteritems(dictionary)))
class ConfigMap(object):
"""A ConfigMap can be used to wrap a dictionary in your configuration.
It will allow you to retain your mapping structure (and prevent it
from being flattened).
"""
def __init__(self, *args, **kwargs):
self.data = dict(*args, **kwargs)
def __getitem__(self, item):
return self.data[item]
def get(self, item, default=None):
return self.data.get(item, default)
def __contains__(self, item):
return item in self.data
def __len__(self):
return len(self.data)
class ConfigNamespace(object):
"""A container for related configuration values. Values are stored
using flattened keys which map to values.
Values are added to this container using :mod:`staticconf.loader`. When a
:class:`ConfigNamespace` is created, it persists for the entire life of the
process. Values will stay in the namespace until :func:`clear` is called
to remove them.
To retrieve a namespace, use :func:`get_namespace`.
To access values stored in this namespace use :mod:`staticconf.readers`
or :mod:`staticconf.schema`.
"""
def __init__(self, name):
self.name = name
self.configuration_values = {}
self.value_proxies = weakref.WeakValueDictionary()
def get_name(self):
return self.name
def get_value_proxies(self):
return list(self.value_proxies.values())
def register_proxy(self, proxy):
self.value_proxies[id(proxy)] = proxy
def apply_config_data(
self,
config_data,
error_on_unknown,
error_on_dupe,
log_keys_only=False,
):
self.validate_keys(
config_data,
error_on_unknown,
log_keys_only=log_keys_only,
)
self.has_duplicate_keys(config_data, error_on_dupe)
self.update_values(config_data)
def update_values(self, *args, **kwargs):
self.configuration_values.update(*args, **kwargs)
def get_config_values(self):
"""Return all configuration stored in this object as a dict.
"""
return self.configuration_values
def get_config_dict(self):
"""Reconstruct the nested structure of this object's configuration
and return it as a dict.
"""
config_dict = {}
for dotted_key, value in self.get_config_values().items():
subkeys = dotted_key.split('.')
d = config_dict
for key in subkeys:
d = d.setdefault(key, value if key == subkeys[-1] else {})
return config_dict
def get_known_keys(self):
return set(vproxy.config_key for vproxy in self.get_value_proxies())
def validate_keys(
self,
config_data,
error_on_unknown,
log_keys_only=False,
):
unknown = remove_by_keys(config_data, self.get_known_keys())
if not unknown:
return
if log_keys_only:
unknown = [k for k, _ in unknown]
msg = "Unexpected value in %s configuration: %s" % (self.name, unknown)
if error_on_unknown:
raise errors.ConfigurationError(msg)
log.info(msg)
def has_duplicate_keys(self, config_data, error_on_duplicate):
args = config_data, self.configuration_values, error_on_duplicate
return has_duplicate_keys(*args)
def get(self, item, default=None):
return self.configuration_values.get(item, default)
def __getitem__(self, item):
return self.configuration_values[item]
def __setitem__(self, key, value):
self.configuration_values[key] = value
def __contains__(self, item):
return item in self.configuration_values
def clear(self):
"""Remove all values from the namespace."""
self.configuration_values.clear()
def _reset(self):
self.clear()
self.value_proxies.clear()
def __str__(self):
return "%s(%s)" % (type(self).__name__, self.name)
configuration_namespaces = {DEFAULT: ConfigNamespace(DEFAULT)}
KeyDescription = namedtuple('KeyDescription', 'name validator default help')
def get_namespace(name):
"""Return a :class:`ConfigNamespace` by name, creating the
namespace if it does not exist.
"""
if name not in configuration_namespaces:
configuration_namespaces[name] = ConfigNamespace(name)
return configuration_namespaces[name]
def reload(name=DEFAULT, all_names=False):
"""Reload one or all :class:`ConfigNamespace`. Reload clears the cache of
:mod:`staticconf.schema` and :mod:`staticconf.getters`, allowing them to
pickup the latest values in the namespace.
Defaults to reloading just the DEFAULT namespace.
:param name: the name of the :class:`ConfigNamespace` to reload
:param all_names: If True, reload all namespaces, and ignore `name`
"""
for namespace in get_namespaces_from_names(name, all_names):
for value_proxy in namespace.get_value_proxies():
value_proxy.reset()
def validate(name=DEFAULT, all_names=False):
"""Validate all registered keys after loading configuration.
Missing values or values which do not pass validation raise
:class:`staticconf.errors.ConfigurationError`. By default only validates
the `DEFAULT` namespace.
:param name: the namespace to validate
:type name: string
:param all_names: if True validates all namespaces and ignores `name`
:type all_names: boolean
"""
for namespace in get_namespaces_from_names(name, all_names):
all(value_proxy.get_value() for value_proxy in namespace.get_value_proxies())
class ConfigHelp(object):
"""Register and display help messages about config keys."""
def __init__(self):
self.descriptions = {}
def add(self, name, validator, default, namespace, help):
desc = KeyDescription(name, validator, default, help)
self.descriptions.setdefault(namespace, []).append(desc)
def view_help(self):
"""Return a help message describing all the statically configured keys.
"""
def format_desc(desc):
return "%s (Type: %s, Default: %s)\n%s" % (
desc.name,
desc.validator.__name__.replace('validate_', ''),
desc.default,
desc.help or '')
def format_namespace(key, desc_list):
return "\nNamespace: %s\n%s" % (
key,
'\n'.join(sorted(format_desc(desc) for desc in desc_list)))
def namespace_cmp(item):
name, _ = item
return chr(0) if name == DEFAULT else name
return '\n'.join(format_namespace(*desc) for desc in
sorted(six.iteritems(self.descriptions),
key=namespace_cmp))
def clear(self):
self.descriptions.clear()
config_help = ConfigHelp()
view_help = config_help.view_help
def _reset():
"""Used for internal testing."""
for namespace in configuration_namespaces.values():
namespace._reset()
config_help.clear()
def has_duplicate_keys(config_data, base_conf, raise_error):
"""Compare two dictionaries for duplicate keys. if raise_error is True
then raise on exception, otherwise log return True."""
duplicate_keys = set(base_conf) & set(config_data)
if not duplicate_keys:
return
msg = "Duplicate keys in config: %s" % duplicate_keys
if raise_error:
raise errors.ConfigurationError(msg)
log.info(msg)
return True
class ConfigurationWatcher(object):
"""Watches a file for modification and reloads the configuration
when it's modified. Accepts a min_interval to throttle checks.
The default :func:`reload()` operation is to reload all namespaces. To
only reload a specific namespace use a :class:`ReloadCallbackChain`
for the `reloader`.
.. seealso::
:func:`ConfigFacade.load` which provides a more concise interface
for the common case.
Usage:
.. code-block:: python
import staticconf
from staticconf import config
def build_configuration(filename, namespace):
config_loader = partial(staticconf.YamlConfiguration,
filename, namespace=namespace)
reloader = config.ReloadCallbackChain(namespace)
return config.ConfigurationWatcher(
config_loader, filename, min_interval=2, reloader=reloader)
config_watcher = build_configuration('config.yaml', 'my_namespace')
# Load the initial configuration
config_watcher.config_loader()
# Do some work
for item in work:
config_watcher.reload_if_changed()
...
:param config_loader: a function which takes no arguments. It is called
by :func:`reload_if_changed` if the file has been modified
:param filenames: a filename or list of filenames to watch for modifications
:param min_interval: minimum number of seconds to wait between calls to
:func:`os.path.getmtime` to check if a file has been modified.
:param reloader: a function which is called after `config_loader` when a
file has been modified. Defaults to an empty
:class:`ReloadCallbackChain`
:param comparators: a list of classes which support the
:class:`IComparator` interface which are used to determine if a config
file has been modified. Defaults to :class:`MTimeComparator`.
"""
def __init__(
self,
config_loader,
filenames,
min_interval=0,
reloader=None,
comparators=None):
self.config_loader = config_loader
self.filenames = self.get_filename_list(filenames)
self.min_interval = min_interval
self.last_check = time.time()
self.reloader = reloader or ReloadCallbackChain(all_names=True)
comparators = comparators or [MTimeComparator]
self.comparators = [comp(self.filenames) for comp in comparators]
def get_filename_list(self, filenames):
if isinstance(filenames, six.string_types):
filenames = [filenames]
filenames = sorted(os.path.abspath(name) for name in filenames)
if not filenames:
raise ValueError(
"ConfigurationWatcher requires at least one filename to watch")
return filenames
@property
def should_check(self):
return self.last_check + self.min_interval <= time.time()
def reload_if_changed(self, force=False):
"""If the file(s) being watched by this object have changed,
their configuration will be loaded again using `config_loader`.
Otherwise this is a noop.
:param force: If True ignore the `min_interval` and proceed to
file modified comparisons. To force a reload use
:func:`reload` directly.
"""
if (force or self.should_check) and self.file_modified():
return self.reload()
def file_modified(self):
self.last_check = time.time()
return any(comp.has_changed() for comp in self.comparators)
def reload(self):
config_dict = self.config_loader()
self.reloader()
return config_dict
def get_reloader(self):
return self.reloader
def load_config(self):
return self.config_loader()
class IComparator(object):
"""Interface for a comparator which is used by :class:`ConfigurationWatcher`
to determine if a file has been modified since the last check. A comparator
is used to reduce the work required to reload configuration. Comparators
should implement a mechanism that is relatively efficient (and scalable),
so it can be performed frequently.
:param filenames: A list of absolute paths to configuration files.
"""
def __init__(self, filenames):
pass
def has_changed(self):
"""Returns True if any of the files have been modified since the last
call to :func:`has_changed`. Returns False otherwise.
"""
pass
class InodeComparator(object):
"""Compare files by inode and device number. This is a good comparator to
use when your files can change multiple times per second.
"""
def __init__(self, filenames):
self.filenames = filenames
self.inodes = self.get_inodes()
def get_inodes(self):
def get_inode(stbuf):
return stbuf.st_dev, stbuf.st_ino
return [get_inode(os.stat(filename)) for filename in self.filenames]
def has_changed(self):
last_inodes, self.inodes = self.inodes, self.get_inodes()
return last_inodes != self.inodes
def build_compare_func(err_logger=None):
"""Returns a compare_func that can be passed to MTimeComparator.
The returned compare_func first tries os.path.getmtime(filename),
then calls err_logger(filename) if that fails. If err_logger is None,
then it does nothing. err_logger is always called within the context of
an OSError raised by os.path.getmtime(filename). Information on this
error can be retrieved by calling sys.exc_info inside of err_logger."""
def compare_func(filename):
try:
return os.path.getmtime(filename)
except OSError:
if err_logger is not None:
err_logger(filename)
return -1
return compare_func
class MTimeComparator(object):
"""Compare files by modified time, or using compare_func,
if it is not None.
.. note::
Most filesystems only store modified time with second grangularity
so multiple changes within the same second can be ignored.
"""
def __init__(self, filenames, compare_func=None):
self.compare_func = (os.path.getmtime if compare_func is None
else compare_func)
self.filenames_mtimes = {
filename: self.compare_func(filename) for filename in filenames
}
def has_changed(self):
for filename, compare_val in self.filenames_mtimes.items():
current_compare_val = self.compare_func(filename)
if compare_val != current_compare_val:
self.filenames_mtimes[filename] = current_compare_val
return True
return False
class MD5Comparator(object):
"""Compare files by md5 hash of their contents. This comparator will be
slower for larger files, but is more resilient to modifications which only
change mtime, but not the files contents.
"""
def __init__(self, filenames):
self.filenames = filenames
self.hashes = self.get_hashes()
def get_hashes(self):
def build_hash(filename):
hasher = hashlib.md5()
with open(filename, 'rb') as fh:
hasher.update(fh.read())
return hasher.digest()
return [build_hash(filename) for filename in self.filenames]
def has_changed(self):
last_hashes, self.hashes = self.hashes, self.get_hashes()
return last_hashes != self.hashes
class ReloadCallbackChain(object):
"""A chain of callbacks which will be triggered after configuration is
reloaded. Designed to work with :class:`ConfigurationWatcher`.
When this class is called it performs two operations:
* calls :func:`reload` on the `namespace`
* calls all attached callbacks
Usage:
.. code-block:: python
chain = ReloadCallbackChain()
chain.add('some_id', callback_foo)
chain.add('other_id', other_callback)
...
# some time later
chain.remove('some_id')
:param namespace: the name of the namespace to :func:`reload`
:param all_names: if True :func:`reload` all namespaces and ignore the
`namespace` param. Defaults to False
:param callbacks: initial list of tuples to add to the callback chain
"""
def __init__(self, namespace=DEFAULT, all_names=False, callbacks=None):
self.namespace = namespace
self.all_names = all_names
self.callbacks = dict(callbacks or ())
def add(self, identifier, callback):
self.callbacks[identifier] = callback
def remove(self, identifier):
del self.callbacks[identifier]
def __call__(self):
reload(name=self.namespace, all_names=self.all_names)
for callback in six.itervalues(self.callbacks):
callback()
def build_loader_callable(load_func, filename, namespace):
def load_configuration():
get_namespace(namespace).clear()
return load_func(filename, namespace=namespace)
return load_configuration
class ConfigFacade(object):
"""A facade around a :class:`ConfigurationWatcher` and a
:class:`ReloadCallbackChain`. See :func:`ConfigFacade.load`.
When a :class:`ConfigFacade` is loaded it will clear the namespace of
all configuration and load the file into the namespace. If this is not
the behaviour you want, use a :class:`ConfigurationWatcher` instead.
Usage:
.. code-block:: python
import staticconf
watcher = staticconf.ConfigFacade.load(
'config.yaml', # Filename or list of filenames to watch
'my_namespace',
staticconf.YamlConfiguration, # Callable which takes the filename
min_interval=3 # Wait at least 3 seconds before checking modified time
)
watcher.add_callback('identifier', do_this_after_reload)
watcher.reload_if_changed()
"""
def __init__(self, watcher):
self.watcher = watcher
self.callback_chain = watcher.get_reloader()
@classmethod
def load(
cls,
filename,
namespace,
loader_func,
min_interval=0,
comparators=None,
):
"""Create a new :class:`ConfigurationWatcher` and load the initial
configuration by calling `loader_func`.
:param filename: a filename or list of filenames to monitor for changes
:param namespace: the name of a namespace to use when loading
configuration. All config data from `filename` will
end up in a :class:`ConfigNamespace` with this name
:param loader_func: a function which accepts two arguments and uses
loader functions from :mod:`staticconf.loader` to
load configuration data into a namespace. The
arguments are `filename` and `namespace`
:param min_interval: minimum number of seconds to wait between calls to
:func:`os.path.getmtime` to check if a file has
been modified.
:param comparators: a list of classes which support the
:class:`IComparator` interface which are used to determine if a config
file has been modified. See ConfigurationWatcher::__init__.
:returns: a :class:`ConfigFacade`
"""
watcher = ConfigurationWatcher(
build_loader_callable(loader_func, filename, namespace=namespace),
filename,
min_interval=min_interval,
reloader=ReloadCallbackChain(namespace=namespace),
comparators=comparators,
)
watcher.load_config()
return cls(watcher)
def add_callback(self, identifier, callback):
self.callback_chain.add(identifier, callback)
def reload_if_changed(self, force=False):
"""See :func:`ConfigurationWatcher.reload_if_changed` """
self.watcher.reload_if_changed(force=force)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.