repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
ericjang/tdb | tdb/transitive_closure.py | _tchelper | python | def _tchelper(tc_deps,evals,deps):
for e in evals:
if e in tc_deps: # we've already included it
continue
else:
if e in deps: # has additional dependnecies
tc_deps[e]=deps[e]
# add to tc_deps the dependencies of the dependencies
_tchelper(tc_deps,deps[e],deps)
return tc_deps | modifies graph in place | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/transitive_closure.py#L2-L14 | [
"def _tchelper(tc_deps,evals,deps):\n\t\"\"\"\n\tmodifies graph in place\n\t\"\"\"\n\tfor e in evals:\n\t\tif e in tc_deps: # we've already included it\n\t\t\tcontinue\n\t\telse:\n\t\t\tif e in deps: # has additional dependnecies\n\t\t\t\ttc_deps[e]=deps[e]\n\t\t\t\t# add to tc_deps the dependencies of the dependen... |
def transitive_closure(evals,deps):
"""
evals = node names we want values for (i.e. we don't care about any other nodes after
we've evaluated all the eval nodes)
deps = full dependency graph of all nodes
"""
return _tchelper({},evals,deps)
|
ericjang/tdb | tdb/debug_session.py | DebugSession.run | python | def run(self, evals, feed_dict=None, breakpoints=None, break_immediately=False):
if not isinstance(evals,list):
evals=[evals]
if feed_dict is None:
feed_dict={}
if breakpoints is None:
breakpoints=[]
self.state=RUNNING
self._original_evals=evals
self._original_feed_dict=feed_dict
self._exe_order=op_store.compute_exe_order(evals)
self._init_evals_bps(evals, breakpoints)
# convert cache keys to strings
for k,v in feed_dict.items():
if not isinstance(k,str):
k=k.name
self._cache[k]=v
op_store.register_dbsession(self)
if break_immediately:
return self._break()
else:
return self.c() | starts the debug session | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L33-L61 | [
"def c(self):\n\t\"\"\"\n\tcontinue\n\t\"\"\"\n\ti,node=self._get_next_eval()\n\tif node.name in self._bpset:\n\t\tif self.state == RUNNING:\n\t\t\treturn self._break()\n\n\tself.state = RUNNING\n\tself._eval(node)\n\t# increment to next node\n\tself.step=i+1\n\tif self.step < len(self._exe_order):\n\t\treturn self... | class DebugSession(object):
def __init__(self, session=None):
super(DebugSession, self).__init__()
if session is None:
session=tf.InteractiveSession()
_original_evals=None
self.step=0 # index into execution order
self.session=session
self.state=INITIALIZED
self._original_evals=[] # evals passed into self.debug, in order
self._evalset=set() # string names to evaluate
self._bpset=set() # breakpoint names
self._cache={} # key: node names in evalset -> np.ndarray
self._exe_order=[] # list of HTOps, tf.Tensors to be evaluated
###
### PUBLIC METHODS
###
def s(self):
"""
step to the next node in the execution order
"""
next_node=self._exe_order[self.step]
self._eval(next_node)
self.step+=1
if self.step==len(self._exe_order):
return self._finish()
else:
# if stepping, return the value of the node we just
# evaled
return self._break(value=self._cache.get(next_node.name))
def c(self):
"""
continue
"""
i,node=self._get_next_eval()
if node.name in self._bpset:
if self.state == RUNNING:
return self._break()
self.state = RUNNING
self._eval(node)
# increment to next node
self.step=i+1
if self.step < len(self._exe_order):
return self.c()
else:
return self._finish()
def get_values(self):
"""
returns final values (same result as tf.Session.run())
"""
return [self._cache.get(i.name,None) for i in self._original_evals]
def get_exe_queue(self):
return self._exe_order[self.step:]
def get_value(self, node):
"""
retrieve a node value from the cache
"""
if isinstance(node,tf.Tensor):
return self._cache.get(node.name,None)
elif isinstance(node,tf.Operation):
return None
else: # handle ascii, unicode strings
return self._cache.get(node,None)
###
### PRIVATE METHODS
###
def _cache_value(self, tensor, ndarray):
"""
store tensor ndarray value in cache. this is called by python ops
"""
self._cache[tensor.name]=ndarray
def _init_evals_bps(self, evals, breakpoints):
# If an eval or bp is the tf.Placeholder output of a tdb.PythonOp, replace it with its respective PythonOp node
evals2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in evals]
breakpoints2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in breakpoints]
# compute execution order
self._exe_order=op_store.compute_exe_order(evals2) # list of nodes
# compute evaluation set
"""
HTOps may depend on tf.Tensors that are not in eval. We need to have all inputs to HTOps ready
upon evaluation.
1. all evals that were originally specified are added
2. each HTOp in the execution closure needs to be in eval (they won't be eval'ed automatically by Session.run)
3. if an input to an HTOp is a tf.Tensor (not a HT placeholder tensor), it needs to be in eval as well (it's not
tensorflow so we'll have to manually evaluate it). Remember, we don't track Placeholders because we instead
run the HTOps that generate their values.
"""
self._evalset=set([e.name for e in evals2])
for e in self._exe_order:
if isinstance(e,HTOp):
self._evalset.add(e.name)
for t in e.inputs:
if not op_store.is_htop_out(t):
self._evalset.add(t.name)
# compute breakpoint set
self._bpset=set([bp.name for bp in breakpoints2])
def _get_next_eval(self):
n=len(self._exe_order)
o=self._exe_order
return next((i,o[i]) for i in range(self.step,n) if (o[i].name in self._evalset or o[i].name in self._bpset))
def _eval(self, node):
"""
node is a TensorFlow Op or Tensor from self._exe_order
"""
# if node.name == 'Momentum':
# pdb.set_trace()
if isinstance(node,HTOp):
# All Tensors MUST be in the cache.
feed_dict=dict((t,self._cache[t.name]) for t in node.inputs)
node.run(feed_dict) # this will populate self._cache on its own
else: # is a TensorFlow node
if isinstance(node,tf.Tensor):
result=self.session.run(node,self._cache)
self._cache[node.name]=result
else:
# is an operation
if node.type =='Assign' or node.type == 'AssignAdd' or node.type == 'AssignSub':
# special operation that takes in a tensor ref and mutates it
# unfortunately, we end up having to execute nearly the full graph?
# alternatively, find a way to pass the tensor_ref thru the feed_dict
# rather than the tensor values.
self.session.run(node,self._original_feed_dict)
def _break(self,value=None):
self.state=PAUSED
i,next_node=self._get_next_eval()
print('Breakpoint triggered. Next Node: ', next_node.name)
return (self.state,value)
def _finish(self):
self.state=FINISHED
return (self.state, self.get_values())
|
ericjang/tdb | tdb/debug_session.py | DebugSession.s | python | def s(self):
next_node=self._exe_order[self.step]
self._eval(next_node)
self.step+=1
if self.step==len(self._exe_order):
return self._finish()
else:
# if stepping, return the value of the node we just
# evaled
return self._break(value=self._cache.get(next_node.name)) | step to the next node in the execution order | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L63-L75 | [
"def _eval(self, node):\n\t\"\"\"\n\tnode is a TensorFlow Op or Tensor from self._exe_order\n\t\"\"\"\n\t# if node.name == 'Momentum':\n\t# \tpdb.set_trace()\n\tif isinstance(node,HTOp):\n\t\t# All Tensors MUST be in the cache.\n\t\tfeed_dict=dict((t,self._cache[t.name]) for t in node.inputs)\n\t\tnode.run(feed_dic... | class DebugSession(object):
def __init__(self, session=None):
super(DebugSession, self).__init__()
if session is None:
session=tf.InteractiveSession()
_original_evals=None
self.step=0 # index into execution order
self.session=session
self.state=INITIALIZED
self._original_evals=[] # evals passed into self.debug, in order
self._evalset=set() # string names to evaluate
self._bpset=set() # breakpoint names
self._cache={} # key: node names in evalset -> np.ndarray
self._exe_order=[] # list of HTOps, tf.Tensors to be evaluated
###
### PUBLIC METHODS
###
def run(self, evals, feed_dict=None, breakpoints=None, break_immediately=False):
"""
starts the debug session
"""
if not isinstance(evals,list):
evals=[evals]
if feed_dict is None:
feed_dict={}
if breakpoints is None:
breakpoints=[]
self.state=RUNNING
self._original_evals=evals
self._original_feed_dict=feed_dict
self._exe_order=op_store.compute_exe_order(evals)
self._init_evals_bps(evals, breakpoints)
# convert cache keys to strings
for k,v in feed_dict.items():
if not isinstance(k,str):
k=k.name
self._cache[k]=v
op_store.register_dbsession(self)
if break_immediately:
return self._break()
else:
return self.c()
def c(self):
"""
continue
"""
i,node=self._get_next_eval()
if node.name in self._bpset:
if self.state == RUNNING:
return self._break()
self.state = RUNNING
self._eval(node)
# increment to next node
self.step=i+1
if self.step < len(self._exe_order):
return self.c()
else:
return self._finish()
def get_values(self):
"""
returns final values (same result as tf.Session.run())
"""
return [self._cache.get(i.name,None) for i in self._original_evals]
def get_exe_queue(self):
return self._exe_order[self.step:]
def get_value(self, node):
"""
retrieve a node value from the cache
"""
if isinstance(node,tf.Tensor):
return self._cache.get(node.name,None)
elif isinstance(node,tf.Operation):
return None
else: # handle ascii, unicode strings
return self._cache.get(node,None)
###
### PRIVATE METHODS
###
def _cache_value(self, tensor, ndarray):
"""
store tensor ndarray value in cache. this is called by python ops
"""
self._cache[tensor.name]=ndarray
def _init_evals_bps(self, evals, breakpoints):
# If an eval or bp is the tf.Placeholder output of a tdb.PythonOp, replace it with its respective PythonOp node
evals2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in evals]
breakpoints2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in breakpoints]
# compute execution order
self._exe_order=op_store.compute_exe_order(evals2) # list of nodes
# compute evaluation set
"""
HTOps may depend on tf.Tensors that are not in eval. We need to have all inputs to HTOps ready
upon evaluation.
1. all evals that were originally specified are added
2. each HTOp in the execution closure needs to be in eval (they won't be eval'ed automatically by Session.run)
3. if an input to an HTOp is a tf.Tensor (not a HT placeholder tensor), it needs to be in eval as well (it's not
tensorflow so we'll have to manually evaluate it). Remember, we don't track Placeholders because we instead
run the HTOps that generate their values.
"""
self._evalset=set([e.name for e in evals2])
for e in self._exe_order:
if isinstance(e,HTOp):
self._evalset.add(e.name)
for t in e.inputs:
if not op_store.is_htop_out(t):
self._evalset.add(t.name)
# compute breakpoint set
self._bpset=set([bp.name for bp in breakpoints2])
def _get_next_eval(self):
n=len(self._exe_order)
o=self._exe_order
return next((i,o[i]) for i in range(self.step,n) if (o[i].name in self._evalset or o[i].name in self._bpset))
def _eval(self, node):
"""
node is a TensorFlow Op or Tensor from self._exe_order
"""
# if node.name == 'Momentum':
# pdb.set_trace()
if isinstance(node,HTOp):
# All Tensors MUST be in the cache.
feed_dict=dict((t,self._cache[t.name]) for t in node.inputs)
node.run(feed_dict) # this will populate self._cache on its own
else: # is a TensorFlow node
if isinstance(node,tf.Tensor):
result=self.session.run(node,self._cache)
self._cache[node.name]=result
else:
# is an operation
if node.type =='Assign' or node.type == 'AssignAdd' or node.type == 'AssignSub':
# special operation that takes in a tensor ref and mutates it
# unfortunately, we end up having to execute nearly the full graph?
# alternatively, find a way to pass the tensor_ref thru the feed_dict
# rather than the tensor values.
self.session.run(node,self._original_feed_dict)
def _break(self,value=None):
self.state=PAUSED
i,next_node=self._get_next_eval()
print('Breakpoint triggered. Next Node: ', next_node.name)
return (self.state,value)
def _finish(self):
self.state=FINISHED
return (self.state, self.get_values())
|
ericjang/tdb | tdb/debug_session.py | DebugSession.c | python | def c(self):
i,node=self._get_next_eval()
if node.name in self._bpset:
if self.state == RUNNING:
return self._break()
self.state = RUNNING
self._eval(node)
# increment to next node
self.step=i+1
if self.step < len(self._exe_order):
return self.c()
else:
return self._finish() | continue | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L77-L93 | [
"def c(self):\n\t\"\"\"\n\tcontinue\n\t\"\"\"\n\ti,node=self._get_next_eval()\n\tif node.name in self._bpset:\n\t\tif self.state == RUNNING:\n\t\t\treturn self._break()\n\n\tself.state = RUNNING\n\tself._eval(node)\n\t# increment to next node\n\tself.step=i+1\n\tif self.step < len(self._exe_order):\n\t\treturn self... | class DebugSession(object):
def __init__(self, session=None):
super(DebugSession, self).__init__()
if session is None:
session=tf.InteractiveSession()
_original_evals=None
self.step=0 # index into execution order
self.session=session
self.state=INITIALIZED
self._original_evals=[] # evals passed into self.debug, in order
self._evalset=set() # string names to evaluate
self._bpset=set() # breakpoint names
self._cache={} # key: node names in evalset -> np.ndarray
self._exe_order=[] # list of HTOps, tf.Tensors to be evaluated
###
### PUBLIC METHODS
###
def run(self, evals, feed_dict=None, breakpoints=None, break_immediately=False):
"""
starts the debug session
"""
if not isinstance(evals,list):
evals=[evals]
if feed_dict is None:
feed_dict={}
if breakpoints is None:
breakpoints=[]
self.state=RUNNING
self._original_evals=evals
self._original_feed_dict=feed_dict
self._exe_order=op_store.compute_exe_order(evals)
self._init_evals_bps(evals, breakpoints)
# convert cache keys to strings
for k,v in feed_dict.items():
if not isinstance(k,str):
k=k.name
self._cache[k]=v
op_store.register_dbsession(self)
if break_immediately:
return self._break()
else:
return self.c()
def s(self):
"""
step to the next node in the execution order
"""
next_node=self._exe_order[self.step]
self._eval(next_node)
self.step+=1
if self.step==len(self._exe_order):
return self._finish()
else:
# if stepping, return the value of the node we just
# evaled
return self._break(value=self._cache.get(next_node.name))
def c(self):
"""
continue
"""
i,node=self._get_next_eval()
if node.name in self._bpset:
if self.state == RUNNING:
return self._break()
self.state = RUNNING
self._eval(node)
# increment to next node
self.step=i+1
if self.step < len(self._exe_order):
return self.c()
else:
return self._finish()
def get_values(self):
"""
returns final values (same result as tf.Session.run())
"""
return [self._cache.get(i.name,None) for i in self._original_evals]
def get_exe_queue(self):
return self._exe_order[self.step:]
def get_value(self, node):
"""
retrieve a node value from the cache
"""
if isinstance(node,tf.Tensor):
return self._cache.get(node.name,None)
elif isinstance(node,tf.Operation):
return None
else: # handle ascii, unicode strings
return self._cache.get(node,None)
###
### PRIVATE METHODS
###
def _cache_value(self, tensor, ndarray):
"""
store tensor ndarray value in cache. this is called by python ops
"""
self._cache[tensor.name]=ndarray
def _init_evals_bps(self, evals, breakpoints):
# If an eval or bp is the tf.Placeholder output of a tdb.PythonOp, replace it with its respective PythonOp node
evals2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in evals]
breakpoints2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in breakpoints]
# compute execution order
self._exe_order=op_store.compute_exe_order(evals2) # list of nodes
# compute evaluation set
"""
HTOps may depend on tf.Tensors that are not in eval. We need to have all inputs to HTOps ready
upon evaluation.
1. all evals that were originally specified are added
2. each HTOp in the execution closure needs to be in eval (they won't be eval'ed automatically by Session.run)
3. if an input to an HTOp is a tf.Tensor (not a HT placeholder tensor), it needs to be in eval as well (it's not
tensorflow so we'll have to manually evaluate it). Remember, we don't track Placeholders because we instead
run the HTOps that generate their values.
"""
self._evalset=set([e.name for e in evals2])
for e in self._exe_order:
if isinstance(e,HTOp):
self._evalset.add(e.name)
for t in e.inputs:
if not op_store.is_htop_out(t):
self._evalset.add(t.name)
# compute breakpoint set
self._bpset=set([bp.name for bp in breakpoints2])
def _get_next_eval(self):
n=len(self._exe_order)
o=self._exe_order
return next((i,o[i]) for i in range(self.step,n) if (o[i].name in self._evalset or o[i].name in self._bpset))
def _eval(self, node):
"""
node is a TensorFlow Op or Tensor from self._exe_order
"""
# if node.name == 'Momentum':
# pdb.set_trace()
if isinstance(node,HTOp):
# All Tensors MUST be in the cache.
feed_dict=dict((t,self._cache[t.name]) for t in node.inputs)
node.run(feed_dict) # this will populate self._cache on its own
else: # is a TensorFlow node
if isinstance(node,tf.Tensor):
result=self.session.run(node,self._cache)
self._cache[node.name]=result
else:
# is an operation
if node.type =='Assign' or node.type == 'AssignAdd' or node.type == 'AssignSub':
# special operation that takes in a tensor ref and mutates it
# unfortunately, we end up having to execute nearly the full graph?
# alternatively, find a way to pass the tensor_ref thru the feed_dict
# rather than the tensor values.
self.session.run(node,self._original_feed_dict)
def _break(self,value=None):
self.state=PAUSED
i,next_node=self._get_next_eval()
print('Breakpoint triggered. Next Node: ', next_node.name)
return (self.state,value)
def _finish(self):
self.state=FINISHED
return (self.state, self.get_values())
|
ericjang/tdb | tdb/debug_session.py | DebugSession.get_values | python | def get_values(self):
return [self._cache.get(i.name,None) for i in self._original_evals] | returns final values (same result as tf.Session.run()) | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L95-L99 | null | class DebugSession(object):
def __init__(self, session=None):
super(DebugSession, self).__init__()
if session is None:
session=tf.InteractiveSession()
_original_evals=None
self.step=0 # index into execution order
self.session=session
self.state=INITIALIZED
self._original_evals=[] # evals passed into self.debug, in order
self._evalset=set() # string names to evaluate
self._bpset=set() # breakpoint names
self._cache={} # key: node names in evalset -> np.ndarray
self._exe_order=[] # list of HTOps, tf.Tensors to be evaluated
###
### PUBLIC METHODS
###
def run(self, evals, feed_dict=None, breakpoints=None, break_immediately=False):
"""
starts the debug session
"""
if not isinstance(evals,list):
evals=[evals]
if feed_dict is None:
feed_dict={}
if breakpoints is None:
breakpoints=[]
self.state=RUNNING
self._original_evals=evals
self._original_feed_dict=feed_dict
self._exe_order=op_store.compute_exe_order(evals)
self._init_evals_bps(evals, breakpoints)
# convert cache keys to strings
for k,v in feed_dict.items():
if not isinstance(k,str):
k=k.name
self._cache[k]=v
op_store.register_dbsession(self)
if break_immediately:
return self._break()
else:
return self.c()
def s(self):
"""
step to the next node in the execution order
"""
next_node=self._exe_order[self.step]
self._eval(next_node)
self.step+=1
if self.step==len(self._exe_order):
return self._finish()
else:
# if stepping, return the value of the node we just
# evaled
return self._break(value=self._cache.get(next_node.name))
def c(self):
"""
continue
"""
i,node=self._get_next_eval()
if node.name in self._bpset:
if self.state == RUNNING:
return self._break()
self.state = RUNNING
self._eval(node)
# increment to next node
self.step=i+1
if self.step < len(self._exe_order):
return self.c()
else:
return self._finish()
def get_exe_queue(self):
return self._exe_order[self.step:]
def get_value(self, node):
"""
retrieve a node value from the cache
"""
if isinstance(node,tf.Tensor):
return self._cache.get(node.name,None)
elif isinstance(node,tf.Operation):
return None
else: # handle ascii, unicode strings
return self._cache.get(node,None)
###
### PRIVATE METHODS
###
def _cache_value(self, tensor, ndarray):
"""
store tensor ndarray value in cache. this is called by python ops
"""
self._cache[tensor.name]=ndarray
def _init_evals_bps(self, evals, breakpoints):
# If an eval or bp is the tf.Placeholder output of a tdb.PythonOp, replace it with its respective PythonOp node
evals2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in evals]
breakpoints2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in breakpoints]
# compute execution order
self._exe_order=op_store.compute_exe_order(evals2) # list of nodes
# compute evaluation set
"""
HTOps may depend on tf.Tensors that are not in eval. We need to have all inputs to HTOps ready
upon evaluation.
1. all evals that were originally specified are added
2. each HTOp in the execution closure needs to be in eval (they won't be eval'ed automatically by Session.run)
3. if an input to an HTOp is a tf.Tensor (not a HT placeholder tensor), it needs to be in eval as well (it's not
tensorflow so we'll have to manually evaluate it). Remember, we don't track Placeholders because we instead
run the HTOps that generate their values.
"""
self._evalset=set([e.name for e in evals2])
for e in self._exe_order:
if isinstance(e,HTOp):
self._evalset.add(e.name)
for t in e.inputs:
if not op_store.is_htop_out(t):
self._evalset.add(t.name)
# compute breakpoint set
self._bpset=set([bp.name for bp in breakpoints2])
def _get_next_eval(self):
n=len(self._exe_order)
o=self._exe_order
return next((i,o[i]) for i in range(self.step,n) if (o[i].name in self._evalset or o[i].name in self._bpset))
def _eval(self, node):
"""
node is a TensorFlow Op or Tensor from self._exe_order
"""
# if node.name == 'Momentum':
# pdb.set_trace()
if isinstance(node,HTOp):
# All Tensors MUST be in the cache.
feed_dict=dict((t,self._cache[t.name]) for t in node.inputs)
node.run(feed_dict) # this will populate self._cache on its own
else: # is a TensorFlow node
if isinstance(node,tf.Tensor):
result=self.session.run(node,self._cache)
self._cache[node.name]=result
else:
# is an operation
if node.type =='Assign' or node.type == 'AssignAdd' or node.type == 'AssignSub':
# special operation that takes in a tensor ref and mutates it
# unfortunately, we end up having to execute nearly the full graph?
# alternatively, find a way to pass the tensor_ref thru the feed_dict
# rather than the tensor values.
self.session.run(node,self._original_feed_dict)
def _break(self,value=None):
self.state=PAUSED
i,next_node=self._get_next_eval()
print('Breakpoint triggered. Next Node: ', next_node.name)
return (self.state,value)
def _finish(self):
self.state=FINISHED
return (self.state, self.get_values())
|
ericjang/tdb | tdb/debug_session.py | DebugSession.get_value | python | def get_value(self, node):
if isinstance(node,tf.Tensor):
return self._cache.get(node.name,None)
elif isinstance(node,tf.Operation):
return None
else: # handle ascii, unicode strings
return self._cache.get(node,None) | retrieve a node value from the cache | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L104-L113 | null | class DebugSession(object):
def __init__(self, session=None):
super(DebugSession, self).__init__()
if session is None:
session=tf.InteractiveSession()
_original_evals=None
self.step=0 # index into execution order
self.session=session
self.state=INITIALIZED
self._original_evals=[] # evals passed into self.debug, in order
self._evalset=set() # string names to evaluate
self._bpset=set() # breakpoint names
self._cache={} # key: node names in evalset -> np.ndarray
self._exe_order=[] # list of HTOps, tf.Tensors to be evaluated
###
### PUBLIC METHODS
###
def run(self, evals, feed_dict=None, breakpoints=None, break_immediately=False):
"""
starts the debug session
"""
if not isinstance(evals,list):
evals=[evals]
if feed_dict is None:
feed_dict={}
if breakpoints is None:
breakpoints=[]
self.state=RUNNING
self._original_evals=evals
self._original_feed_dict=feed_dict
self._exe_order=op_store.compute_exe_order(evals)
self._init_evals_bps(evals, breakpoints)
# convert cache keys to strings
for k,v in feed_dict.items():
if not isinstance(k,str):
k=k.name
self._cache[k]=v
op_store.register_dbsession(self)
if break_immediately:
return self._break()
else:
return self.c()
def s(self):
"""
step to the next node in the execution order
"""
next_node=self._exe_order[self.step]
self._eval(next_node)
self.step+=1
if self.step==len(self._exe_order):
return self._finish()
else:
# if stepping, return the value of the node we just
# evaled
return self._break(value=self._cache.get(next_node.name))
def c(self):
"""
continue
"""
i,node=self._get_next_eval()
if node.name in self._bpset:
if self.state == RUNNING:
return self._break()
self.state = RUNNING
self._eval(node)
# increment to next node
self.step=i+1
if self.step < len(self._exe_order):
return self.c()
else:
return self._finish()
def get_values(self):
"""
returns final values (same result as tf.Session.run())
"""
return [self._cache.get(i.name,None) for i in self._original_evals]
def get_exe_queue(self):
return self._exe_order[self.step:]
###
### PRIVATE METHODS
###
def _cache_value(self, tensor, ndarray):
"""
store tensor ndarray value in cache. this is called by python ops
"""
self._cache[tensor.name]=ndarray
def _init_evals_bps(self, evals, breakpoints):
# If an eval or bp is the tf.Placeholder output of a tdb.PythonOp, replace it with its respective PythonOp node
evals2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in evals]
breakpoints2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in breakpoints]
# compute execution order
self._exe_order=op_store.compute_exe_order(evals2) # list of nodes
# compute evaluation set
"""
HTOps may depend on tf.Tensors that are not in eval. We need to have all inputs to HTOps ready
upon evaluation.
1. all evals that were originally specified are added
2. each HTOp in the execution closure needs to be in eval (they won't be eval'ed automatically by Session.run)
3. if an input to an HTOp is a tf.Tensor (not a HT placeholder tensor), it needs to be in eval as well (it's not
tensorflow so we'll have to manually evaluate it). Remember, we don't track Placeholders because we instead
run the HTOps that generate their values.
"""
self._evalset=set([e.name for e in evals2])
for e in self._exe_order:
if isinstance(e,HTOp):
self._evalset.add(e.name)
for t in e.inputs:
if not op_store.is_htop_out(t):
self._evalset.add(t.name)
# compute breakpoint set
self._bpset=set([bp.name for bp in breakpoints2])
def _get_next_eval(self):
n=len(self._exe_order)
o=self._exe_order
return next((i,o[i]) for i in range(self.step,n) if (o[i].name in self._evalset or o[i].name in self._bpset))
def _eval(self, node):
"""
node is a TensorFlow Op or Tensor from self._exe_order
"""
# if node.name == 'Momentum':
# pdb.set_trace()
if isinstance(node,HTOp):
# All Tensors MUST be in the cache.
feed_dict=dict((t,self._cache[t.name]) for t in node.inputs)
node.run(feed_dict) # this will populate self._cache on its own
else: # is a TensorFlow node
if isinstance(node,tf.Tensor):
result=self.session.run(node,self._cache)
self._cache[node.name]=result
else:
# is an operation
if node.type =='Assign' or node.type == 'AssignAdd' or node.type == 'AssignSub':
# special operation that takes in a tensor ref and mutates it
# unfortunately, we end up having to execute nearly the full graph?
# alternatively, find a way to pass the tensor_ref thru the feed_dict
# rather than the tensor values.
self.session.run(node,self._original_feed_dict)
def _break(self,value=None):
self.state=PAUSED
i,next_node=self._get_next_eval()
print('Breakpoint triggered. Next Node: ', next_node.name)
return (self.state,value)
def _finish(self):
self.state=FINISHED
return (self.state, self.get_values())
|
ericjang/tdb | tdb/debug_session.py | DebugSession._init_evals_bps | python | def _init_evals_bps(self, evals, breakpoints):
# If an eval or bp is the tf.Placeholder output of a tdb.PythonOp, replace it with its respective PythonOp node
evals2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in evals]
breakpoints2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in breakpoints]
# compute execution order
self._exe_order=op_store.compute_exe_order(evals2) # list of nodes
# compute evaluation set
self._evalset=set([e.name for e in evals2])
for e in self._exe_order:
if isinstance(e,HTOp):
self._evalset.add(e.name)
for t in e.inputs:
if not op_store.is_htop_out(t):
self._evalset.add(t.name)
# compute breakpoint set
self._bpset=set([bp.name for bp in breakpoints2]) | HTOps may depend on tf.Tensors that are not in eval. We need to have all inputs to HTOps ready
upon evaluation.
1. all evals that were originally specified are added
2. each HTOp in the execution closure needs to be in eval (they won't be eval'ed automatically by Session.run)
3. if an input to an HTOp is a tf.Tensor (not a HT placeholder tensor), it needs to be in eval as well (it's not
tensorflow so we'll have to manually evaluate it). Remember, we don't track Placeholders because we instead
run the HTOps that generate their values. | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L125-L151 | null | class DebugSession(object):
def __init__(self, session=None):
super(DebugSession, self).__init__()
if session is None:
session=tf.InteractiveSession()
_original_evals=None
self.step=0 # index into execution order
self.session=session
self.state=INITIALIZED
self._original_evals=[] # evals passed into self.debug, in order
self._evalset=set() # string names to evaluate
self._bpset=set() # breakpoint names
self._cache={} # key: node names in evalset -> np.ndarray
self._exe_order=[] # list of HTOps, tf.Tensors to be evaluated
###
### PUBLIC METHODS
###
def run(self, evals, feed_dict=None, breakpoints=None, break_immediately=False):
"""
starts the debug session
"""
if not isinstance(evals,list):
evals=[evals]
if feed_dict is None:
feed_dict={}
if breakpoints is None:
breakpoints=[]
self.state=RUNNING
self._original_evals=evals
self._original_feed_dict=feed_dict
self._exe_order=op_store.compute_exe_order(evals)
self._init_evals_bps(evals, breakpoints)
# convert cache keys to strings
for k,v in feed_dict.items():
if not isinstance(k,str):
k=k.name
self._cache[k]=v
op_store.register_dbsession(self)
if break_immediately:
return self._break()
else:
return self.c()
def s(self):
"""
step to the next node in the execution order
"""
next_node=self._exe_order[self.step]
self._eval(next_node)
self.step+=1
if self.step==len(self._exe_order):
return self._finish()
else:
# if stepping, return the value of the node we just
# evaled
return self._break(value=self._cache.get(next_node.name))
def c(self):
"""
continue
"""
i,node=self._get_next_eval()
if node.name in self._bpset:
if self.state == RUNNING:
return self._break()
self.state = RUNNING
self._eval(node)
# increment to next node
self.step=i+1
if self.step < len(self._exe_order):
return self.c()
else:
return self._finish()
def get_values(self):
"""
returns final values (same result as tf.Session.run())
"""
return [self._cache.get(i.name,None) for i in self._original_evals]
def get_exe_queue(self):
return self._exe_order[self.step:]
def get_value(self, node):
"""
retrieve a node value from the cache
"""
if isinstance(node,tf.Tensor):
return self._cache.get(node.name,None)
elif isinstance(node,tf.Operation):
return None
else: # handle ascii, unicode strings
return self._cache.get(node,None)
###
### PRIVATE METHODS
###
def _cache_value(self, tensor, ndarray):
"""
store tensor ndarray value in cache. this is called by python ops
"""
self._cache[tensor.name]=ndarray
def _get_next_eval(self):
n=len(self._exe_order)
o=self._exe_order
return next((i,o[i]) for i in range(self.step,n) if (o[i].name in self._evalset or o[i].name in self._bpset))
def _eval(self, node):
"""
node is a TensorFlow Op or Tensor from self._exe_order
"""
# if node.name == 'Momentum':
# pdb.set_trace()
if isinstance(node,HTOp):
# All Tensors MUST be in the cache.
feed_dict=dict((t,self._cache[t.name]) for t in node.inputs)
node.run(feed_dict) # this will populate self._cache on its own
else: # is a TensorFlow node
if isinstance(node,tf.Tensor):
result=self.session.run(node,self._cache)
self._cache[node.name]=result
else:
# is an operation
if node.type =='Assign' or node.type == 'AssignAdd' or node.type == 'AssignSub':
# special operation that takes in a tensor ref and mutates it
# unfortunately, we end up having to execute nearly the full graph?
# alternatively, find a way to pass the tensor_ref thru the feed_dict
# rather than the tensor values.
self.session.run(node,self._original_feed_dict)
def _break(self,value=None):
self.state=PAUSED
i,next_node=self._get_next_eval()
print('Breakpoint triggered. Next Node: ', next_node.name)
return (self.state,value)
def _finish(self):
self.state=FINISHED
return (self.state, self.get_values())
|
ericjang/tdb | tdb/debug_session.py | DebugSession._eval | python | def _eval(self, node):
# if node.name == 'Momentum':
# pdb.set_trace()
if isinstance(node,HTOp):
# All Tensors MUST be in the cache.
feed_dict=dict((t,self._cache[t.name]) for t in node.inputs)
node.run(feed_dict) # this will populate self._cache on its own
else: # is a TensorFlow node
if isinstance(node,tf.Tensor):
result=self.session.run(node,self._cache)
self._cache[node.name]=result
else:
# is an operation
if node.type =='Assign' or node.type == 'AssignAdd' or node.type == 'AssignSub':
# special operation that takes in a tensor ref and mutates it
# unfortunately, we end up having to execute nearly the full graph?
# alternatively, find a way to pass the tensor_ref thru the feed_dict
# rather than the tensor values.
self.session.run(node,self._original_feed_dict) | node is a TensorFlow Op or Tensor from self._exe_order | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/debug_session.py#L158-L179 | null | class DebugSession(object):
def __init__(self, session=None):
    """Create a debug session wrapping a TensorFlow session.

    Args:
        session: an existing tf.Session to execute graph nodes in. When
            None, a new tf.InteractiveSession is created.
    """
    super(DebugSession, self).__init__()
    if session is None:
        session = tf.InteractiveSession()
    # (removed a dead local `_original_evals=None` -- it was never read and
    # was shadowed by the instance attribute assigned below)
    self.step = 0                # index into execution order
    self.session = session
    self.state = INITIALIZED
    self._original_evals = []    # evals passed into run(), in order
    self._evalset = set()        # string names to evaluate
    self._bpset = set()          # breakpoint names
    self._cache = {}             # node name in evalset -> np.ndarray
    self._exe_order = []         # list of HTOps, tf.Tensors to be evaluated
###
### PUBLIC METHODS
###
def run(self, evals, feed_dict=None, breakpoints=None, break_immediately=False):
    """Start the debug session.

    Args:
        evals: a node, or list of nodes, whose values are wanted (as in
            tf.Session.run).
        feed_dict: optional mapping of input nodes (or their string names)
            to values; keys are normalized to string names below.
        breakpoints: optional list of nodes to pause on.
        break_immediately: when True, pause before evaluating anything
            instead of continuing to the first breakpoint.

    Returns:
        A (state, value) tuple produced by _break() / _finish().
    """
    # Normalize the single-node / missing-argument call forms.
    if not isinstance(evals,list):
        evals=[evals]
    if feed_dict is None:
        feed_dict={}
    if breakpoints is None:
        breakpoints=[]
    self.state=RUNNING
    self._original_evals=evals
    self._original_feed_dict=feed_dict
    # NOTE(review): _init_evals_bps() recomputes self._exe_order from the
    # HTOp-substituted evals, overwriting the value assigned on this line.
    self._exe_order=op_store.compute_exe_order(evals)
    self._init_evals_bps(evals, breakpoints)
    # convert cache keys to strings
    for k,v in feed_dict.items():
        if not isinstance(k,str):
            k=k.name
        self._cache[k]=v
    op_store.register_dbsession(self)
    if break_immediately:
        return self._break()
    else:
        return self.c()
def s(self):
"""
step to the next node in the execution order
"""
next_node=self._exe_order[self.step]
self._eval(next_node)
self.step+=1
if self.step==len(self._exe_order):
return self._finish()
else:
# if stepping, return the value of the node we just
# evaled
return self._break(value=self._cache.get(next_node.name))
def c(self):
    """Continue execution until the next breakpoint or the end of the graph.

    Returns:
        The (state, value) tuple from _break() when a breakpoint is hit
        while running, or from _finish() when all nodes are evaluated.

    Implementation note: the original implementation tail-recursed
    (`return self.c()`) once per eval node, which can raise RecursionError
    on graphs with many nodes; this version iterates instead. The execution
    trace is otherwise identical.
    """
    while True:
        i, node = self._get_next_eval()
        if node.name in self._bpset:
            # Only pause if we were actually running; if we just resumed
            # from a pause on this node, fall through and evaluate it.
            if self.state == RUNNING:
                return self._break()
        self.state = RUNNING
        self._eval(node)
        # increment to next node
        self.step = i + 1
        if self.step >= len(self._exe_order):
            return self._finish()
def get_values(self):
"""
returns final values (same result as tf.Session.run())
"""
return [self._cache.get(i.name,None) for i in self._original_evals]
def get_exe_queue(self):
return self._exe_order[self.step:]
def get_value(self, node):
"""
retrieve a node value from the cache
"""
if isinstance(node,tf.Tensor):
return self._cache.get(node.name,None)
elif isinstance(node,tf.Operation):
return None
else: # handle ascii, unicode strings
return self._cache.get(node,None)
###
### PRIVATE METHODS
###
def _cache_value(self, tensor, ndarray):
"""
store tensor ndarray value in cache. this is called by python ops
"""
self._cache[tensor.name]=ndarray
def _init_evals_bps(self, evals, breakpoints):
# If an eval or bp is the tf.Placeholder output of a tdb.PythonOp, replace it with its respective PythonOp node
evals2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in evals]
breakpoints2=[op_store.get_op(t) if op_store.is_htop_out(t) else t for t in breakpoints]
# compute execution order
self._exe_order=op_store.compute_exe_order(evals2) # list of nodes
# compute evaluation set
"""
HTOps may depend on tf.Tensors that are not in eval. We need to have all inputs to HTOps ready
upon evaluation.
1. all evals that were originally specified are added
2. each HTOp in the execution closure needs to be in eval (they won't be eval'ed automatically by Session.run)
3. if an input to an HTOp is a tf.Tensor (not a HT placeholder tensor), it needs to be in eval as well (it's not
tensorflow so we'll have to manually evaluate it). Remember, we don't track Placeholders because we instead
run the HTOps that generate their values.
"""
self._evalset=set([e.name for e in evals2])
for e in self._exe_order:
if isinstance(e,HTOp):
self._evalset.add(e.name)
for t in e.inputs:
if not op_store.is_htop_out(t):
self._evalset.add(t.name)
# compute breakpoint set
self._bpset=set([bp.name for bp in breakpoints2])
def _get_next_eval(self):
n=len(self._exe_order)
o=self._exe_order
return next((i,o[i]) for i in range(self.step,n) if (o[i].name in self._evalset or o[i].name in self._bpset))
def _break(self,value=None):
self.state=PAUSED
i,next_node=self._get_next_eval()
print('Breakpoint triggered. Next Node: ', next_node.name)
return (self.state,value)
def _finish(self):
self.state=FINISHED
return (self.state, self.get_values())
|
ericjang/tdb | tdb/examples/mnist.py | extract_data | python | def extract_data(filename, num_images):
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
return data | Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5]. | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/examples/mnist.py#L30-L42 | null | """
builds a simple mnist model
"""
import gzip
import numpy as np
import re
import sys
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 1
TEST_SIZE=55000
TRAIN_SIZE=10000
# DATA PRE-PROCESSING
def extract_labels(filename, num_images):
"""
Extract the labels into a 1-hot matrix [image index, label index].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = np.frombuffer(buf, dtype=np.uint8)
# Convert to dense 1-hot representation.
return (np.arange(NUM_LABELS) == labels[:, None]).astype(np.float32)
def get_data(data_root):
train_data_filename = data_root+'train-images-idx3-ubyte.gz'
train_labels_filename = data_root+'train-labels-idx1-ubyte.gz'
test_data_filename = data_root+'t10k-images-idx3-ubyte.gz'
test_labels_filename = data_root+'t10k-labels-idx1-ubyte.gz'
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
validation_data = train_data[:VALIDATION_SIZE, :, :, :]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, :, :, :]
train_labels = train_labels[VALIDATION_SIZE:]
global TRAIN_SIZE, TEST_SIZE
TRAIN_SIZE=train_labels.shape[0]
TEST_SIZE=test_labels.shape[0]
return train_data, train_labels, validation_data, validation_labels, test_data, test_labels
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measure the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tf.histogram_summary(x.name + '/activations', x)
tf.scalar_summary(x.name + '/sparsity', tf.nn.zero_fraction(x))
# MODEL BUILDING
def build_model():
"""
Builds the computation graph consisting of training/testing LeNet
train data - used for learning
validation data - used for printing progress (does not impact learning)
test data - used for printing final test error
"""
# training data
train_data_node = tf.placeholder(tf.float32,shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
train_labels_node = tf.placeholder(tf.float32,shape=(BATCH_SIZE, NUM_LABELS))
validation_data_node= tf.placeholder(tf.float32,shape=(VALIDATION_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
test_data_node=tf.placeholder(tf.float32,shape=(TEST_SIZE,IMAGE_SIZE,IMAGE_SIZE,NUM_CHANNELS))
# validation dataset held in a single constant node
# validation_data_node = tf.constant(validation_data)
# test_data_node = tf.constant(test_data)
# LEARNABLE WEIGHT NODES SHARED BETWEEN
conv1_weights = tf.Variable(tf.truncated_normal([5, 5, NUM_CHANNELS, 32],stddev=0.1,seed=SEED))
conv1_biases = tf.Variable(tf.zeros([32]))
conv2_weights = tf.Variable(tf.truncated_normal([5, 5, 32, 64],stddev=0.1,seed=SEED))
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]))
fc1_weights = tf.Variable(tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],stddev=0.1,seed=SEED))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[512]))
fc2_weights = tf.Variable(tf.truncated_normal([512, NUM_LABELS],stddev=0.1,seed=SEED))
fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))
# LENET
def build_lenet(data,train=False):
# subroutine for wiring up nodes and weights to training and evaluation LeNets
conv1 = tf.nn.conv2d(data,conv1_weights,strides=[1, 1, 1, 1],padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
pool1 = tf.nn.max_pool(relu1,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME')
conv2 = tf.nn.conv2d(pool1,conv2_weights,strides=[1, 1, 1, 1],padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
pool2 = tf.nn.max_pool(relu2,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME')
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool2.get_shape().as_list()
reshape = tf.reshape(pool2,[pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
fc1 = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
fc1 = tf.nn.dropout(fc1, 0.5, seed=SEED)
# append summary ops to train
_activation_summary(conv1)
_activation_summary(fc1)
fc2 = tf.matmul(fc1, fc2_weights) + fc2_biases
return fc2
# TRAINING LOSS / REGULARIZATION NODES
logits = build_lenet(train_data_node, True)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, train_labels_node))
tf.scalar_summary(loss.op.name,loss)
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) + tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# OPTIMIZER NODES
batch = tf.Variable(0)
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
TRAIN_SIZE, # Decay step.
0.95, # Decay rate.
staircase=True)
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate,0.9).minimize(loss,global_step=batch)
# # Predictions for the minibatch, validation set and test set.
train_prediction = tf.nn.softmax(logits)
# # We'll compute them only once in a while by calling their {eval()} method.
validation_prediction = tf.nn.softmax(build_lenet(validation_data_node))
test_prediction = tf.nn.softmax(build_lenet(test_data_node))
summaries=tf.merge_all_summaries()
# return input nodes and output nodes
return (train_data_node,
train_labels_node,
validation_data_node,
test_data_node,
train_prediction,
validation_prediction,
test_prediction,
conv1_weights,
conv2_weights,
fc1_weights,
fc2_weights,
optimizer,
loss,
learning_rate,
summaries)
def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and 1-hot labels."""
return 100.0 - (
100.0 *
np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /
predictions.shape[0])
def main():
# get dataset as numpy arrays
train_data, train_labels, validation_data, validation_labels, test_data, test_labels = get_data()
#pdb.set_trace()
# build net (return inputs)
(train_data_node,
train_labels_node,
validation_data_node,
test_data_node,
optimizer,
loss,
learning_rate,
train_prediction,
validation_prediction,
test_prediction,
summaries) = build_model()
with tf.Session() as s:
# Run all the initializers to prepare the trainable parameters.
tf.initialize_all_variables().run()
print('Initialized!')
# Loop through training steps.
summary_writer=tf.train.SummaryWriter(FLAGS.train_dir, graph_def=s.graph_def)
for step in xrange(NUM_EPOCHS * TRAIN_SIZE // BATCH_SIZE):
# Compute the offset of the current minibatch in the data.
offset = (step * BATCH_SIZE) % (TRAIN_SIZE - BATCH_SIZE)
batch_data = train_data[offset:(offset + BATCH_SIZE), :, :, :]
batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
feed_dict = {
train_data_node: batch_data,
train_labels_node: batch_labels
}
# Run the graph and fetch some of the nodes.
_, l, lr, predictions = s.run([optimizer, loss, learning_rate, train_prediction],feed_dict=feed_dict)
if step % 100 == 0:
# re-run graph, save summaries
summary_str = summaries.eval(feed_dict)
summary_writer.add_summary(summary_str, step)
if step % 100 == 0:
print('Epoch %.2f' % (float(step) * BATCH_SIZE / TRAIN_SIZE))
print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
val_predict=validation_prediction.eval(feed_dict={validation_data_node:validation_data})
print('Validation error: %.1f%%' %error_rate(val_predict, validation_labels))
sys.stdout.flush()
# Done training - print the result!
test_error = error_rate(test_prediction.eval(feed_dict={test_data_node:test_data}), test_labels)
print('Test error: %.1f%%' % test_error)
if __name__ == "__main__":
main()
|
ericjang/tdb | tdb/examples/mnist.py | _activation_summary | python | def _activation_summary(x):
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tf.histogram_summary(x.name + '/activations', x)
tf.scalar_summary(x.name + '/sparsity', tf.nn.zero_fraction(x)) | Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measure the sparsity of activations.
Args:
x: Tensor
Returns:
nothing | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/examples/mnist.py#L79-L91 | null | """
builds a simple mnist model
"""
import gzip
import numpy as np
import re
import sys
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 1
TEST_SIZE=55000
TRAIN_SIZE=10000
# DATA PRE-PROCESSING
def extract_data(filename, num_images):
    """
    Extract the images into a 4D tensor [image index, y, x, channels].
    Values are rescaled from [0, 255] down to [-0.5, 0.5].

    Args:
        filename: path to a gzipped MNIST image file
            (e.g. train-images-idx3-ubyte.gz).
        num_images: number of images to read from the stream.

    Returns:
        np.ndarray of shape [num_images, IMAGE_SIZE, IMAGE_SIZE, 1], float32.
    """
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        # Skip the 16 bytes preceding the pixel data -- presumably the IDX
        # magic number + dimension header; TODO confirm against the format.
        bytestream.read(16)
        buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
        data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
        # Center pixels around 0: 0 -> -0.5 and 255 -> ~0.5.
        data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
        data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
        return data
def extract_labels(filename, num_images):
    """Read MNIST labels and return them as a dense 1-hot float32 matrix.

    The result has shape [num_images, NUM_LABELS]; row i holds a 1.0 in the
    column of image i's digit class and 0.0 elsewhere.
    """
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        bytestream.read(8)  # skip the leading bytes before the label data
        raw = bytestream.read(1 * num_images)
    digit_classes = np.frombuffer(raw, dtype=np.uint8)
    # Broadcast-compare each label against 0..NUM_LABELS-1 to form 1-hot rows.
    one_hot = (np.arange(NUM_LABELS) == digit_classes[:, None])
    return one_hot.astype(np.float32)
def get_data(data_root):
    """Load the MNIST train/validation/test splits from gzipped idx files.

    Args:
        data_root: path prefix string-concatenated with the standard MNIST
            file names (so it should normally end in a path separator).

    Returns:
        Tuple (train_data, train_labels, validation_data, validation_labels,
        test_data, test_labels) of numpy arrays. The first VALIDATION_SIZE
        training examples are carved off as the validation split.

    Side effects:
        Overwrites the module-level TRAIN_SIZE and TEST_SIZE globals with
        the actual split sizes.
    """
    train_data_filename = data_root+'train-images-idx3-ubyte.gz'
    train_labels_filename = data_root+'train-labels-idx1-ubyte.gz'
    test_data_filename = data_root+'t10k-images-idx3-ubyte.gz'
    test_labels_filename = data_root+'t10k-labels-idx1-ubyte.gz'
    # Extract it into numpy arrays.
    train_data = extract_data(train_data_filename, 60000)
    train_labels = extract_labels(train_labels_filename, 60000)
    test_data = extract_data(test_data_filename, 10000)
    test_labels = extract_labels(test_labels_filename, 10000)
    # First VALIDATION_SIZE training examples become the validation split.
    validation_data = train_data[:VALIDATION_SIZE, :, :, :]
    validation_labels = train_labels[:VALIDATION_SIZE]
    train_data = train_data[VALIDATION_SIZE:, :, :, :]
    train_labels = train_labels[VALIDATION_SIZE:]
    # Publish real split sizes for the training loop / placeholder shapes.
    global TRAIN_SIZE, TEST_SIZE
    TRAIN_SIZE=train_labels.shape[0]
    TEST_SIZE=test_labels.shape[0]
    return train_data, train_labels, validation_data, validation_labels, test_data, test_labels
# MODEL BUILDING
def build_model():
"""
Builds the computation graph consisting of training/testing LeNet
train data - used for learning
validation data - used for printing progress (does not impact learning)
test data - used for printing final test error
"""
# training data
train_data_node = tf.placeholder(tf.float32,shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
train_labels_node = tf.placeholder(tf.float32,shape=(BATCH_SIZE, NUM_LABELS))
validation_data_node= tf.placeholder(tf.float32,shape=(VALIDATION_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
test_data_node=tf.placeholder(tf.float32,shape=(TEST_SIZE,IMAGE_SIZE,IMAGE_SIZE,NUM_CHANNELS))
# validation dataset held in a single constant node
# validation_data_node = tf.constant(validation_data)
# test_data_node = tf.constant(test_data)
# LEARNABLE WEIGHT NODES SHARED BETWEEN
conv1_weights = tf.Variable(tf.truncated_normal([5, 5, NUM_CHANNELS, 32],stddev=0.1,seed=SEED))
conv1_biases = tf.Variable(tf.zeros([32]))
conv2_weights = tf.Variable(tf.truncated_normal([5, 5, 32, 64],stddev=0.1,seed=SEED))
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]))
fc1_weights = tf.Variable(tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],stddev=0.1,seed=SEED))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[512]))
fc2_weights = tf.Variable(tf.truncated_normal([512, NUM_LABELS],stddev=0.1,seed=SEED))
fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))
# LENET
def build_lenet(data,train=False):
# subroutine for wiring up nodes and weights to training and evaluation LeNets
conv1 = tf.nn.conv2d(data,conv1_weights,strides=[1, 1, 1, 1],padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
pool1 = tf.nn.max_pool(relu1,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME')
conv2 = tf.nn.conv2d(pool1,conv2_weights,strides=[1, 1, 1, 1],padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
pool2 = tf.nn.max_pool(relu2,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME')
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool2.get_shape().as_list()
reshape = tf.reshape(pool2,[pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
fc1 = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
fc1 = tf.nn.dropout(fc1, 0.5, seed=SEED)
# append summary ops to train
_activation_summary(conv1)
_activation_summary(fc1)
fc2 = tf.matmul(fc1, fc2_weights) + fc2_biases
return fc2
# TRAINING LOSS / REGULARIZATION NODES
logits = build_lenet(train_data_node, True)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, train_labels_node))
tf.scalar_summary(loss.op.name,loss)
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) + tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# OPTIMIZER NODES
batch = tf.Variable(0)
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
TRAIN_SIZE, # Decay step.
0.95, # Decay rate.
staircase=True)
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate,0.9).minimize(loss,global_step=batch)
# # Predictions for the minibatch, validation set and test set.
train_prediction = tf.nn.softmax(logits)
# # We'll compute them only once in a while by calling their {eval()} method.
validation_prediction = tf.nn.softmax(build_lenet(validation_data_node))
test_prediction = tf.nn.softmax(build_lenet(test_data_node))
summaries=tf.merge_all_summaries()
# return input nodes and output nodes
return (train_data_node,
train_labels_node,
validation_data_node,
test_data_node,
train_prediction,
validation_prediction,
test_prediction,
conv1_weights,
conv2_weights,
fc1_weights,
fc2_weights,
optimizer,
loss,
learning_rate,
summaries)
def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and 1-hot labels."""
return 100.0 - (
100.0 *
np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /
predictions.shape[0])
def main(data_root=''):
    """Train LeNet on MNIST, printing minibatch/validation/test error.

    Args:
        data_root: directory prefix holding the gzipped MNIST idx files,
            forwarded to get_data(). Defaults to the working directory.
            (The original called get_data() with no argument, which raised
            a TypeError since get_data requires data_root.)
    """
    # get dataset as numpy arrays
    train_data, train_labels, validation_data, validation_labels, test_data, test_labels = get_data(data_root)
    # Build the net. build_model() returns 15 values; the original unpack
    # listed 11 names in a different order, which raised a ValueError (and
    # would have mislabeled the optimizer/prediction nodes). The weight
    # tensors are not needed here, so they are bound to throwaway names.
    (train_data_node,
     train_labels_node,
     validation_data_node,
     test_data_node,
     train_prediction,
     validation_prediction,
     test_prediction,
     _conv1_weights,
     _conv2_weights,
     _fc1_weights,
     _fc2_weights,
     optimizer,
     loss,
     learning_rate,
     summaries) = build_model()
    with tf.Session() as s:
        # Run all the initializers to prepare the trainable parameters.
        tf.initialize_all_variables().run()
        print('Initialized!')
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, graph_def=s.graph_def)
        # Loop through training steps. range(), not py2-only xrange(): the
        # rest of this file already uses print() as a function.
        for step in range(NUM_EPOCHS * TRAIN_SIZE // BATCH_SIZE):
            # Compute the offset of the current minibatch in the data.
            offset = (step * BATCH_SIZE) % (TRAIN_SIZE - BATCH_SIZE)
            batch_data = train_data[offset:(offset + BATCH_SIZE), :, :, :]
            batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
            feed_dict = {
                train_data_node: batch_data,
                train_labels_node: batch_labels
            }
            # Run the graph and fetch some of the nodes.
            _, l, lr, predictions = s.run(
                [optimizer, loss, learning_rate, train_prediction],
                feed_dict=feed_dict)
            if step % 100 == 0:
                # Re-run the graph for summaries, then report progress.
                # (Merged the original's two identical `step % 100` blocks.)
                summary_str = summaries.eval(feed_dict)
                summary_writer.add_summary(summary_str, step)
                print('Epoch %.2f' % (float(step) * BATCH_SIZE / TRAIN_SIZE))
                print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
                print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
                val_predict = validation_prediction.eval(
                    feed_dict={validation_data_node: validation_data})
                print('Validation error: %.1f%%' % error_rate(val_predict, validation_labels))
                sys.stdout.flush()
        # Done training - print the result!
        test_error = error_rate(
            test_prediction.eval(feed_dict={test_data_node: test_data}), test_labels)
        print('Test error: %.1f%%' % test_error)
if __name__ == "__main__":
main()
|
ericjang/tdb | tdb/examples/mnist.py | build_model | python | def build_model():
# training data
train_data_node = tf.placeholder(tf.float32,shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
train_labels_node = tf.placeholder(tf.float32,shape=(BATCH_SIZE, NUM_LABELS))
validation_data_node= tf.placeholder(tf.float32,shape=(VALIDATION_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
test_data_node=tf.placeholder(tf.float32,shape=(TEST_SIZE,IMAGE_SIZE,IMAGE_SIZE,NUM_CHANNELS))
# validation dataset held in a single constant node
# validation_data_node = tf.constant(validation_data)
# test_data_node = tf.constant(test_data)
# LEARNABLE WEIGHT NODES SHARED BETWEEN
conv1_weights = tf.Variable(tf.truncated_normal([5, 5, NUM_CHANNELS, 32],stddev=0.1,seed=SEED))
conv1_biases = tf.Variable(tf.zeros([32]))
conv2_weights = tf.Variable(tf.truncated_normal([5, 5, 32, 64],stddev=0.1,seed=SEED))
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]))
fc1_weights = tf.Variable(tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],stddev=0.1,seed=SEED))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[512]))
fc2_weights = tf.Variable(tf.truncated_normal([512, NUM_LABELS],stddev=0.1,seed=SEED))
fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))
# LENET
def build_lenet(data,train=False):
# subroutine for wiring up nodes and weights to training and evaluation LeNets
conv1 = tf.nn.conv2d(data,conv1_weights,strides=[1, 1, 1, 1],padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
pool1 = tf.nn.max_pool(relu1,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME')
conv2 = tf.nn.conv2d(pool1,conv2_weights,strides=[1, 1, 1, 1],padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
pool2 = tf.nn.max_pool(relu2,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME')
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool2.get_shape().as_list()
reshape = tf.reshape(pool2,[pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
fc1 = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
fc1 = tf.nn.dropout(fc1, 0.5, seed=SEED)
# append summary ops to train
_activation_summary(conv1)
_activation_summary(fc1)
fc2 = tf.matmul(fc1, fc2_weights) + fc2_biases
return fc2
# TRAINING LOSS / REGULARIZATION NODES
logits = build_lenet(train_data_node, True)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, train_labels_node))
tf.scalar_summary(loss.op.name,loss)
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) + tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# OPTIMIZER NODES
batch = tf.Variable(0)
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
TRAIN_SIZE, # Decay step.
0.95, # Decay rate.
staircase=True)
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate,0.9).minimize(loss,global_step=batch)
# # Predictions for the minibatch, validation set and test set.
train_prediction = tf.nn.softmax(logits)
# # We'll compute them only once in a while by calling their {eval()} method.
validation_prediction = tf.nn.softmax(build_lenet(validation_data_node))
test_prediction = tf.nn.softmax(build_lenet(test_data_node))
summaries=tf.merge_all_summaries()
# return input nodes and output nodes
return (train_data_node,
train_labels_node,
validation_data_node,
test_data_node,
train_prediction,
validation_prediction,
test_prediction,
conv1_weights,
conv2_weights,
fc1_weights,
fc2_weights,
optimizer,
loss,
learning_rate,
summaries) | Builds the computation graph consisting of training/testing LeNet
train data - used for learning
validation data - used for printing progress (does not impact learning)
test data - used for printing final test error | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/examples/mnist.py#L94-L192 | [
"def build_lenet(data,train=False):\n # subroutine for wiring up nodes and weights to training and evaluation LeNets\n conv1 = tf.nn.conv2d(data,conv1_weights,strides=[1, 1, 1, 1],padding='SAME')\n relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))\n pool1 = tf.nn.max_pool(relu1,ksize=[1, 2, 2, 1],strides=... | """
builds a simple mnist model
"""
import gzip
import numpy as np
import re
import sys
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 1
TEST_SIZE=55000
TRAIN_SIZE=10000
# DATA PRE-PROCESSING
def extract_data(filename, num_images):
"""
Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
return data
def extract_labels(filename, num_images):
"""
Extract the labels into a 1-hot matrix [image index, label index].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = np.frombuffer(buf, dtype=np.uint8)
# Convert to dense 1-hot representation.
return (np.arange(NUM_LABELS) == labels[:, None]).astype(np.float32)
def get_data(data_root):
train_data_filename = data_root+'train-images-idx3-ubyte.gz'
train_labels_filename = data_root+'train-labels-idx1-ubyte.gz'
test_data_filename = data_root+'t10k-images-idx3-ubyte.gz'
test_labels_filename = data_root+'t10k-labels-idx1-ubyte.gz'
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
validation_data = train_data[:VALIDATION_SIZE, :, :, :]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, :, :, :]
train_labels = train_labels[VALIDATION_SIZE:]
global TRAIN_SIZE, TEST_SIZE
TRAIN_SIZE=train_labels.shape[0]
TEST_SIZE=test_labels.shape[0]
return train_data, train_labels, validation_data, validation_labels, test_data, test_labels
def _activation_summary(x):
	"""Helper to create summaries for activations.
	Creates a summary that provides a histogram of activations.
	Creates a summary that measures the sparsity of activations.
	Args:
		x: Tensor
	Returns:
		nothing
	"""
	# NOTE(review): the comment below (inherited from the cifar10 example)
	# describes stripping a 'tower_[0-9]/' prefix, but no such stripping is
	# actually performed here -- the raw x.name is used directly.
	# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
	# session. This helps the clarity of presentation on tensorboard.
	tf.histogram_summary(x.name + '/activations', x)
	tf.scalar_summary(x.name + '/sparsity', tf.nn.zero_fraction(x))
# MODEL BUILDING
def error_rate(predictions, labels):
	"""Return the percentage of rows whose argmax prediction disagrees with
	the argmax of the 1-hot *labels* matrix."""
	predicted_classes = np.argmax(predictions, 1)
	true_classes = np.argmax(labels, 1)
	num_correct = np.sum(predicted_classes == true_classes)
	return 100.0 - (100.0 * num_correct / predictions.shape[0])
def main():
	"""
	Train LeNet on MNIST for NUM_EPOCHS epochs, periodically printing
	minibatch and validation error, and finally the test error.

	NOTE(review): get_data() is called here without the required data_root
	argument, and only 11 values are unpacked from build_model() -- confirm
	both against the current get_data/build_model signatures before running.
	"""
	# get dataset as numpy arrays
	train_data, train_labels, validation_data, validation_labels, test_data, test_labels = get_data()
	#pdb.set_trace()
	# build net (return inputs)
	(train_data_node,
	train_labels_node,
	validation_data_node,
	test_data_node,
	optimizer,
	loss,
	learning_rate,
	train_prediction,
	validation_prediction,
	test_prediction,
	summaries) = build_model()
	with tf.Session() as s:
		# Run all the initializers to prepare the trainable parameters.
		tf.initialize_all_variables().run()
		print('Initialized!')
		# Loop through training steps.
		summary_writer=tf.train.SummaryWriter(FLAGS.train_dir, graph_def=s.graph_def)
		for step in xrange(NUM_EPOCHS * TRAIN_SIZE // BATCH_SIZE):
			# Compute the offset of the current minibatch in the data.
			offset = (step * BATCH_SIZE) % (TRAIN_SIZE - BATCH_SIZE)
			batch_data = train_data[offset:(offset + BATCH_SIZE), :, :, :]
			batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
			feed_dict = {
				train_data_node: batch_data,
				train_labels_node: batch_labels
			}
			# Run the graph and fetch some of the nodes.
			_, l, lr, predictions = s.run([optimizer, loss, learning_rate, train_prediction],feed_dict=feed_dict)
			if step % 100 == 0:
				# re-run graph, save summaries
				summary_str = summaries.eval(feed_dict)
				summary_writer.add_summary(summary_str, step)
			if step % 100 == 0:
				print('Epoch %.2f' % (float(step) * BATCH_SIZE / TRAIN_SIZE))
				print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
				print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
				val_predict=validation_prediction.eval(feed_dict={validation_data_node:validation_data})
				print('Validation error: %.1f%%' %error_rate(val_predict, validation_labels))
				sys.stdout.flush()
		# Done training - print the result!
		test_error = error_rate(test_prediction.eval(feed_dict={test_data_node:test_data}), test_labels)
		print('Test error: %.1f%%' % test_error)
if __name__ == "__main__":
main()
|
ericjang/tdb | tdb/examples/mnist.py | error_rate | python | def error_rate(predictions, labels):
return 100.0 - (
100.0 *
np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) /
predictions.shape[0]) | Return the error rate based on dense predictions and 1-hot labels. | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/examples/mnist.py#L194-L199 | null | """
builds a simple mnist model
"""
import gzip
import numpy as np
import re
import sys
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 1
TEST_SIZE=55000
TRAIN_SIZE=10000
# DATA PRE-PROCESSING
def extract_data(filename, num_images):
	"""
	Extract the images into a 4D tensor [image index, y, x, channels].
	Values are rescaled from [0, 255] down to [-0.5, 0.5].
	"""
	print('Extracting', filename)
	with gzip.open(filename) as bytestream:
		bytestream.read(16)  # skip the 16-byte idx3 header (magic + dims)
		buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
		data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
		# Center around zero: [0, 255] -> [-0.5, 0.5]
		data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
		data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
		return data
def extract_labels(filename, num_images):
	"""
	Extract the labels into a 1-hot matrix [image index, label index].
	"""
	print('Extracting', filename)
	with gzip.open(filename) as bytestream:
		bytestream.read(8)  # skip the 8-byte idx1 header (magic + count)
		buf = bytestream.read(1 * num_images)
		labels = np.frombuffer(buf, dtype=np.uint8)
		# Convert to dense 1-hot representation.
		return (np.arange(NUM_LABELS) == labels[:, None]).astype(np.float32)
def get_data(data_root):
	"""
	Load the MNIST train/test archives found under *data_root*, carve a
	validation split off the front of the training set, and update the
	module-level TRAIN_SIZE / TEST_SIZE globals as a side effect.

	Returns:
		(train_data, train_labels, validation_data, validation_labels,
		 test_data, test_labels) as numpy arrays.
	"""
	train_data = extract_data(data_root + 'train-images-idx3-ubyte.gz', 60000)
	train_labels = extract_labels(data_root + 'train-labels-idx1-ubyte.gz', 60000)
	test_data = extract_data(data_root + 't10k-images-idx3-ubyte.gz', 10000)
	test_labels = extract_labels(data_root + 't10k-labels-idx1-ubyte.gz', 10000)
	# The first VALIDATION_SIZE training examples become the validation set.
	validation_data = train_data[:VALIDATION_SIZE]
	validation_labels = train_labels[:VALIDATION_SIZE]
	train_data = train_data[VALIDATION_SIZE:]
	train_labels = train_labels[VALIDATION_SIZE:]
	global TRAIN_SIZE, TEST_SIZE
	TRAIN_SIZE = train_labels.shape[0]
	TEST_SIZE = test_labels.shape[0]
	return train_data, train_labels, validation_data, validation_labels, test_data, test_labels
def _activation_summary(x):
	"""
	Attach TensorBoard summaries to tensor *x*: a histogram of its
	activations and a scalar measuring their sparsity.
	Args:
		x: Tensor
	Returns:
		nothing
	"""
	tf.histogram_summary(x.name + '/activations', x)
	tf.scalar_summary(x.name + '/sparsity', tf.nn.zero_fraction(x))
# MODEL BUILDING
def build_model():
	"""
	Builds the computation graph consisting of training/testing LeNet.
	train data - used for learning
	validation data - used for printing progress (does not impact learning)
	test data - used for printing final test error

	Three LeNet towers (train / validation / test) are wired up against the
	same set of weight Variables, so updates from training are visible to
	the evaluation towers. Returns a 15-tuple of placeholders, weights,
	training ops and prediction/summary nodes (see the return statement).
	"""
	# training data
	train_data_node = tf.placeholder(tf.float32,shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
	train_labels_node = tf.placeholder(tf.float32,shape=(BATCH_SIZE, NUM_LABELS))
	validation_data_node= tf.placeholder(tf.float32,shape=(VALIDATION_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
	test_data_node=tf.placeholder(tf.float32,shape=(TEST_SIZE,IMAGE_SIZE,IMAGE_SIZE,NUM_CHANNELS))
	# validation dataset held in a single constant node
	# validation_data_node = tf.constant(validation_data)
	# test_data_node = tf.constant(test_data)
	# LEARNABLE WEIGHT NODES SHARED BETWEEN the train/validation/test towers
	conv1_weights = tf.Variable(tf.truncated_normal([5, 5, NUM_CHANNELS, 32],stddev=0.1,seed=SEED))
	conv1_biases = tf.Variable(tf.zeros([32]))
	conv2_weights = tf.Variable(tf.truncated_normal([5, 5, 32, 64],stddev=0.1,seed=SEED))
	conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]))
	# Two 2x2 max-pools halve each spatial dim twice, hence IMAGE_SIZE // 4.
	fc1_weights = tf.Variable(tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],stddev=0.1,seed=SEED))
	fc1_biases = tf.Variable(tf.constant(0.1, shape=[512]))
	fc2_weights = tf.Variable(tf.truncated_normal([512, NUM_LABELS],stddev=0.1,seed=SEED))
	fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))
	# LENET
	def build_lenet(data,train=False):
		# subroutine for wiring up nodes and weights to training and evaluation LeNets
		conv1 = tf.nn.conv2d(data,conv1_weights,strides=[1, 1, 1, 1],padding='SAME')
		relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
		pool1 = tf.nn.max_pool(relu1,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME')
		conv2 = tf.nn.conv2d(pool1,conv2_weights,strides=[1, 1, 1, 1],padding='SAME')
		relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
		pool2 = tf.nn.max_pool(relu2,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME')
		# Reshape the feature map cuboid into a 2D matrix to feed it to the
		# fully connected layers.
		pool_shape = pool2.get_shape().as_list()
		reshape = tf.reshape(pool2,[pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
		fc1 = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
		# Add a 50% dropout during training only. Dropout also scales
		# activations such that no rescaling is needed at evaluation time.
		if train:
			fc1 = tf.nn.dropout(fc1, 0.5, seed=SEED)
			# append summary ops to train (summaries only on the training tower)
			_activation_summary(conv1)
			_activation_summary(fc1)
		fc2 = tf.matmul(fc1, fc2_weights) + fc2_biases
		return fc2
	# TRAINING LOSS / REGULARIZATION NODES
	logits = build_lenet(train_data_node, True)
	loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, train_labels_node))
	tf.scalar_summary(loss.op.name,loss)
	# L2 regularization is applied to the fully-connected layers only.
	regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) + tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
	# Add the regularization term to the loss.
	loss += 5e-4 * regularizers
	# OPTIMIZER NODES
	batch = tf.Variable(0)
	# Decay once per epoch, using an exponential schedule starting at 0.01.
	learning_rate = tf.train.exponential_decay(
		0.01, # Base learning rate.
		batch * BATCH_SIZE, # Current index into the dataset.
		TRAIN_SIZE, # Decay step.
		0.95, # Decay rate.
		staircase=True)
	# Use simple momentum for the optimization.
	optimizer = tf.train.MomentumOptimizer(learning_rate,0.9).minimize(loss,global_step=batch)
	# # Predictions for the minibatch, validation set and test set.
	train_prediction = tf.nn.softmax(logits)
	# # We'll compute them only once in a while by calling their {eval()} method.
	validation_prediction = tf.nn.softmax(build_lenet(validation_data_node))
	test_prediction = tf.nn.softmax(build_lenet(test_data_node))
	summaries=tf.merge_all_summaries()
	# return input nodes and output nodes (15 values -- keep callers in sync)
	return (train_data_node,
		train_labels_node,
		validation_data_node,
		test_data_node,
		train_prediction,
		validation_prediction,
		test_prediction,
		conv1_weights,
		conv2_weights,
		fc1_weights,
		fc2_weights,
		optimizer,
		loss,
		learning_rate,
		summaries)
def main():
	"""
	Train LeNet on MNIST for NUM_EPOCHS epochs, periodically printing
	minibatch and validation error, and finally the test error.

	NOTE(review): get_data() is called here without the required data_root
	argument, and only 11 values are unpacked while build_model() above
	returns 15 -- confirm this entry point against those signatures.
	"""
	# get dataset as numpy arrays
	train_data, train_labels, validation_data, validation_labels, test_data, test_labels = get_data()
	#pdb.set_trace()
	# build net (return inputs)
	(train_data_node,
	train_labels_node,
	validation_data_node,
	test_data_node,
	optimizer,
	loss,
	learning_rate,
	train_prediction,
	validation_prediction,
	test_prediction,
	summaries) = build_model()
	with tf.Session() as s:
		# Run all the initializers to prepare the trainable parameters.
		tf.initialize_all_variables().run()
		print('Initialized!')
		# Loop through training steps.
		summary_writer=tf.train.SummaryWriter(FLAGS.train_dir, graph_def=s.graph_def)
		for step in xrange(NUM_EPOCHS * TRAIN_SIZE // BATCH_SIZE):
			# Compute the offset of the current minibatch in the data.
			offset = (step * BATCH_SIZE) % (TRAIN_SIZE - BATCH_SIZE)
			batch_data = train_data[offset:(offset + BATCH_SIZE), :, :, :]
			batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
			feed_dict = {
				train_data_node: batch_data,
				train_labels_node: batch_labels
			}
			# Run the graph and fetch some of the nodes.
			_, l, lr, predictions = s.run([optimizer, loss, learning_rate, train_prediction],feed_dict=feed_dict)
			if step % 100 == 0:
				# re-run graph, save summaries
				summary_str = summaries.eval(feed_dict)
				summary_writer.add_summary(summary_str, step)
			if step % 100 == 0:
				print('Epoch %.2f' % (float(step) * BATCH_SIZE / TRAIN_SIZE))
				print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
				print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
				val_predict=validation_prediction.eval(feed_dict={validation_data_node:validation_data})
				print('Validation error: %.1f%%' %error_rate(val_predict, validation_labels))
				sys.stdout.flush()
		# Done training - print the result!
		test_error = error_rate(test_prediction.eval(feed_dict={test_data_node:test_data}), test_labels)
		print('Test error: %.1f%%' % test_error)
if __name__ == "__main__":
main()
|
ericjang/tdb | tdb/op_store.py | get_node | python | def get_node(name):
if name in _ops:
return _ops[name]
else:
g=tf.get_default_graph()
return g.as_graph_element(name) | returns HTOp or tf graph element corresponding to requested node name | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/op_store.py#L27-L35 | null | from toposort import toposort, toposort_flatten
from transitive_closure import transitive_closure
import tensorflow as tf
_ops={} # Map<string,tdb.PythonOp>
_placeholder_2_op={} # Map<tf.PlaceholderTensor, tdb.PythonOp>
def add_op(op):
	"""Register *op* under its name and map each of its output placeholder
	tensors back to it for later reverse lookup."""
	_ops[op.name] = op
	_placeholder_2_op.update((tensor, op) for tensor in op.outputs)
def get_op(placeholder):
	"""Return the PythonOp whose output is *placeholder* (KeyError if none)."""
	return _placeholder_2_op[placeholder]
def is_htop_out(placeholder):
	"""Return True if *placeholder* is the output of a registered PythonOp."""
	# returns True if placeholder is the output of a PythonOp
	return placeholder in _placeholder_2_op
def compute_exe_order(evals):
	"""
	Return every node required to evaluate *evals*, topologically sorted so
	that each node appears after all of its dependencies.
	"""
	deps=compute_node_deps()
	eval_names=[e.name for e in evals]
	# Restrict the full dependency graph to the transitive deps of the evals.
	tc_deps=transitive_closure(eval_names, deps)
	ordered_names = toposort_flatten(tc_deps)
	return [get_node(name) for name in ordered_names]
def register_dbsession(dbsession):
	"""Point every registered op at *dbsession* so they can cache results into it."""
	for registered_op in list(_ops.values()):
		registered_op.set_session(dbsession)
def compute_node_deps():
	"""
	- returns the full dependency graph of ALL ops and ALL tensors
	Map<string,set<string>> where key=node name, values=set of dependency names
	If an Op takes in a placeholder tensor that is the output of a PythonOp,
	we need to replace that Placeholder with the PythonOp.
	"""
	deps={}
	g=tf.get_default_graph()
	# First pass: native TF operations (and their output tensors).
	for op in g.get_operations():
		d=set([i.name for i in op.control_inputs])
		for t in op.inputs:
			if is_htop_out(t):
				# Placeholder fed by a PythonOp: depend on the PythonOp itself.
				d.add(get_op(t).name)
			else:
				d.add(t.name)
		deps[op.name]=d
		# Each output tensor depends on the op that produces it.
		for t in op.outputs:
			deps[t.name]=set([op.name])
	# do the same thing with HTOps
	for op in _ops.values():
		d=set()
		for t in op.inputs:
			if is_htop_out(t):
				d.add(get_op(t).name)
			else:
				d.add(t.name)
		deps[op.name]=d
	return deps
|
ericjang/tdb | tdb/op_store.py | compute_node_deps | python | def compute_node_deps():
deps={}
g=tf.get_default_graph()
for op in g.get_operations():
d=set([i.name for i in op.control_inputs])
for t in op.inputs:
if is_htop_out(t):
d.add(get_op(t).name)
else:
d.add(t.name)
deps[op.name]=d
for t in op.outputs:
deps[t.name]=set([op.name])
# do the same thing with HTOps
for op in _ops.values():
d=set()
for t in op.inputs:
if is_htop_out(t):
d.add(get_op(t).name)
else:
d.add(t.name)
deps[op.name]=d
return deps | - returns the full dependency graph of ALL ops and ALL tensors
Map<string,list<string>> where key=node name, values=list of dependency names
If an Op takes in a placeholder tensor that is the ouput of a PythonOp,
we need to replace that Placeholder with the PythonOp. | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/op_store.py#L41-L70 | [
"def is_htop_out(placeholder):\n\t# returns True if placeholder is the output of a PythonOp\n\treturn placeholder in _placeholder_2_op\n",
"def get_op(placeholder):\n\treturn _placeholder_2_op[placeholder]\n"
] | from toposort import toposort, toposort_flatten
from transitive_closure import transitive_closure
import tensorflow as tf
_ops={} # Map<string,tdb.PythonOp>
_placeholder_2_op={} # Map<tf.PlaceholderTensor, tdb.PythonOp>
def add_op(op):
_ops[op.name]=op
for t in op.outputs:
_placeholder_2_op[t]=op
def get_op(placeholder):
return _placeholder_2_op[placeholder]
def is_htop_out(placeholder):
# returns True if placeholder is the output of a PythonOp
return placeholder in _placeholder_2_op
def compute_exe_order(evals):
deps=compute_node_deps()
eval_names=[e.name for e in evals]
tc_deps=transitive_closure(eval_names, deps)
ordered_names = toposort_flatten(tc_deps)
return [get_node(name) for name in ordered_names]
def get_node(name):
	"""Resolve *name* to its registered HTOp, falling back to the matching
	element of the default TensorFlow graph."""
	try:
		return _ops[name]
	except KeyError:
		return tf.get_default_graph().as_graph_element(name)
def register_dbsession(dbsession):
for op in _ops.values():
op.set_session(dbsession)
|
ericjang/tdb | tdb/python_op.py | python_op | python | def python_op(fn, inputs=None, outputs=None):
# construct a PythonOp and return its TensorNode outputs, if it has one
global COUNT
# check outputs
if not isinstance(outputs,list):
outputs=[outputs]
for tensor in outputs:
if tensor.op.type != 'Placeholder':
raise TypeError('Output nodes must be Placeholders')
op=PythonOp('Python', fn, COUNT, inputs, outputs)
op_store.add_op(op)
COUNT+=1
if outputs:
return outputs[0]
else:
return op | User-exposed api method for constructing a python_node
Args:
fn: python function that computes some np.ndarrays given np.ndarrays as inputs. it can have arbitrary side effects.
inputs: array of tf.Tensors (optional). These are where fn derives its values from
outputs: tf.Placeholder nodes (optional). These are constructed by the user (which allows the user to
plug them into other ht.Ops or tf.Ops). The outputs of fn are mapped to each of the output placeholders.
raises an Error if fn cannot map | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/python_op.py#L9-L36 | null |
COUNT=0
from ht_op import HTOp
import inspect
import numpy as np
import op_store
class PythonOp(HTOp):
	"""
	An HTOp whose computation is an arbitrary Python callable.

	The wrapped function receives this op as its first argument followed by
	the numpy values of its input tensors; whatever it returns is cached in
	the debug session under the op's output placeholders.
	"""
	def __init__(self, node_type, fn, i, inputs, outputs):
		"""
		constructor. user does not call this directly (see python_op()).
		"""
		super(PythonOp, self).__init__(node_type, i, inputs, outputs)
		self.fn=fn
	def run(self, feed_dict):
		"""Evaluate self.fn on the fed input values and cache its outputs."""
		args=tuple(feed_dict[i] for i in self.inputs)
		results=self.fn(self, *args)
		self.cache_values(results)
		return results
	def cache_values(self, results):
		"""
		Normalize *results* and load them into the DebugSession cache.

		Accepts None (fn ran for side effects only), a single np.ndarray, a
		numpy scalar, or a list with one entry per output placeholder.
		Raises ValueError when the result count does not match self.outputs.
		"""
		if results is None:
			# self.fn was probably only used to compute side effects.
			return
		elif isinstance(results,np.ndarray):
			# fn returns single np.ndarray.
			# re-format it into a list
			results=[results]
		# check validity of fn output
		elif isinstance(results,list):
			# BUGFIX: compare counts with !=, not 'is not' -- identity on
			# ints only happens to hold inside CPython's small-int cache.
			if len(results) != len(self.outputs):
				raise ValueError('Number of output tensors does not match number of outputs produced by function')
		elif isinstance(results,np.number):
			if len(self.outputs) != 1:
				raise ValueError('Fn produces scalar but %d outputs expected' % (len(self.outputs)))
			results=[results]
		# assign each element in ndarrays to corresponding output tensor
		for i,ndarray in enumerate(results):
			self.session._cache_value(self.outputs[i], ndarray)
ericjang/tdb | tdb/python_op.py | PythonOp.cache_values | python | def cache_values(self, results):
if results is None:
# self.fn was probably only used to compute side effects.
return
elif isinstance(results,np.ndarray):
# fn returns single np.ndarray.
# re-format it into a list
results=[results]
# check validity of fn output
elif isinstance(results,list):
if len(results) is not len(self.outputs):
raise ValueError('Number of output tensors does not match number of outputs produced by function')
elif isinstance(results,np.number):
if len(self.outputs) != 1:
raise ValueError('Fn produces scalar but %d outputs expected' % (len(self.outputs)))
results=[results]
# assign each element in ndarrays to corresponding output tensor
for i,ndarray in enumerate(results):
self.session._cache_value(self.outputs[i], ndarray) | loads into DebugSession cache | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/python_op.py#L54-L75 | null | class PythonOp(HTOp):
"""docstring for PythonOp"""
def __init__(self, node_type, fn, i, inputs, outputs):
"""
constructor. user does not call this.
"""
super(PythonOp, self).__init__(node_type, i, inputs, outputs)
self.fn=fn
def run(self, feed_dict):
#pdb.set_trace()
args=tuple(feed_dict[i] for i in self.inputs)
results=self.fn(self, *args)
self.cache_values(results)
return results
|
ericjang/tdb | tdb/interface.py | debug | python | def debug(evals,feed_dict=None,breakpoints=None,break_immediately=False,session=None):
global _dbsession
_dbsession=debug_session.DebugSession(session)
return _dbsession.run(evals,feed_dict,breakpoints,break_immediately) | spawns a new debug session | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/interface.py#L11-L17 | null | """
top-level interface methods so user doesn't need to directly construct
a dbsession
"""
import debug_session
# default session
_dbsession=None
def s():
	"""Step the active debug session forward by one node in its execution order."""
	return _dbsession.s()
def c():
	"""Resume ('continue') the active debug session."""
	return _dbsession.c()
def get_exe_queue():
	"""Return the execution queue of the active debug session."""
	return _dbsession.get_exe_queue()
def get_value(node):
	"""Return the value of *node* from the active debug session."""
	return _dbsession.get_value(node)
ericjang/tdb | tdb/app.py | connect | python | def connect():
if not is_notebook():
print('Python session is not running in a Notebook Kernel')
return
global _comm
kernel=get_ipython().kernel
kernel.comm_manager.register_target('tdb',handle_comm_opened)
# initiate connection to frontend.
_comm=Comm(target_name='tdb',data={})
# bind recv handler
_comm.on_msg(None) | establish connection to frontend notebook | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/app.py#L15-L30 | [
"def is_notebook():\n\tiPython=get_ipython()\n\tif iPython is None or not iPython.config:\n\t\treturn False\n\treturn 'IPKernelApp' in iPython.config\n"
] | from base64 import b64encode
from ipykernel.comm import Comm
from IPython import get_ipython
import StringIO
import urllib
_comm=None
def is_notebook():
	"""Return True when this interpreter runs inside an IPython notebook kernel."""
	shell = get_ipython()
	if shell is not None and shell.config:
		return 'IPKernelApp' in shell.config
	return False
def connect():
	"""
	establish connection to frontend notebook

	No-op (with a console message) when not running in a notebook kernel.
	Registers the 'tdb' comm target on the kernel and opens a Comm to the
	frontend, storing it in the module-level _comm.
	"""
	if not is_notebook():
		print('Python session is not running in a Notebook Kernel')
		return
	global _comm
	kernel=get_ipython().kernel
	kernel.comm_manager.register_target('tdb',handle_comm_opened)
	# initiate connection to frontend.
	_comm=Comm(target_name='tdb',data={})
	# bind recv handler
	# NOTE(review): on_msg(None) clears any message callback rather than
	# binding one -- confirm this is the intended behavior.
	_comm.on_msg(None)
def send_action(action, params=None):
	"""Send an action message (optionally carrying parameters) over the
	frontend comm channel."""
	message = {"msg_type": "action", "action": action}
	if params is not None:
		message['params'] = params
	_comm.send(message)
def send_fig(fig,name):
	"""
	Serialize *fig* as a base64 PNG data-URI and push it to the frontend as
	an 'update_plot' action tagged with *name*.
	"""
	imgdata = StringIO.StringIO()
	fig.savefig(imgdata, format='png')
	# BUGFIX: use the public getvalue() accessor instead of the internal
	# .buf attribute (undocumented, and absent from cStringIO). getvalue()
	# also makes the seek(0) rewind unnecessary.
	uri = 'data:image/png;base64,' + urllib.quote(b64encode(imgdata.getvalue()))
	send_action("update_plot",params={"src":uri, "name":name})
# handler messages
def handle_comm_opened(msg):
	"""Target callback fired when the frontend opens a 'tdb' comm."""
	# this won't appear in the notebook (it goes to the kernel console)
	print('comm opened')
	print(msg)
ericjang/tdb | tdb/app.py | send_action | python | def send_action(action, params=None):
data={"msg_type":"action", "action":action}
if params is not None:
data['params']=params
_comm.send(data) | helper method for sending actions | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/app.py#L32-L39 | null | from base64 import b64encode
from ipykernel.comm import Comm
from IPython import get_ipython
import StringIO
import urllib
_comm=None
def is_notebook():
iPython=get_ipython()
if iPython is None or not iPython.config:
return False
return 'IPKernelApp' in iPython.config
def connect():
"""
establish connection to frontend notebook
"""
if not is_notebook():
print('Python session is not running in a Notebook Kernel')
return
global _comm
kernel=get_ipython().kernel
kernel.comm_manager.register_target('tdb',handle_comm_opened)
# initiate connection to frontend.
_comm=Comm(target_name='tdb',data={})
# bind recv handler
_comm.on_msg(None)
def send_fig(fig,name):
"""
sends figure to frontend
"""
imgdata = StringIO.StringIO()
fig.savefig(imgdata, format='png')
imgdata.seek(0) # rewind the data
uri = 'data:image/png;base64,' + urllib.quote(b64encode(imgdata.buf))
send_action("update_plot",params={"src":uri, "name":name})
# handler messages
def handle_comm_opened(msg):
# this won't appear in the notebook
print('comm opened')
print(msg) |
ericjang/tdb | tdb/app.py | send_fig | python | def send_fig(fig,name):
imgdata = StringIO.StringIO()
fig.savefig(imgdata, format='png')
imgdata.seek(0) # rewind the data
uri = 'data:image/png;base64,' + urllib.quote(b64encode(imgdata.buf))
send_action("update_plot",params={"src":uri, "name":name}) | sends figure to frontend | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/app.py#L41-L49 | [
"def send_action(action, params=None):\n\t\"\"\"\n\thelper method for sending actions\n\t\"\"\"\n\tdata={\"msg_type\":\"action\", \"action\":action}\n\tif params is not None:\n\t\tdata['params']=params\n\t_comm.send(data)\n"
] | from base64 import b64encode
from ipykernel.comm import Comm
from IPython import get_ipython
import StringIO
import urllib
_comm=None
def is_notebook():
iPython=get_ipython()
if iPython is None or not iPython.config:
return False
return 'IPKernelApp' in iPython.config
def connect():
"""
establish connection to frontend notebook
"""
if not is_notebook():
print('Python session is not running in a Notebook Kernel')
return
global _comm
kernel=get_ipython().kernel
kernel.comm_manager.register_target('tdb',handle_comm_opened)
# initiate connection to frontend.
_comm=Comm(target_name='tdb',data={})
# bind recv handler
_comm.on_msg(None)
def send_action(action, params=None):
"""
helper method for sending actions
"""
data={"msg_type":"action", "action":action}
if params is not None:
data['params']=params
_comm.send(data)
# handler messages
def handle_comm_opened(msg):
# this won't appear in the notebook
print('comm opened')
print(msg) |
ericjang/tdb | tdb/examples/viz.py | viz_square | python | def viz_square(data, normalize=True, cmap=plt.cm.gray, padsize=1, padval=0):
# normalize to 0-1 range
if normalize:
data -= data.min()
data /= data.max()
n = int(np.ceil(np.sqrt(data.shape[0]))) # force square
padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
# tile the filters into an image
data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
plt.matshow(data,cmap=cmap) | takes a np.ndarray of shape (n, height, width) or (n, height, width, channels)
visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)
However, this only draws first input channel | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/examples/viz.py#L7-L23 | null | # a collection of sample visualization functions
# for binding to plotnode
import matplotlib.pyplot as plt
import numpy as np
def viz_conv_weights(ctx, weight):
# visualize all output filters
# for the first input channel
viz_square(weight.transpose(3,0,1,2)[:,:,:,0])
def viz_activations(ctx, m):
	"""Render the activation matrix *m* (batch x digit) as a grayscale image."""
	activations = m.T
	plt.matshow(activations, cmap=plt.cm.gray)
	plt.title("LeNet Predictions")
	plt.xlabel("Batch")
	plt.ylabel("Digit Activation")
def viz_weight_hist(ctx, w):
plt.hist(w.flatten())
def viz_conv_hist(ctx, w):
n = int(np.ceil(np.sqrt(w.shape[3]))) # force square
f, axes = plt.subplots(n,n,sharex=True,sharey=True)
for i in range(w.shape[3]): # for each output channel
r,c=i//n,i%n
axes[r,c].hist(w[:,:,:,i].flatten())
axes[r,c].get_xaxis().set_visible(False)
axes[r,c].get_yaxis().set_visible(False)
def viz_fc_weights(ctx, w):
# visualize fully connected weights
plt.matshow(w.T,cmap=plt.cm.gray)
def watch_loss(ctx,loss):
	"""Append *loss* to the history stored on *ctx* and plot the curve so far."""
	# Lazily create the history list on first call; it grows unboundedly.
	if not hasattr(ctx, 'loss_history'):
		ctx.loss_history=[]
	ctx.loss_history.append(loss)
	plt.plot(ctx.loss_history)
	plt.ylabel('loss')
|
ericjang/tdb | tdb/plot_op.py | plot_op | python | def plot_op(fn, inputs=[], outputs=[]):
global COUNT, ht
# check outputs
if not isinstance(outputs,list):
outputs=[outputs]
for tensor in outputs:
if tensor.op.type is not 'Placeholder':
raise Error('Output nodes must be Placeholders')
op=PlotOp(fn, COUNT, inputs, outputs)
op_store.add_op(op)
COUNT+=1
# if node has output, return value for python_op is the first output (placeholder) tensor
# otherwise, return the op
if outputs:
return outputs[0]
else:
return op | User-exposed api method for constructing a python_node
Args:
fn: python function that computes some np.ndarrays given np.ndarrays as inputs. it can have arbitrary side effects.
inputs: array of tf.Tensors (optional). These are where fn derives its values from
outputs: tf.Placeholder nodes (optional). These are constructed by the user (which allows the user to
plug them into other ht.Ops or tf.Ops). The outputs of fn are mapped to each of the output placeholders.
raises an Error if fn cannot map | train | https://github.com/ericjang/tdb/blob/5e78b5dbecf78b6d28eb2f5b67decf8d1f1eb17d/tdb/plot_op.py#L10-L41 | null |
COUNT=0
from python_op import PythonOp
import app
import inspect
import matplotlib.pyplot as plt
import op_store
class PlotOp(PythonOp):
	"""
	A PythonOp whose function draws a matplotlib figure; when running inside
	a notebook kernel, the rendered figure is pushed to the frontend and
	then closed so it does not leak into later plots.
	"""
	def __init__(self, fn, i, inputs, outputs):
		super(PlotOp, self).__init__('Plot', fn, i, inputs, outputs)
	def run(self, feed_dict):
		"""Run the plotting fn, then ship the current figure to the frontend."""
		results=super(PlotOp, self).run(feed_dict)
		# send the image over
		if app.is_notebook():
			fig=plt.gcf()
			# FIX: reuse the captured fig rather than calling plt.gcf() again.
			app.send_fig(fig, self.name)
			# close the figure
			plt.close(fig)
		return results
|
foutaise/texttable | texttable.py | obj2unicode | python | def obj2unicode(obj):
if isinstance(obj, unicode_type):
return obj
elif isinstance(obj, bytes_type):
try:
return unicode_type(obj, 'utf-8')
except UnicodeDecodeError as strerror:
sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (obj, strerror))
return unicode_type(obj, 'utf-8', 'replace')
else:
return unicode_type(obj) | Return a unicode representation of a python object | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L143-L155 | null | # texttable - module for creating simple ASCII tables
# Copyright (C) 2003-2019 Gerome Fournier <jef(at)foutaise.org>
"""module for creating simple ASCII tables
Example:
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"],
["Mme\\nLouise\\nBourgeau", 28, "Lou\\n\\nLoue"]])
print table.draw() + "\\n"
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print table.draw()
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
| Mme | | Lou |
| Louise | 28 | |
| Bourgeau | | Loue |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
from __future__ import division
__all__ = ["Texttable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'MIT'
__version__ = '1.6.1'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
Frank Sachsenheim:
- add Python 2/3-compatibility
Maximilian Hils:
- fix minor bug for Python 3 compatibility
frinkelpi:
- preserve empty lines
"""
import sys
import unicodedata
# define a text wrapping function to wrap some text
# to a specific width:
# - use cjkwrap if available (better CJK support)
# - fallback to textwrap otherwise
try:
    import cjkwrap

    def textwrapper(txt, width):
        """Wrap *txt* to *width* columns using cjkwrap (CJK-aware)."""
        return cjkwrap.wrap(txt, width)
except ImportError:
    try:
        import textwrap

        def textwrapper(txt, width):
            """Wrap *txt* to *width* columns using the stdlib textwrap."""
            return textwrap.wrap(txt, width)
    except ImportError:
        # textwrap ships with CPython; failing to import it is fatal.
        sys.stderr.write("Can't import textwrap module!\n")
        raise
# define a function to calculate the rendering width of a unicode character
# - use wcwidth if available
# - fallback to unicodedata information otherwise
try:
    import wcwidth

    def uchar_width(c):
        """Return the rendering width of a unicode character
        """
        # wcwidth() returns -1 for non-printable characters; clamp to 0.
        return max(0, wcwidth.wcwidth(c))
except ImportError:
    def uchar_width(c):
        """Return the rendering width of a unicode character
        """
        # 'W' (wide) and 'F' (fullwidth) East-Asian characters occupy two
        # columns; combining marks occupy none; everything else one.
        if unicodedata.east_asian_width(c) in 'WF':
            return 2
        elif unicodedata.combining(c):
            return 0
        else:
            return 1
from functools import reduce
if sys.version_info >= (3, 0):
    # Python 3: text is 'str', binary data is 'bytes'.
    unicode_type = str
    bytes_type = bytes
else:
    # Python 2: text is 'unicode', binary data is 'str'.
    unicode_type = unicode
    bytes_type = str
def len(iterable):
    """Redefining len here so it will be able to work with non-ASCII characters
    """
    # Strings are measured by rendering width (CJK-aware, via uchar_width);
    # any other object falls back to its own __len__.
    if isinstance(iterable, (bytes_type, unicode_type)):
        return sum(uchar_width(ch) for ch in obj2unicode(iterable))
    return iterable.__len__()
class ArraySizeError(Exception):
    """Exception raised when specified rows don't fit the required size
    """

    def __init__(self, msg):
        # Keep the message both in args (for Exception machinery) and as
        # a dedicated attribute used by __str__.
        Exception.__init__(self, msg, '')
        self.msg = msg

    def __str__(self):
        return self.msg
class FallbackToText(Exception):
    """Used for failed conversion to float"""
class Texttable:
    """Draw a simple ASCII table from rows of cells.

    Typical usage: configure the columns (alignment, datatype, width),
    feed data with add_row()/add_rows(), then render with draw().
    """

    # Decoration flags; combine them with "|" and pass to set_deco().
    BORDER = 1        # border around the whole table
    HEADER = 1 << 1   # horizontal line below the header
    HLINES = 1 << 2   # horizontal lines between rows
    VLINES = 1 << 3   # vertical lines between columns

    def __init__(self, max_width=80):
        """Constructor

        - max_width is an integer, specifying the maximum width of the table
        - if set to 0, size is unlimited, therefore cells won't be wrapped
        """
        self.set_max_width(max_width)
        self._precision = 3
        # every decoration is enabled by default
        self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
            Texttable.HEADER
        self.set_chars(['-', '|', '+', '='])
        self.reset()

    def reset(self):
        """Reset the instance

        - reset rows and header
        """
        self._hline_string = None
        self._row_size = None
        self._header = []
        self._rows = []
        return self

    def set_max_width(self, max_width):
        """Set the maximum width of the table

        - max_width is an integer, specifying the maximum width of the table
        - if set to 0, size is unlimited, therefore cells won't be wrapped
        """
        self._max_width = max_width if max_width > 0 else False
        return self

    def set_chars(self, array):
        """Set the characters used to draw lines between rows and columns

        - the array should contain 4 fields:
            [horizontal, vertical, corner, header]
        - default is set to:
            ['-', '|', '+', '=']
        """
        if len(array) != 4:
            raise ArraySizeError("array should contain 4 characters")
        # keep only the first character of each field
        array = [ x[:1] for x in [ str(s) for s in array ] ]
        (self._char_horiz, self._char_vert,
            self._char_corner, self._char_header) = array
        return self

    def set_deco(self, deco):
        """Set the table decoration

        - 'deco' can be a combination of:
            Texttable.BORDER: Border around the table
            Texttable.HEADER: Horizontal line below the header
            Texttable.HLINES: Horizontal lines between rows
            Texttable.VLINES: Vertical lines between columns
          All of them are enabled by default
        - example:
            Texttable.BORDER | Texttable.HEADER
        """
        self._deco = deco
        return self

    def set_header_align(self, array):
        """Set the desired header alignment

        - the elements of the array should be either "l", "c" or "r":
            * "l": column flushed left
            * "c": column centered
            * "r": column flushed right
        """
        self._check_row_size(array)
        self._header_align = array
        return self

    def set_cols_align(self, array):
        """Set the desired columns alignment

        - the elements of the array should be either "l", "c" or "r":
            * "l": column flushed left
            * "c": column centered
            * "r": column flushed right
        """
        self._check_row_size(array)
        self._align = array
        return self

    def set_cols_valign(self, array):
        """Set the desired columns vertical alignment

        - the elements of the array should be either "t", "m" or "b":
            * "t": column aligned on the top of the cell
            * "m": column aligned on the middle of the cell
            * "b": column aligned on the bottom of the cell
        """
        self._check_row_size(array)
        self._valign = array
        return self

    def set_cols_dtype(self, array):
        """Set the desired columns datatype for the cols.

        - the elements of the array should be either a callable or any of
          "a", "t", "f", "e" or "i":
            * "a": automatic (try to use the most appropriate datatype)
            * "t": treat as text
            * "f": treat as float in decimal format
            * "e": treat as float in exponential format
            * "i": treat as int
            * a callable: should return formatted string for any value given
        - by default, automatic datatyping is used for each column
        """
        self._check_row_size(array)
        self._dtype = array
        return self

    def set_cols_width(self, array):
        """Set the desired columns width

        - the elements of the array should be integers, specifying the
          width of each column. For example:
            [10, 20, 5]
        """
        self._check_row_size(array)
        try:
            array = list(map(int, array))
            if reduce(min, array) <= 0:
                raise ValueError
        except ValueError:
            sys.stderr.write("Wrong argument in column width specification\n")
            raise
        self._width = array
        return self

    def set_precision(self, width):
        """Set the desired precision for float/exponential formats

        - width must be an integer >= 0
        - default value is set to 3
        """
        # NOTE: zero is valid (the guard is width < 0); the message says so.
        if not type(width) is int or width < 0:
            raise ValueError('width must be an integer greater than or equal to 0')
        self._precision = width
        return self

    def header(self, array):
        """Specify the header of the table
        """
        self._check_row_size(array)
        self._header = list(map(obj2unicode, array))
        return self

    def add_row(self, array):
        """Add a row in the rows stack

        - cells can contain newlines and tabs
        """
        self._check_row_size(array)
        if not hasattr(self, "_dtype"):
            self._dtype = ["a"] * self._row_size
        cells = []
        for i, x in enumerate(array):
            cells.append(self._str(i, x))
        self._rows.append(cells)
        return self

    def add_rows(self, rows, header=True):
        """Add several rows in the rows stack

        - The 'rows' argument can be either an iterator returning arrays,
          or a two-dimensional array
        - 'header' specifies if the first row should be used as the header
          of the table
        """
        if header:
            # Accept both Python 2 iterators ('next') and Python 3
            # iterators ('__next__'); plain sequences fall through to
            # indexing/slicing below.
            if hasattr(rows, '__next__') or hasattr(rows, 'next'):
                self.header(next(rows))
            else:
                self.header(rows[0])
                rows = rows[1:]
        for row in rows:
            self.add_row(row)
        return self

    def draw(self):
        """Draw the table

        - the table is returned as a whole string
        """
        if not self._header and not self._rows:
            return
        self._compute_cols_width()
        self._check_align()
        out = ""
        if self._has_border():
            out += self._hline()
        if self._header:
            out += self._draw_line(self._header, isheader=True)
            if self._has_header():
                out += self._hline_header()
        length = 0
        for row in self._rows:
            length += 1
            out += self._draw_line(row)
            if self._has_hlines() and length < len(self._rows):
                out += self._hline()
        if self._has_border():
            out += self._hline()
        # drop the trailing newline added by the last line/hline
        return out[:-1]

    @classmethod
    def _to_float(cls, x):
        # Convert to float or raise FallbackToText so the caller can
        # render the value as plain text instead.
        if x is None:
            raise FallbackToText()
        try:
            return float(x)
        except (TypeError, ValueError):
            raise FallbackToText()

    @classmethod
    def _fmt_int(cls, x, **kw):
        """Integer formatting class-method.

        - x will be float-converted and then used.
        """
        return str(int(round(cls._to_float(x))))

    @classmethod
    def _fmt_float(cls, x, **kw):
        """Float formatting class-method.

        - x will be float-converted and then used.
        - precision will be taken from `n` kw-argument.
        """
        n = kw.get('n')
        return '%.*f' % (n, cls._to_float(x))

    @classmethod
    def _fmt_exp(cls, x, **kw):
        """Exponential formatting class-method.

        - x will be float-converted and then used.
        - precision will be taken from `n` kw-argument.
        """
        n = kw.get('n')
        return '%.*e' % (n, cls._to_float(x))

    @classmethod
    def _fmt_text(cls, x, **kw):
        """String formatting class-method."""
        return obj2unicode(x)

    @classmethod
    def _fmt_auto(cls, x, **kw):
        """auto formatting class-method."""
        f = cls._to_float(x)
        if abs(f) > 1e8:
            # very large magnitudes read better in exponent notation
            fn = cls._fmt_exp
        elif f - round(f) == 0:
            # whole numbers are shown without a fractional part
            fn = cls._fmt_int
        else:
            fn = cls._fmt_float
        return fn(x, **kw)

    def _str(self, i, x):
        """Handles string formatting of cell data

            i - index of the cell datatype in self._dtype
            x - cell data to format
        """
        FMT = {
            'a': self._fmt_auto,
            'i': self._fmt_int,
            'f': self._fmt_float,
            'e': self._fmt_exp,
            't': self._fmt_text,
        }
        n = self._precision
        dtype = self._dtype[i]
        try:
            if callable(dtype):
                return dtype(x)
            else:
                return FMT[dtype](x, n=n)
        except FallbackToText:
            # value could not be converted to float: render it as text
            return self._fmt_text(x)

    def _check_row_size(self, array):
        """Check that the specified array fits the previous rows size
        """
        if not self._row_size:
            self._row_size = len(array)
        elif self._row_size != len(array):
            raise ArraySizeError("array should contain %d elements" \
                % self._row_size)

    def _has_vlines(self):
        """Return a boolean, if vlines are required or not
        """
        return self._deco & Texttable.VLINES > 0

    def _has_hlines(self):
        """Return a boolean, if hlines are required or not
        """
        return self._deco & Texttable.HLINES > 0

    def _has_border(self):
        """Return a boolean, if border is required or not
        """
        return self._deco & Texttable.BORDER > 0

    def _has_header(self):
        """Return a boolean, if header line is required or not
        """
        return self._deco & Texttable.HEADER > 0

    def _hline_header(self):
        """Print header's horizontal line
        """
        return self._build_hline(True)

    def _hline(self):
        """Print an horizontal line
        """
        # the row separator never changes: build it once and cache it
        if not self._hline_string:
            self._hline_string = self._build_hline()
        return self._hline_string

    def _build_hline(self, is_header=False):
        """Return a string used to separated rows or separate header from
        rows
        """
        horiz = self._char_horiz
        if is_header:
            horiz = self._char_header
        # compute cell separator
        s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
            horiz)
        # build the line
        l = s.join([horiz * n for n in self._width])
        # add border if needed
        if self._has_border():
            l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
                self._char_corner)
        else:
            l += "\n"
        return l

    def _len_cell(self, cell):
        """Return the width of the cell

        Special characters are taken into account to return the width of the
        cell, such like newlines and tabs
        """
        cell_lines = cell.split('\n')
        maxi = 0
        for line in cell_lines:
            length = 0
            parts = line.split('\t')
            for part, i in zip(parts, list(range(1, len(parts) + 1))):
                length = length + len(part)
                if i < len(parts):
                    # a tab advances to the next multiple-of-8 column
                    length = (length//8 + 1) * 8
            maxi = max(maxi, length)
        return maxi

    def _compute_cols_width(self):
        """Return an array with the width of each column

        If a specific width has been specified, exit. If the total of the
        columns width exceed the table desired width, another width will be
        computed to fit, and cells will be wrapped.
        """
        if hasattr(self, "_width"):
            return
        maxi = []
        if self._header:
            maxi = [ self._len_cell(x) for x in self._header ]
        for row in self._rows:
            for cell, i in zip(row, list(range(len(row)))):
                try:
                    maxi[i] = max(maxi[i], self._len_cell(cell))
                except (TypeError, IndexError):
                    maxi.append(self._len_cell(cell))
        ncols = len(maxi)
        content_width = sum(maxi)
        deco_width = 3*(ncols-1) + [0,4][self._has_border()]
        if self._max_width and (content_width + deco_width) > self._max_width:
            # content too wide to fit the expected max_width:
            # recompute a maximum cell width for each cell
            if self._max_width < (ncols + deco_width):
                raise ValueError('max_width too low to render data')
            available_width = self._max_width - deco_width
            newmaxi = [0] * ncols
            i = 0
            # distribute the available width one character at a time,
            # round-robin, never exceeding a column's natural width
            while available_width > 0:
                if newmaxi[i] < maxi[i]:
                    newmaxi[i] += 1
                    available_width -= 1
                i = (i + 1) % ncols
            maxi = newmaxi
        self._width = maxi

    def _check_align(self):
        """Check if alignment has been specified, set default one if not
        """
        if not hasattr(self, "_header_align"):
            self._header_align = ["c"] * self._row_size
        if not hasattr(self, "_align"):
            self._align = ["l"] * self._row_size
        if not hasattr(self, "_valign"):
            self._valign = ["t"] * self._row_size

    def _draw_line(self, line, isheader=False):
        """Draw a line

        Loop over a single cell length, over all the cells
        """
        line = self._splitit(line, isheader)
        space = " "
        out = ""
        for i in range(len(line[0])):
            if self._has_border():
                out += "%s " % self._char_vert
            length = 0
            for cell, width, align in zip(line, self._width, self._align):
                length += 1
                cell_line = cell[i]
                fill = width - len(cell_line)
                if isheader:
                    align = self._header_align[length - 1]
                if align == "r":
                    out += fill * space + cell_line
                elif align == "c":
                    out += (int(fill/2) * space + cell_line \
                        + int(fill/2 + fill%2) * space)
                else:
                    out += cell_line + fill * space
                if length < len(line):
                    out += " %s " % [space, self._char_vert][self._has_vlines()]
            out += "%s\n" % ['', space + self._char_vert][self._has_border()]
        return out

    def _splitit(self, line, isheader):
        """Split each element of line to fit the column width

        Each element is turned into a list, result of the wrapping of the
        string to the desired width
        """
        line_wrapped = []
        for cell, width in zip(line, self._width):
            array = []
            for c in cell.split('\n'):
                if c.strip() == "":
                    # preserve empty lines instead of dropping them
                    array.append("")
                else:
                    array.extend(textwrapper(c, width))
            line_wrapped.append(array)
        max_cell_lines = reduce(max, list(map(len, line_wrapped)))
        for cell, valign in zip(line_wrapped, self._valign):
            if isheader:
                valign = "t"
            if valign == "m":
                missing = max_cell_lines - len(cell)
                cell[:0] = [""] * int(missing / 2)
                cell.extend([""] * int(missing / 2 + missing % 2))
            elif valign == "b":
                cell[:0] = [""] * (max_cell_lines - len(cell))
            else:
                cell.extend([""] * (max_cell_lines - len(cell)))
        return line_wrapped
if __name__ == '__main__':
    # Demo 1: per-column horizontal/vertical alignment, full decoration.
    table = Texttable()
    table.set_cols_align(["l", "r", "c"])
    table.set_cols_valign(["t", "m", "b"])
    table.add_rows([["Name", "Age", "Nickname"],
                    ["Mr\nXavier\nHuon", 32, "Xav'"],
                    ["Mr\nBaptiste\nClement", 1, "Baby"],
                    ["Mme\nLouise\nBourgeau", 28, "Lou\n \nLoue"]])
    print(table.draw() + "\n")
    # Demo 2: per-column datatype formatting, header rule only.
    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype(['t',   # text
                          'f',   # float (decimal)
                          'e',   # float (exponent)
                          'i',   # integer
                          'a'])  # automatic
    table.set_cols_align(["l", "r", "r", "r", "l"])
    table.add_rows([["text", "float", "exp", "int", "auto"],
                    ["abcd", "67", 654, 89, 128.001],
                    ["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
                    ["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
                    ["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
    print(table.draw())
|
foutaise/texttable | texttable.py | len | python | def len(iterable):
if isinstance(iterable, bytes_type) or isinstance(iterable, unicode_type):
return sum([uchar_width(c) for c in obj2unicode(iterable)])
else:
return iterable.__len__() | Redefining len here so it will be able to work with non-ASCII characters | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L158-L164 | [
"def obj2unicode(obj):\n \"\"\"Return a unicode representation of a python object\n \"\"\"\n if isinstance(obj, unicode_type):\n return obj\n elif isinstance(obj, bytes_type):\n try:\n return unicode_type(obj, 'utf-8')\n except UnicodeDecodeError as strerror:\n ... | # texttable - module for creating simple ASCII tables
# Copyright (C) 2003-2019 Gerome Fournier <jef(at)foutaise.org>
"""module for creating simple ASCII tables
Example:
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"],
["Mme\\nLouise\\nBourgeau", 28, "Lou\\n\\nLoue"]])
print table.draw() + "\\n"
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print table.draw()
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
| Mme | | Lou |
| Louise | 28 | |
| Bourgeau | | Loue |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
from __future__ import division
__all__ = ["Texttable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'MIT'
__version__ = '1.6.1'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
Frank Sachsenheim:
- add Python 2/3-compatibility
Maximilian Hils:
- fix minor bug for Python 3 compatibility
frinkelpi:
- preserve empty lines
"""
import sys
import unicodedata
# define a text wrapping function to wrap some text
# to a specific width:
# - use cjkwrap if available (better CJK support)
# - fallback to textwrap otherwise
try:
    import cjkwrap

    def textwrapper(txt, width):
        """Wrap *txt* to *width* columns using cjkwrap (CJK-aware)."""
        return cjkwrap.wrap(txt, width)
except ImportError:
    try:
        import textwrap

        def textwrapper(txt, width):
            """Wrap *txt* to *width* columns using the stdlib textwrap."""
            return textwrap.wrap(txt, width)
    except ImportError:
        # textwrap ships with CPython; failing to import it is fatal.
        sys.stderr.write("Can't import textwrap module!\n")
        raise
# define a function to calculate the rendering width of a unicode character
# - use wcwidth if available
# - fallback to unicodedata information otherwise
try:
    import wcwidth

    def uchar_width(c):
        """Return the rendering width of a unicode character
        """
        # wcwidth() returns -1 for non-printable characters; clamp to 0.
        return max(0, wcwidth.wcwidth(c))
except ImportError:
    def uchar_width(c):
        """Return the rendering width of a unicode character
        """
        # 'W' (wide) and 'F' (fullwidth) East-Asian characters occupy two
        # columns; combining marks occupy none; everything else one.
        if unicodedata.east_asian_width(c) in 'WF':
            return 2
        elif unicodedata.combining(c):
            return 0
        else:
            return 1
from functools import reduce
if sys.version_info >= (3, 0):
    # Python 3: text is 'str', binary data is 'bytes'.
    unicode_type = str
    bytes_type = bytes
else:
    # Python 2: text is 'unicode', binary data is 'str'.
    unicode_type = unicode
    bytes_type = str
def obj2unicode(obj):
    """Return a unicode representation of a python object
    """
    if isinstance(obj, unicode_type):
        return obj
    if isinstance(obj, bytes_type):
        # Decode byte strings as UTF-8; on failure, warn on stderr and
        # substitute undecodable bytes rather than propagating the error.
        try:
            return unicode_type(obj, 'utf-8')
        except UnicodeDecodeError as strerror:
            sys.stderr.write(
                "UnicodeDecodeError exception for string '%s': %s\n"
                % (obj, strerror))
            return unicode_type(obj, 'utf-8', 'replace')
    return unicode_type(obj)
class ArraySizeError(Exception):
    """Exception raised when specified rows don't fit the required size
    """

    def __init__(self, msg):
        # Keep the message both in args (for Exception machinery) and as
        # a dedicated attribute used by __str__.
        Exception.__init__(self, msg, '')
        self.msg = msg

    def __str__(self):
        return self.msg
class FallbackToText(Exception):
    """Used for failed conversion to float"""
class Texttable:
    """Draw a simple ASCII table from rows of cells.

    Typical usage: configure the columns (alignment, datatype, width),
    feed data with add_row()/add_rows(), then render with draw().
    """

    # Decoration flags; combine them with "|" and pass to set_deco().
    BORDER = 1        # border around the whole table
    HEADER = 1 << 1   # horizontal line below the header
    HLINES = 1 << 2   # horizontal lines between rows
    VLINES = 1 << 3   # vertical lines between columns

    def __init__(self, max_width=80):
        """Constructor

        - max_width is an integer, specifying the maximum width of the table
        - if set to 0, size is unlimited, therefore cells won't be wrapped
        """
        self.set_max_width(max_width)
        self._precision = 3
        # every decoration is enabled by default
        self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
            Texttable.HEADER
        self.set_chars(['-', '|', '+', '='])
        self.reset()

    def reset(self):
        """Reset the instance

        - reset rows and header
        """
        self._hline_string = None
        self._row_size = None
        self._header = []
        self._rows = []
        return self

    def set_max_width(self, max_width):
        """Set the maximum width of the table

        - max_width is an integer, specifying the maximum width of the table
        - if set to 0, size is unlimited, therefore cells won't be wrapped
        """
        self._max_width = max_width if max_width > 0 else False
        return self

    def set_chars(self, array):
        """Set the characters used to draw lines between rows and columns

        - the array should contain 4 fields:
            [horizontal, vertical, corner, header]
        - default is set to:
            ['-', '|', '+', '=']
        """
        if len(array) != 4:
            raise ArraySizeError("array should contain 4 characters")
        # keep only the first character of each field
        array = [ x[:1] for x in [ str(s) for s in array ] ]
        (self._char_horiz, self._char_vert,
            self._char_corner, self._char_header) = array
        return self

    def set_deco(self, deco):
        """Set the table decoration

        - 'deco' can be a combination of:
            Texttable.BORDER: Border around the table
            Texttable.HEADER: Horizontal line below the header
            Texttable.HLINES: Horizontal lines between rows
            Texttable.VLINES: Vertical lines between columns
          All of them are enabled by default
        - example:
            Texttable.BORDER | Texttable.HEADER
        """
        self._deco = deco
        return self

    def set_header_align(self, array):
        """Set the desired header alignment

        - the elements of the array should be either "l", "c" or "r":
            * "l": column flushed left
            * "c": column centered
            * "r": column flushed right
        """
        self._check_row_size(array)
        self._header_align = array
        return self

    def set_cols_align(self, array):
        """Set the desired columns alignment

        - the elements of the array should be either "l", "c" or "r":
            * "l": column flushed left
            * "c": column centered
            * "r": column flushed right
        """
        self._check_row_size(array)
        self._align = array
        return self

    def set_cols_valign(self, array):
        """Set the desired columns vertical alignment

        - the elements of the array should be either "t", "m" or "b":
            * "t": column aligned on the top of the cell
            * "m": column aligned on the middle of the cell
            * "b": column aligned on the bottom of the cell
        """
        self._check_row_size(array)
        self._valign = array
        return self

    def set_cols_dtype(self, array):
        """Set the desired columns datatype for the cols.

        - the elements of the array should be either a callable or any of
          "a", "t", "f", "e" or "i":
            * "a": automatic (try to use the most appropriate datatype)
            * "t": treat as text
            * "f": treat as float in decimal format
            * "e": treat as float in exponential format
            * "i": treat as int
            * a callable: should return formatted string for any value given
        - by default, automatic datatyping is used for each column
        """
        self._check_row_size(array)
        self._dtype = array
        return self

    def set_cols_width(self, array):
        """Set the desired columns width

        - the elements of the array should be integers, specifying the
          width of each column. For example:
            [10, 20, 5]
        """
        self._check_row_size(array)
        try:
            array = list(map(int, array))
            if reduce(min, array) <= 0:
                raise ValueError
        except ValueError:
            sys.stderr.write("Wrong argument in column width specification\n")
            raise
        self._width = array
        return self

    def set_precision(self, width):
        """Set the desired precision for float/exponential formats

        - width must be an integer >= 0
        - default value is set to 3
        """
        # NOTE: zero is valid (the guard is width < 0); the message says so.
        if not type(width) is int or width < 0:
            raise ValueError('width must be an integer greater than or equal to 0')
        self._precision = width
        return self

    def header(self, array):
        """Specify the header of the table
        """
        self._check_row_size(array)
        self._header = list(map(obj2unicode, array))
        return self

    def add_row(self, array):
        """Add a row in the rows stack

        - cells can contain newlines and tabs
        """
        self._check_row_size(array)
        if not hasattr(self, "_dtype"):
            self._dtype = ["a"] * self._row_size
        cells = []
        for i, x in enumerate(array):
            cells.append(self._str(i, x))
        self._rows.append(cells)
        return self

    def add_rows(self, rows, header=True):
        """Add several rows in the rows stack

        - The 'rows' argument can be either an iterator returning arrays,
          or a two-dimensional array
        - 'header' specifies if the first row should be used as the header
          of the table
        """
        if header:
            # Accept both Python 2 iterators ('next') and Python 3
            # iterators ('__next__'); plain sequences fall through to
            # indexing/slicing below.
            if hasattr(rows, '__next__') or hasattr(rows, 'next'):
                self.header(next(rows))
            else:
                self.header(rows[0])
                rows = rows[1:]
        for row in rows:
            self.add_row(row)
        return self

    def draw(self):
        """Draw the table

        - the table is returned as a whole string
        """
        if not self._header and not self._rows:
            return
        self._compute_cols_width()
        self._check_align()
        out = ""
        if self._has_border():
            out += self._hline()
        if self._header:
            out += self._draw_line(self._header, isheader=True)
            if self._has_header():
                out += self._hline_header()
        length = 0
        for row in self._rows:
            length += 1
            out += self._draw_line(row)
            if self._has_hlines() and length < len(self._rows):
                out += self._hline()
        if self._has_border():
            out += self._hline()
        # drop the trailing newline added by the last line/hline
        return out[:-1]

    @classmethod
    def _to_float(cls, x):
        # Convert to float or raise FallbackToText so the caller can
        # render the value as plain text instead.
        if x is None:
            raise FallbackToText()
        try:
            return float(x)
        except (TypeError, ValueError):
            raise FallbackToText()

    @classmethod
    def _fmt_int(cls, x, **kw):
        """Integer formatting class-method.

        - x will be float-converted and then used.
        """
        return str(int(round(cls._to_float(x))))

    @classmethod
    def _fmt_float(cls, x, **kw):
        """Float formatting class-method.

        - x will be float-converted and then used.
        - precision will be taken from `n` kw-argument.
        """
        n = kw.get('n')
        return '%.*f' % (n, cls._to_float(x))

    @classmethod
    def _fmt_exp(cls, x, **kw):
        """Exponential formatting class-method.

        - x will be float-converted and then used.
        - precision will be taken from `n` kw-argument.
        """
        n = kw.get('n')
        return '%.*e' % (n, cls._to_float(x))

    @classmethod
    def _fmt_text(cls, x, **kw):
        """String formatting class-method."""
        return obj2unicode(x)

    @classmethod
    def _fmt_auto(cls, x, **kw):
        """auto formatting class-method."""
        f = cls._to_float(x)
        if abs(f) > 1e8:
            # very large magnitudes read better in exponent notation
            fn = cls._fmt_exp
        elif f - round(f) == 0:
            # whole numbers are shown without a fractional part
            fn = cls._fmt_int
        else:
            fn = cls._fmt_float
        return fn(x, **kw)

    def _str(self, i, x):
        """Handles string formatting of cell data

            i - index of the cell datatype in self._dtype
            x - cell data to format
        """
        FMT = {
            'a': self._fmt_auto,
            'i': self._fmt_int,
            'f': self._fmt_float,
            'e': self._fmt_exp,
            't': self._fmt_text,
        }
        n = self._precision
        dtype = self._dtype[i]
        try:
            if callable(dtype):
                return dtype(x)
            else:
                return FMT[dtype](x, n=n)
        except FallbackToText:
            # value could not be converted to float: render it as text
            return self._fmt_text(x)

    def _check_row_size(self, array):
        """Check that the specified array fits the previous rows size
        """
        if not self._row_size:
            self._row_size = len(array)
        elif self._row_size != len(array):
            raise ArraySizeError("array should contain %d elements" \
                % self._row_size)

    def _has_vlines(self):
        """Return a boolean, if vlines are required or not
        """
        return self._deco & Texttable.VLINES > 0

    def _has_hlines(self):
        """Return a boolean, if hlines are required or not
        """
        return self._deco & Texttable.HLINES > 0

    def _has_border(self):
        """Return a boolean, if border is required or not
        """
        return self._deco & Texttable.BORDER > 0

    def _has_header(self):
        """Return a boolean, if header line is required or not
        """
        return self._deco & Texttable.HEADER > 0

    def _hline_header(self):
        """Print header's horizontal line
        """
        return self._build_hline(True)

    def _hline(self):
        """Print an horizontal line
        """
        # the row separator never changes: build it once and cache it
        if not self._hline_string:
            self._hline_string = self._build_hline()
        return self._hline_string

    def _build_hline(self, is_header=False):
        """Return a string used to separated rows or separate header from
        rows
        """
        horiz = self._char_horiz
        if is_header:
            horiz = self._char_header
        # compute cell separator
        s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
            horiz)
        # build the line
        l = s.join([horiz * n for n in self._width])
        # add border if needed
        if self._has_border():
            l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
                self._char_corner)
        else:
            l += "\n"
        return l

    def _len_cell(self, cell):
        """Return the width of the cell

        Special characters are taken into account to return the width of the
        cell, such like newlines and tabs
        """
        cell_lines = cell.split('\n')
        maxi = 0
        for line in cell_lines:
            length = 0
            parts = line.split('\t')
            for part, i in zip(parts, list(range(1, len(parts) + 1))):
                length = length + len(part)
                if i < len(parts):
                    # a tab advances to the next multiple-of-8 column
                    length = (length//8 + 1) * 8
            maxi = max(maxi, length)
        return maxi

    def _compute_cols_width(self):
        """Return an array with the width of each column

        If a specific width has been specified, exit. If the total of the
        columns width exceed the table desired width, another width will be
        computed to fit, and cells will be wrapped.
        """
        if hasattr(self, "_width"):
            return
        maxi = []
        if self._header:
            maxi = [ self._len_cell(x) for x in self._header ]
        for row in self._rows:
            for cell, i in zip(row, list(range(len(row)))):
                try:
                    maxi[i] = max(maxi[i], self._len_cell(cell))
                except (TypeError, IndexError):
                    maxi.append(self._len_cell(cell))
        ncols = len(maxi)
        content_width = sum(maxi)
        deco_width = 3*(ncols-1) + [0,4][self._has_border()]
        if self._max_width and (content_width + deco_width) > self._max_width:
            # content too wide to fit the expected max_width:
            # recompute a maximum cell width for each cell
            if self._max_width < (ncols + deco_width):
                raise ValueError('max_width too low to render data')
            available_width = self._max_width - deco_width
            newmaxi = [0] * ncols
            i = 0
            # distribute the available width one character at a time,
            # round-robin, never exceeding a column's natural width
            while available_width > 0:
                if newmaxi[i] < maxi[i]:
                    newmaxi[i] += 1
                    available_width -= 1
                i = (i + 1) % ncols
            maxi = newmaxi
        self._width = maxi

    def _check_align(self):
        """Check if alignment has been specified, set default one if not
        """
        if not hasattr(self, "_header_align"):
            self._header_align = ["c"] * self._row_size
        if not hasattr(self, "_align"):
            self._align = ["l"] * self._row_size
        if not hasattr(self, "_valign"):
            self._valign = ["t"] * self._row_size

    def _draw_line(self, line, isheader=False):
        """Draw a line

        Loop over a single cell length, over all the cells
        """
        line = self._splitit(line, isheader)
        space = " "
        out = ""
        for i in range(len(line[0])):
            if self._has_border():
                out += "%s " % self._char_vert
            length = 0
            for cell, width, align in zip(line, self._width, self._align):
                length += 1
                cell_line = cell[i]
                fill = width - len(cell_line)
                if isheader:
                    align = self._header_align[length - 1]
                if align == "r":
                    out += fill * space + cell_line
                elif align == "c":
                    out += (int(fill/2) * space + cell_line \
                        + int(fill/2 + fill%2) * space)
                else:
                    out += cell_line + fill * space
                if length < len(line):
                    out += " %s " % [space, self._char_vert][self._has_vlines()]
            out += "%s\n" % ['', space + self._char_vert][self._has_border()]
        return out

    def _splitit(self, line, isheader):
        """Split each element of line to fit the column width

        Each element is turned into a list, result of the wrapping of the
        string to the desired width
        """
        line_wrapped = []
        for cell, width in zip(line, self._width):
            array = []
            for c in cell.split('\n'):
                if c.strip() == "":
                    # preserve empty lines instead of dropping them
                    array.append("")
                else:
                    array.extend(textwrapper(c, width))
            line_wrapped.append(array)
        max_cell_lines = reduce(max, list(map(len, line_wrapped)))
        for cell, valign in zip(line_wrapped, self._valign):
            if isheader:
                valign = "t"
            if valign == "m":
                missing = max_cell_lines - len(cell)
                cell[:0] = [""] * int(missing / 2)
                cell.extend([""] * int(missing / 2 + missing % 2))
            elif valign == "b":
                cell[:0] = [""] * (max_cell_lines - len(cell))
            else:
                cell.extend([""] * (max_cell_lines - len(cell)))
        return line_wrapped
if __name__ == '__main__':
    # Demo 1: per-column horizontal/vertical alignment, full decoration.
    table = Texttable()
    table.set_cols_align(["l", "r", "c"])
    table.set_cols_valign(["t", "m", "b"])
    table.add_rows([["Name", "Age", "Nickname"],
                    ["Mr\nXavier\nHuon", 32, "Xav'"],
                    ["Mr\nBaptiste\nClement", 1, "Baby"],
                    ["Mme\nLouise\nBourgeau", 28, "Lou\n \nLoue"]])
    print(table.draw() + "\n")
    # Demo 2: per-column datatype formatting, header rule only.
    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype(['t',   # text
                          'f',   # float (decimal)
                          'e',   # float (exponent)
                          'i',   # integer
                          'a'])  # automatic
    table.set_cols_align(["l", "r", "r", "r", "l"])
    table.add_rows([["text", "float", "exp", "int", "auto"],
                    ["abcd", "67", 654, 89, 128.001],
                    ["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
                    ["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
                    ["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
    print(table.draw())
|
foutaise/texttable | texttable.py | Texttable.set_chars | python | def set_chars(self, array):
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self | Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '='] | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L227-L244 | [
"def len(iterable):\n \"\"\"Redefining len here so it will be able to work with non-ASCII characters\n \"\"\"\n if isinstance(iterable, bytes_type) or isinstance(iterable, unicode_type):\n return sum([uchar_width(c) for c in obj2unicode(iterable)])\n else:\n return iterable.__len__()\n"
] | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable.set_header_align | python | def set_header_align(self, array):
self._check_row_size(array)
self._header_align = array
return self | Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L266-L278 | [
"def _check_row_size(self, array):\n \"\"\"Check that the specified array fits the previous rows size\n \"\"\"\n\n if not self._row_size:\n self._row_size = len(array)\n elif self._row_size != len(array):\n raise ArraySizeError(\"array should contain %d elements\" \\\n % self._r... | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable.set_cols_align | python | def set_cols_align(self, array):
self._check_row_size(array)
self._align = array
return self | Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L280-L292 | [
"def _check_row_size(self, array):\n \"\"\"Check that the specified array fits the previous rows size\n \"\"\"\n\n if not self._row_size:\n self._row_size = len(array)\n elif self._row_size != len(array):\n raise ArraySizeError(\"array should contain %d elements\" \\\n % self._r... | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable.set_cols_valign | python | def set_cols_valign(self, array):
self._check_row_size(array)
self._valign = array
return self | Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L294-L306 | [
"def _check_row_size(self, array):\n \"\"\"Check that the specified array fits the previous rows size\n \"\"\"\n\n if not self._row_size:\n self._row_size = len(array)\n elif self._row_size != len(array):\n raise ArraySizeError(\"array should contain %d elements\" \\\n % self._r... | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable.set_cols_dtype | python | def set_cols_dtype(self, array):
self._check_row_size(array)
self._dtype = array
return self | Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L308-L326 | [
"def _check_row_size(self, array):\n \"\"\"Check that the specified array fits the previous rows size\n \"\"\"\n\n if not self._row_size:\n self._row_size = len(array)\n elif self._row_size != len(array):\n raise ArraySizeError(\"array should contain %d elements\" \\\n % self._r... | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable.set_precision | python | def set_precision(self, width):
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self | Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3 | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L348-L359 | null | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable.header | python | def header(self, array):
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self | Specify the header of the table | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L361-L367 | [
"def _check_row_size(self, array):\n \"\"\"Check that the specified array fits the previous rows size\n \"\"\"\n\n if not self._row_size:\n self._row_size = len(array)\n elif self._row_size != len(array):\n raise ArraySizeError(\"array should contain %d elements\" \\\n % self._r... | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable.add_row | python | def add_row(self, array):
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self | Add a row in the rows stack
- cells can contain newlines and tabs | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L369-L384 | [
"def _str(self, i, x):\n \"\"\"Handles string formatting of cell data\n\n i - index of the cell datatype in self._dtype\n x - cell data to format\n \"\"\"\n FMT = {\n 'a':self._fmt_auto,\n 'i':self._fmt_int,\n 'f':self._fmt_float,\n 'e':self._fmt_exp,\n 't':... | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
    def _draw_line(self, line, isheader=False):
        """Render one logical row (possibly several text lines) as a string.

        Loops over the wrapped height of the row, emitting every cell's
        i-th text line with padding, separators and optional borders.
        """
        # wrap every cell to its column width and pad to a common height
        line = self._splitit(line, isheader)
        space = " "
        out = ""
        for i in range(len(line[0])):
            if self._has_border():
                out += "%s " % self._char_vert
            length = 0
            for cell, width, align in zip(line, self._width, self._align):
                length += 1
                cell_line = cell[i]
                fill = width - len(cell_line)
                if isheader:
                    # header rows use their own alignment table
                    align = self._header_align[length - 1]
                if align == "r":
                    out += fill * space + cell_line
                elif align == "c":
                    # center: odd padding leans to the right side
                    out += (int(fill/2) * space + cell_line \
                            + int(fill/2 + fill%2) * space)
                else:
                    out += cell_line + fill * space
                if length < len(line):
                    # inner separator: " | " with vlines, "   " without
                    out += " %s " % [space, self._char_vert][self._has_vlines()]
            out += "%s\n" % ['', space + self._char_vert][self._has_border()]
        return out
    def _splitit(self, line, isheader):
        """Split each element of line to fit the column width.

        Each element is turned into a list of text lines, result of
        wrapping the string to the desired width (textwrapper is a
        module-level helper defined outside this chunk -- presumably a
        textwrap-based wrapper; confirm against the full file).  All
        cells are then padded with empty lines to the same height,
        honoring the column's vertical alignment ("t" top, "m" middle,
        "b" bottom); headers are always top-aligned.
        """
        line_wrapped = []
        for cell, width in zip(line, self._width):
            array = []
            for c in cell.split('\n'):
                if c.strip() == "":
                    array.append("")
                else:
                    array.extend(textwrapper(c, width))
            line_wrapped.append(array)
        max_cell_lines = reduce(max, list(map(len, line_wrapped)))
        for cell, valign in zip(line_wrapped, self._valign):
            if isheader:
                valign = "t"
            if valign == "m":
                # split the padding between top and bottom; the extra
                # line (odd padding) goes to the bottom
                missing = max_cell_lines - len(cell)
                cell[:0] = [""] * int(missing / 2)
                cell.extend([""] * int(missing / 2 + missing % 2))
            elif valign == "b":
                cell[:0] = [""] * (max_cell_lines - len(cell))
            else:
                cell.extend([""] * (max_cell_lines - len(cell)))
        return line_wrapped
|
foutaise/texttable | texttable.py | Texttable.add_rows | python | def add_rows(self, rows, header=True):
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self | Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L386-L405 | [
"def header(self, array):\n \"\"\"Specify the header of the table\n \"\"\"\n\n self._check_row_size(array)\n self._header = list(map(obj2unicode, array))\n return self\n",
"def add_row(self, array):\n \"\"\"Add a row in the rows stack\n\n - cells can contain newlines and tabs\n \"\"\"\n\n ... | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
    def __init__(self, max_width=80):
        """Constructor

        - max_width is an integer, specifying the maximum width of the table
        - if set to 0, size is unlimited, therefore cells won't be wrapped
        """
        self.set_max_width(max_width)
        self._precision = 3  # digits used by the "f"/"e" formats
        # draw every frame element by default
        self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
            Texttable.HEADER
        self.set_chars(['-', '|', '+', '='])
        self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
    def set_header_align(self, array):
        """Set the desired header alignment, one entry per column.

        - the elements of the array should be either "l", "c" or "r":

          * "l": column flushed left
          * "c": column centered
          * "r": column flushed right
        - raises ArraySizeError if the array length does not match the
          column count established earlier
        """
        self._check_row_size(array)
        self._header_align = array
        return self
    def set_cols_align(self, array):
        """Set the desired columns alignment, one entry per column.

        - the elements of the array should be either "l", "c" or "r":

          * "l": column flushed left
          * "c": column centered
          * "r": column flushed right
        - raises ArraySizeError if the array length does not match the
          column count established earlier
        """
        self._check_row_size(array)
        self._align = array
        return self
    def set_cols_valign(self, array):
        """Set the desired columns vertical alignment, one entry per column.

        - the elements of the array should be either "t", "m" or "b":

          * "t": column aligned on the top of the cell
          * "m": column aligned on the middle of the cell
          * "b": column aligned on the bottom of the cell
        - raises ArraySizeError if the array length does not match the
          column count established earlier
        """
        self._check_row_size(array)
        self._valign = array
        return self
    def set_cols_dtype(self, array):
        """Set the desired columns datatype for the cols.

        - the elements of the array should be either a callable or any of
          "a", "t", "f", "e" or "i":

          * "a": automatic (try to use the most appropriate datatype)
          * "t": treat as text
          * "f": treat as float in decimal format
          * "e": treat as float in exponential format
          * "i": treat as int
          * a callable: should return formatted string for any value given
        - by default, automatic datatyping is used for each column
        - raises ArraySizeError if the array length does not match the
          column count established earlier
        """
        self._check_row_size(array)
        self._dtype = array
        return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
    def header(self, array):
        """Specify the header of the table.

        Cells are normalized via the module-level obj2unicode helper
        (defined outside this chunk); the row length fixes the column
        count for all subsequent rows (ArraySizeError on mismatch).
        """
        self._check_row_size(array)
        self._header = list(map(obj2unicode, array))
        return self
    def add_row(self, array):
        """Add a row in the rows stack.

        - cells can contain newlines and tabs
        - each cell is formatted immediately via _str() according to the
          column's dtype; defaults every column to automatic ("a") dtype
          on the first row if set_cols_dtype() was never called
        - raises ArraySizeError if the row length does not match the
          column count established earlier
        """
        self._check_row_size(array)
        if not hasattr(self, "_dtype"):
            self._dtype = ["a"] * self._row_size
        cells = []
        for i, x in enumerate(array):
            cells.append(self._str(i, x))
        self._rows.append(cells)
        return self
    def draw(self):
        """Draw the table.

        - the table is returned as a whole string
        - returns None when there is nothing to draw (no header, no rows)
        """
        if not self._header and not self._rows:
            return
        self._compute_cols_width()
        self._check_align()
        out = ""
        if self._has_border():
            out += self._hline()
        if self._header:
            out += self._draw_line(self._header, isheader=True)
            if self._has_header():
                out += self._hline_header()
        length = 0
        for row in self._rows:
            length += 1
            out += self._draw_line(row)
            # separator between rows, but not after the last one
            if self._has_hlines() and length < len(self._rows):
                out += self._hline()
        if self._has_border():
            out += self._hline()
        # every piece above ends with '\n'; strip the trailing one
        return out[:-1]
    @classmethod
    def _to_float(cls, x):
        """Coerce *x* to float, raising FallbackToText when impossible.

        None, as well as any value float() rejects, signals the caller
        to fall back to plain-text formatting.
        """
        if x is None:
            raise FallbackToText()
        try:
            return float(x)
        except (TypeError, ValueError):
            raise FallbackToText()
    @classmethod
    def _fmt_int(cls, x, **kw):
        """Integer formatting class-method.

        - x is float-converted (raising FallbackToText when impossible)
          and rendered rounded to the nearest integer
        """
        return str(int(round(cls._to_float(x))))
    @classmethod
    def _fmt_float(cls, x, **kw):
        """Float formatting class-method.

        - x is float-converted via _to_float (raising FallbackToText when
          impossible) and rendered in fixed-point notation
          (the old docstring wrongly claimed x was ignored)
        - precision is taken from the `n` kw-argument
        """
        n = kw.get('n')
        return '%.*f' % (n, cls._to_float(x))
    @classmethod
    def _fmt_exp(cls, x, **kw):
        """Exponential formatting class-method.

        - x is float-converted via _to_float (raising FallbackToText when
          impossible) and rendered in exponential notation
          (the old docstring wrongly claimed x was ignored)
        - precision is taken from the `n` kw-argument
        """
        n = kw.get('n')
        return '%.*e' % (n, cls._to_float(x))
    @classmethod
    def _fmt_text(cls, x, **kw):
        """String formatting class-method.

        Delegates to the module-level obj2unicode helper (defined outside
        this chunk); extra kw-arguments are accepted and ignored so all
        _fmt_* methods share one call signature.
        """
        return obj2unicode(x)
    @classmethod
    def _fmt_auto(cls, x, **kw):
        """auto formatting class-method.

        Picks exponential notation for magnitudes above 1e8, integer
        rendering for whole numbers, fixed-point otherwise.
        """
        f = cls._to_float(x)
        if abs(f) > 1e8:
            fn = cls._fmt_exp
        else:
            if f - round(f) == 0:
                fn = cls._fmt_int
            else:
                fn = cls._fmt_float
        return fn(x, **kw)
    def _str(self, i, x):
        """Handles string formatting of cell data.

        i - index of the cell datatype in self._dtype
        x - cell data to format

        Falls back to plain-text formatting whenever the selected
        formatter raises FallbackToText (e.g. non-numeric data in a
        numeric column).
        """
        FMT = {
            'a':self._fmt_auto,
            'i':self._fmt_int,
            'f':self._fmt_float,
            'e':self._fmt_exp,
            't':self._fmt_text,
        }
        n = self._precision
        dtype = self._dtype[i]
        try:
            if callable(dtype):
                # user-supplied formatter takes precedence over the codes
                return dtype(x)
            else:
                return FMT[dtype](x, n=n)
        except FallbackToText:
            return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable.draw | python | def draw(self):
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1] | Draw the table
- the table is returned as a whole string | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L407-L432 | [
"def _has_border(self):\n \"\"\"Return a boolean, if border is required or not\n \"\"\"\n\n return self._deco & Texttable.BORDER > 0\n",
"def _compute_cols_width(self):\n \"\"\"Return an array with the width of each column\n\n If a specific width has been specified, exit. If the total of the\n c... | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable._fmt_int | python | def _fmt_int(cls, x, **kw):
return str(int(round(cls._to_float(x)))) | Integer formatting class-method.
- x will be float-converted and then used. | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L444-L449 | null | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
    """Integer formatting class-method.

    - x is float-converted, rounded to the nearest integer, and
      rendered with str().
    Restored: a dangling duplicate @classmethod stood where this method
    belongs; both _fmt_auto and the FMT table in _str reference
    cls._fmt_int, so the class was unusable without it.
    """
    return str(int(round(cls._to_float(x))))

@classmethod
def _fmt_float(cls, x, **kw):
    """Float formatting class-method.

    - the value is float-converted via cls._to_float before formatting
    - precision is taken from the `n` kw-argument
    """
    n = kw.get('n')
    return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable._fmt_float | python | def _fmt_float(cls, x, **kw):
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x)) | Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument. | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L452-L461 | null | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
    """Constructor

    - max_width is an integer, specifying the maximum width of the table
    - if set to 0, size is unlimited, therefore cells won't be wrapped
    """
    # Normalize the width first; set_max_width() stores False for <= 0.
    self.set_max_width(max_width)
    # Digits after the decimal point used by the float/exp formatters.
    self._precision = 3
    # All decoration bits on by default: border, header rule, h/v lines.
    self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
        Texttable.HEADER
    self.set_chars(['-', '|', '+', '='])
    # reset() initializes the mutable per-table state (header, rows, caches).
    self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
    """Draw the table

    - the table is returned as a whole string
    - returns None when there is nothing to render (no header, no rows)
    """
    if not self._header and not self._rows:
        return
    # Compute column widths (no-op if set explicitly) and fill in
    # default alignments for anything the caller did not set.
    self._compute_cols_width()
    self._check_align()
    out = ""
    if self._has_border():
        out += self._hline()
    if self._header:
        out += self._draw_line(self._header, isheader=True)
        if self._has_header():
            out += self._hline_header()
    length = 0
    for row in self._rows:
        length += 1
        out += self._draw_line(row)
        # Separator between rows, but not after the last one.
        if self._has_hlines() and length < len(self._rows):
            out += self._hline()
    if self._has_border():
        out += self._hline()
    # Every emitted line ends with '\n'; drop the trailing one.
    return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
    """Float formatting class-method.

    - the value is float-converted via cls._to_float before formatting
    - precision is taken from the `n` kw-argument
    Restored: a dangling duplicate @classmethod stood where this method
    belongs; both _fmt_auto and the FMT table in _str reference
    cls._fmt_float, so the class was unusable without it.
    """
    n = kw.get('n')
    return '%.*f' % (n, cls._to_float(x))

@classmethod
def _fmt_exp(cls, x, **kw):
    """Exponential formatting class-method.

    - the value is float-converted via cls._to_float before formatting
    - precision is taken from the `n` kw-argument
    """
    n = kw.get('n')
    return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
    """Handles string formatting of cell data

    i - index of the cell datatype in self._dtype
    x - cell data to format
    """
    # Dispatch table: dtype code -> formatting method.
    FMT = {
        'a': self._fmt_auto,
        'i': self._fmt_int,
        'f': self._fmt_float,
        'e': self._fmt_exp,
        't': self._fmt_text,
    }
    n = self._precision
    dtype = self._dtype[i]
    try:
        if callable(dtype):
            # User-supplied formatter: called directly with the raw value.
            return dtype(x)
        else:
            return FMT[dtype](x, n=n)
    except FallbackToText:
        # Non-numeric data under a numeric dtype degrades to plain text.
        return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
    """Return an array with the width of each column

    If a specific width has been specified, exit. If the total of the
    columns width exceed the table desired width, another width will be
    computed to fit, and cells will be wrapped.
    """
    if hasattr(self, "_width"):
        # Explicit widths were set via set_cols_width(): nothing to do.
        return
    maxi = []
    if self._header:
        maxi = [ self._len_cell(x) for x in self._header ]
    for row in self._rows:
        for cell,i in zip(row, list(range(len(row)))):
            try:
                maxi[i] = max(maxi[i], self._len_cell(cell))
            except (TypeError, IndexError):
                # Column not seen before (row longer than maxi): grow it.
                maxi.append(self._len_cell(cell))
    ncols = len(maxi)
    content_width = sum(maxi)
    # Decoration cost: 3 chars (" | ") between columns, plus 4 for the
    # border ("| " on the left, " |" on the right) when enabled.
    deco_width = 3*(ncols-1) + [0,4][self._has_border()]
    if self._max_width and (content_width + deco_width) > self._max_width:
        """ content too wide to fit the expected max_width
        let's recompute maximum cell width for each cell
        """
        if self._max_width < (ncols + deco_width):
            raise ValueError('max_width too low to render data')
        available_width = self._max_width - deco_width
        newmaxi = [0] * ncols
        i = 0
        # Round-robin: grant each column one extra char at a time, capped
        # at its natural width, until the budget is spent.  Terminates
        # because the budget is strictly less than sum(maxi) here.
        while available_width > 0:
            if newmaxi[i] < maxi[i]:
                newmaxi[i] += 1
                available_width -= 1
            i = (i + 1) % ncols
        maxi = newmaxi
    self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
    """Draw a line

    Loop over a single cell length, over all the cells
    """
    # Wrap every cell to its column width; each cell becomes a list of
    # sub-lines, all padded to equal height by _splitit().
    line = self._splitit(line, isheader)
    space = " "
    out = ""
    for i in range(len(line[0])):
        # One pass per physical output row of this (possibly multi-line) row.
        if self._has_border():
            out += "%s " % self._char_vert
        length = 0
        for cell, width, align in zip(line, self._width, self._align):
            length += 1
            cell_line = cell[i]
            fill = width - len(cell_line)
            if isheader:
                # Header cells use the header alignment, not the column one.
                align = self._header_align[length - 1]
            if align == "r":
                out += fill * space + cell_line
            elif align == "c":
                # Left-biased centering: any odd leftover space goes right.
                out += (int(fill/2) * space + cell_line \
                    + int(fill/2 + fill%2) * space)
            else:
                out += cell_line + fill * space
            if length < len(line):
                # Inter-column separator: " | " with vlines, spaces without.
                out += " %s " % [space, self._char_vert][self._has_vlines()]
        out += "%s\n" % ['', space + self._char_vert][self._has_border()]
    return out
def _splitit(self, line, isheader):
    """Split each element of line to fit the column width

    Each element is turned into a list, result of the wrapping of the
    string to the desired width
    """
    line_wrapped = []
    for cell, width in zip(line, self._width):
        array = []
        for c in cell.split('\n'):
            if c.strip() == "":
                # Keep blank lines explicitly (the wrapper would drop them).
                array.append("")
            else:
                array.extend(textwrapper(c, width))
        line_wrapped.append(array)
    # Height of the tallest wrapped cell on this row.
    max_cell_lines = reduce(max, list(map(len, line_wrapped)))
    # Pad every cell to that height according to its vertical alignment.
    for cell, valign in zip(line_wrapped, self._valign):
        if isheader:
            # Header content is always top-aligned.
            valign = "t"
        if valign == "m":
            # Middle: split padding; the odd extra line goes to the bottom.
            missing = max_cell_lines - len(cell)
            cell[:0] = [""] * int(missing / 2)
            cell.extend([""] * int(missing / 2 + missing % 2))
        elif valign == "b":
            # Bottom: pad on top.
            cell[:0] = [""] * (max_cell_lines - len(cell))
        else:
            # Top (default): pad below.
            cell.extend([""] * (max_cell_lines - len(cell)))
    return line_wrapped
|
foutaise/texttable | texttable.py | Texttable._fmt_exp | python | def _fmt_exp(cls, x, **kw):
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x)) | Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument. | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L464-L473 | null | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
    """Add several rows in the rows stack

    - 'rows' can be a sequence of arrays or an iterator yielding arrays
    - 'header' specifies if the first row should be used as the header
      of the table

    Fix: the original probed for a Python-2-style `.next()` method,
    which Python 3 iterators do not have, so generators fell through to
    the `rows[0]` indexing branch and raised TypeError.  Probing for
    `__next__` handles both Python versions; plain sequences keep the
    exact original behavior (including IndexError on an empty list).
    """
    if header:
        if hasattr(rows, '__iter__') and hasattr(rows, '__next__'):
            # Plain iterator/generator: consume the first item as header.
            self.header(next(rows))
        else:
            # Indexable sequence: header is row 0, data is the rest.
            self.header(rows[0])
            rows = rows[1:]
    for row in rows:
        self.add_row(row)
    return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
    """Exponential formatting class-method.

    - the value is float-converted via cls._to_float before formatting
    - precision is taken from the `n` kw-argument
    Restored: a dangling duplicate @classmethod stood where this method
    belongs; both _fmt_auto and the FMT table in _str reference
    cls._fmt_exp, so the class was unusable without it.
    """
    n = kw.get('n')
    return '%.*e' % (n, cls._to_float(x))

@classmethod
def _fmt_text(cls, x, **kw):
    """String formatting class-method."""
    return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable._fmt_auto | python | def _fmt_auto(cls, x, **kw):
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw) | auto formatting class-method. | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L481-L491 | null | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
    def draw(self):
        """Draw the table

        - the table is returned as a whole string
        - returns None when there is neither a header nor any row
        """
        if not self._header and not self._rows:
            return
        self._compute_cols_width()
        self._check_align()
        out = ""
        if self._has_border():
            out += self._hline()
        if self._header:
            out += self._draw_line(self._header, isheader=True)
            if self._has_header():
                out += self._hline_header()
        length = 0
        for row in self._rows:
            length += 1
            out += self._draw_line(row)
            # separator between rows, but not after the last one
            if self._has_hlines() and length < len(self._rows):
                out += self._hline()
        if self._has_border():
            out += self._hline()
        # every helper appends a trailing newline; strip the final one
        return out[:-1]
    @classmethod
    def _to_float(cls, x):
        """Convert x to float, raising FallbackToText when impossible.

        None is rejected explicitly; TypeError/ValueError from float()
        are translated so _str() can fall back to text formatting.
        """
        if x is None:
            raise FallbackToText()
        try:
            return float(x)
        except (TypeError, ValueError):
            raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
    def _build_hline(self, is_header=False):
        """Return a string used to separated rows or separate header from
        rows; the returned line always ends with a newline
        """
        horiz = self._char_horiz
        if (is_header):
            horiz = self._char_header
        # compute cell separator; the [a, b][flag] idiom picks the corner
        # character at column junctions when vlines are enabled
        s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
                        horiz)
        # build the line
        l = s.join([horiz * n for n in self._width])
        # add border if needed
        if self._has_border():
            l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
                                  self._char_corner)
        else:
            l += "\n"
        return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
    def _compute_cols_width(self):
        """Compute and store the width of each column in self._width

        If a specific width has been specified, exit. If the total of the
        columns width exceed the table desired width, another width will be
        computed to fit, and cells will be wrapped.
        """
        # user-fixed widths (set_cols_width) win unconditionally
        if hasattr(self, "_width"):
            return
        maxi = []
        if self._header:
            maxi = [ self._len_cell(x) for x in self._header ]
        for row in self._rows:
            for cell,i in zip(row, list(range(len(row)))):
                try:
                    maxi[i] = max(maxi[i], self._len_cell(cell))
                except (TypeError, IndexError):
                    # first row wider than any seen so far: grow the list
                    maxi.append(self._len_cell(cell))
        ncols = len(maxi)
        content_width = sum(maxi)
        # 3 chars between columns (" | ") plus 4 for a border ("| " + " |")
        deco_width = 3*(ncols-1) + [0,4][self._has_border()]
        if self._max_width and (content_width + deco_width) > self._max_width:
            """ content too wide to fit the expected max_width
            let's recompute maximum cell width for each cell
            """
            if self._max_width < (ncols + deco_width):
                raise ValueError('max_width too low to render data')
            available_width = self._max_width - deco_width
            newmaxi = [0] * ncols
            i = 0
            # round-robin: grant one character per column at a time, so
            # narrow columns keep their natural width and wide ones shrink
            while available_width > 0:
                if newmaxi[i] < maxi[i]:
                    newmaxi[i] += 1
                    available_width -= 1
                i = (i + 1) % ncols
            maxi = newmaxi
        self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
    def _draw_line(self, line, isheader=False):
        """Render one logical row (possibly several physical lines).

        Cells are first wrapped and padded by _splitit so they all span
        the same number of physical lines; physical line i of every cell
        is then emitted side by side, for each i.
        """
        line = self._splitit(line, isheader)
        space = " "
        out = ""
        for i in range(len(line[0])):
            if self._has_border():
                out += "%s " % self._char_vert
            length = 0
            for cell, width, align in zip(line, self._width, self._align):
                length += 1
                cell_line = cell[i]
                # padding needed to reach the column width
                fill = width - len(cell_line)
                if isheader:
                    align = self._header_align[length - 1]
                if align == "r":
                    out += fill * space + cell_line
                elif align == "c":
                    # centre: the extra odd space goes to the right side
                    out += (int(fill/2) * space + cell_line \
                        + int(fill/2 + fill%2) * space)
                else:
                    out += cell_line + fill * space
                if length < len(line):
                    # inter-column separator: " | ", or spaces without vlines
                    out += " %s " % [space, self._char_vert][self._has_vlines()]
            out += "%s\n" % ['', space + self._char_vert][self._has_border()]
        return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable._str | python | def _str(self, i, x):
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x) | Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L493-L515 | [
"def _fmt_text(cls, x, **kw):\n \"\"\"String formatting class-method.\"\"\"\n return obj2unicode(x)\n"
] | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable._hline | python | def _hline(self):
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string | Print an horizontal line | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L557-L563 | null | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
    def _compute_cols_width(self):
        """Return an array with the width of each column

        If a specific width has been specified, exit. If the total of the
        columns width exceed the table desired width, another width will be
        computed to fit, and cells will be wrapped.
        """
        if hasattr(self, "_width"):
            # Widths were set explicitly via set_cols_width(): keep them.
            return
        maxi = []
        if self._header:
            maxi = [ self._len_cell(x) for x in self._header ]
        for row in self._rows:
            for cell,i in zip(row, list(range(len(row)))):
                try:
                    maxi[i] = max(maxi[i], self._len_cell(cell))
                except (TypeError, IndexError):
                    # Row is longer than anything seen so far: grow the list.
                    maxi.append(self._len_cell(cell))
        ncols = len(maxi)
        content_width = sum(maxi)
        # Decoration overhead: 3 chars between adjacent columns, plus 4
        # when an outer border is drawn.
        deco_width = 3*(ncols-1) + [0,4][self._has_border()]
        if self._max_width and (content_width + deco_width) > self._max_width:
            """ content too wide to fit the expected max_width
            let's recompute maximum cell width for each cell
            """
            if self._max_width < (ncols + deco_width):
                raise ValueError('max_width too low to render data')
            available_width = self._max_width - deco_width
            newmaxi = [0] * ncols
            i = 0
            # Hand out the available width one character at a time,
            # round-robin across columns, never beyond a column's need;
            # terminates because available_width < sum(maxi) here.
            while available_width > 0:
                if newmaxi[i] < maxi[i]:
                    newmaxi[i] += 1
                    available_width -= 1
                i = (i + 1) % ncols
            maxi = newmaxi
        self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
    def _draw_line(self, line, isheader=False):
        """Draw a line

        Loop over a single cell length, over all the cells.  Returns the
        rendered text of one logical row, which may span several physical
        lines when cells wrap.
        """
        # Wrap every cell to its column width; afterwards each cell is a
        # list of physical lines, all lists having equal length.
        line = self._splitit(line, isheader)
        space = " "
        out = ""
        for i in range(len(line[0])):
            if self._has_border():
                out += "%s " % self._char_vert
            length = 0
            for cell, width, align in zip(line, self._width, self._align):
                length += 1
                cell_line = cell[i]
                # Number of padding spaces needed to reach column width.
                fill = width - len(cell_line)
                if isheader:
                    # Header rows use their own per-column alignment.
                    align = self._header_align[length - 1]
                if align == "r":
                    out += fill * space + cell_line
                elif align == "c":
                    # Centered: extra odd space goes to the right side.
                    out += (int(fill/2) * space + cell_line \
                        + int(fill/2 + fill%2) * space)
                else:
                    out += cell_line + fill * space
                if length < len(line):
                    out += " %s " % [space, self._char_vert][self._has_vlines()]
            out += "%s\n" % ['', space + self._char_vert][self._has_border()]
        return out
    def _splitit(self, line, isheader):
        """Split each element of line to fit the column width

        Each element is turned into a list, result of the wrapping of the
        string to the desired width
        """
        line_wrapped = []
        for cell, width in zip(line, self._width):
            array = []
            for c in cell.split('\n'):
                if c.strip() == "":
                    array.append("")
                else:
                    array.extend(textwrapper(c, width))
            line_wrapped.append(array)
        # The tallest cell decides how many physical lines the row spans.
        max_cell_lines = reduce(max, list(map(len, line_wrapped)))
        for cell, valign in zip(line_wrapped, self._valign):
            if isheader:
                # Header cells are always top-aligned.
                valign = "t"
            if valign == "m":
                # Middle: pad above and below (odd extra line goes below).
                missing = max_cell_lines - len(cell)
                cell[:0] = [""] * int(missing / 2)
                cell.extend([""] * int(missing / 2 + missing % 2))
            elif valign == "b":
                # Bottom: pad above only.
                cell[:0] = [""] * (max_cell_lines - len(cell))
            else:
                # Top (default): pad below only.
                cell.extend([""] * (max_cell_lines - len(cell)))
        return line_wrapped
|
foutaise/texttable | texttable.py | Texttable._build_hline | python | def _build_hline(self, is_header=False):
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l | Return a string used to separate rows or separate header from
rows | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L565-L583 | null | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable._len_cell | python | def _len_cell(self, cell):
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi | Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L585-L602 | null | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
foutaise/texttable | texttable.py | Texttable._compute_cols_width | python | def _compute_cols_width(self):
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self._has_border()]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi | Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped. | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L604-L642 | [
"def len(iterable):\n \"\"\"Redefining len here so it will be able to work with non-ASCII characters\n \"\"\"\n if isinstance(iterable, bytes_type) or isinstance(iterable, unicode_type):\n return sum([uchar_width(c) for c in obj2unicode(iterable)])\n else:\n return iterable.__len__()\n",
... | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
    def draw(self):
        """Draw the table

        - the table is returned as a whole string
        - returns None when there is nothing to render (no header, no rows)
        """
        if not self._header and not self._rows:
            return
        self._compute_cols_width()
        self._check_align()
        out = ""
        if self._has_border():
            out += self._hline()
        if self._header:
            out += self._draw_line(self._header, isheader=True)
            if self._has_header():
                out += self._hline_header()
        length = 0
        for row in self._rows:
            length += 1
            out += self._draw_line(row)
            # separator between rows, but not after the last one
            if self._has_hlines() and length < len(self._rows):
                out += self._hline()
        if self._has_border():
            out += self._hline()
        # every line helper appends a trailing newline; drop the final one
        return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
    def _len_cell(self, cell):
        """Return the width of the cell

        Special characters are taken into account to return the width of the
        cell, such like newlines and tabs: the width is that of the widest
        line after tab expansion.
        """
        cell_lines = cell.split('\n')
        maxi = 0
        for line in cell_lines:
            length = 0
            parts = line.split('\t')
            for part, i in zip(parts, list(range(1, len(parts) + 1))):
                length = length + len(part)
                if i < len(parts):
                    # advance to the next multiple of 8 (tab stop), as a
                    # terminal would when expanding the tab character
                    length = (length//8 + 1) * 8
            maxi = max(maxi, length)
        return maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
    def _splitit(self, line, isheader):
        """Split each element of line to fit the column width

        Each element is turned into a list, result of the wrapping of the
        string to the desired width
        """
        line_wrapped = []
        for cell, width in zip(line, self._width):
            array = []
            for c in cell.split('\n'):
                if c.strip() == "":
                    # preserve blank lines inside the cell verbatim
                    array.append("")
                else:
                    array.extend(textwrapper(c, width))
            line_wrapped.append(array)
        # the tallest wrapped cell decides the rendered height of the row
        max_cell_lines = reduce(max, list(map(len, line_wrapped)))
        for cell, valign in zip(line_wrapped, self._valign):
            if isheader:
                # header rows are always top-aligned
                valign = "t"
            if valign == "m":
                # middle: pad evenly; the extra blank line (odd count)
                # goes to the bottom
                missing = max_cell_lines - len(cell)
                cell[:0] = [""] * int(missing / 2)
                cell.extend([""] * int(missing / 2 + missing % 2))
            elif valign == "b":
                cell[:0] = [""] * (max_cell_lines - len(cell))
            else:
                cell.extend([""] * (max_cell_lines - len(cell)))
        return line_wrapped
|
foutaise/texttable | texttable.py | Texttable._splitit | python | def _splitit(self, line, isheader):
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped | Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width | train | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L686-L714 | null | class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self.set_max_width(max_width)
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinaison of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater then 0')
self._precision = width
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
    def _compute_cols_width(self):
        """Return an array with the width of each column

        If a specific width has been specified, exit. If the total of the
        columns width exceed the table desired width, another width will be
        computed to fit, and cells will be wrapped.
        """
        if hasattr(self, "_width"):
            # widths were set explicitly via set_cols_width(); keep them
            return
        maxi = []
        if self._header:
            maxi = [ self._len_cell(x) for x in self._header ]
        for row in self._rows:
            for cell,i in zip(row, list(range(len(row)))):
                try:
                    maxi[i] = max(maxi[i], self._len_cell(cell))
                except (TypeError, IndexError):
                    # row is longer than anything seen so far (or there is
                    # no header): grow the maxi list as new columns appear
                    maxi.append(self._len_cell(cell))
        ncols = len(maxi)
        content_width = sum(maxi)
        # decoration: 3 chars between adjacent columns (" | "), plus
        # "| " and " |" (4 chars) when an outer border is drawn
        deco_width = 3*(ncols-1) + [0,4][self._has_border()]
        if self._max_width and (content_width + deco_width) > self._max_width:
            """ content too wide to fit the expected max_width
            let's recompute maximum cell width for each cell
            """
            if self._max_width < (ncols + deco_width):
                raise ValueError('max_width too low to render data')
            available_width = self._max_width - deco_width
            newmaxi = [0] * ncols
            i = 0
            # hand out the available width one character at a time,
            # round-robin across columns, never exceeding a column's
            # natural width; over-wide columns end up wrapped later.
            # Terminates because content_width (== sum(maxi)) is known
            # to exceed available_width at this point.
            while available_width > 0:
                if newmaxi[i] < maxi[i]:
                    newmaxi[i] += 1
                    available_width -= 1
                i = (i + 1) % ncols
            maxi = newmaxi
        self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += " %s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', space + self._char_vert][self._has_border()]
return out
|
kelproject/pykube | pykube/query.py | Query.iterator | python | def iterator(self):
for obj in (self.execute().json().get("items") or []):
yield self.api_obj_class(self.api, obj) | Execute the API request and return an iterator over the objects. This
method does not use the query cache. | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/query.py#L112-L118 | [
"def execute(self):\n kwargs = {\"url\": self._build_api_url()}\n if self.api_obj_class.base:\n kwargs[\"base\"] = self.api_obj_class.base\n if self.api_obj_class.version:\n kwargs[\"version\"] = self.api_obj_class.version\n if self.namespace is not None and self.namespace is not all_:\n ... | class Query(BaseQuery):
    def get_by_name(self, name):
        """Fetch a single API object by its exact name.

        Raises ObjectDoesNotExist when the server answers 404; any other
        HTTP error status is funneled through api.raise_for_status().
        """
        kwargs = {
            "url": "{}/{}".format(self.api_obj_class.endpoint, name),
            "namespace": self.namespace,
        }
        # base/version default to the API object's own declarations
        if self.api_obj_class.base:
            kwargs["base"] = self.api_obj_class.base
        if self.api_obj_class.version:
            kwargs["version"] = self.api_obj_class.version
        r = self.api.get(**kwargs)
        if not r.ok:
            if r.status_code == 404:
                raise ObjectDoesNotExist("{} does not exist.".format(name))
            self.api.raise_for_status(r)
        return self.api_obj_class(self.api, r.json())
    def get(self, *args, **kwargs):
        """Return exactly one object matching the given selectors.

        A 'name' keyword bypasses filtering and performs a direct lookup;
        otherwise the filtered query must match exactly one object.
        Raises ObjectDoesNotExist on zero matches and ValueError on more
        than one.
        """
        if "name" in kwargs:
            return self.get_by_name(kwargs["name"])
        clone = self.filter(*args, **kwargs)
        num = len(clone)
        if num == 1:
            return clone.query_cache["objects"][0]
        if not num:
            raise ObjectDoesNotExist("get() returned zero objects")
        raise ValueError("get() more than one object; use filter")
def get_or_none(self, *args, **kwargs):
try:
return self.get(*args, **kwargs)
except ObjectDoesNotExist:
return None
    def watch(self, since=None):
        """Return a WatchQuery clone of this query.

        - since=now: start from the resourceVersion of this query's last
          response (requires the query to have been executed)
        - any other non-None value is used as the resourceVersion directly
        - None (default): no resourceVersion is set
        """
        query = self._clone(WatchQuery)
        if since is now:
            query.resource_version = self.response["metadata"]["resourceVersion"]
        elif since is not None:
            query.resource_version = since
        return query
def execute(self):
kwargs = {"url": self._build_api_url()}
if self.api_obj_class.base:
kwargs["base"] = self.api_obj_class.base
if self.api_obj_class.version:
kwargs["version"] = self.api_obj_class.version
if self.namespace is not None and self.namespace is not all_:
kwargs["namespace"] = self.namespace
r = self.api.get(**kwargs)
r.raise_for_status()
return r
@property
def query_cache(self):
if not hasattr(self, "_query_cache"):
cache = {"objects": []}
cache["response"] = self.execute().json()
for obj in (cache["response"].get("items") or []):
cache["objects"].append(self.api_obj_class(self.api, obj))
self._query_cache = cache
return self._query_cache
def __len__(self):
return len(self.query_cache["objects"])
def __iter__(self):
return iter(self.query_cache["objects"])
@property
def response(self):
return self.query_cache["response"]
|
kelproject/pykube | pykube/http.py | HTTPClient.version | python | def version(self):
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"]) | Get Kubernetes API version | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L178-L185 | [
"def get(self, *args, **kwargs):\n \"\"\"\n Executes an HTTP GET.\n\n :Parameters:\n - `args`: Non-keyword arguments\n - `kwargs`: Keyword arguments\n \"\"\"\n return self.session.get(*args, **self.get_kwargs(**kwargs))\n"
] | class HTTPClient(object):
"""
Client for interfacing with the Kubernetes API.
"""
_session = None
def __init__(self, config):
"""
Creates a new instance of the HTTPClient.
:Parameters:
- `config`: The configuration instance
"""
self.config = config
self.url = self.config.cluster["server"]
session = requests.Session()
session.mount("https://", KubernetesHTTPAdapter(self.config))
session.mount("http://", KubernetesHTTPAdapter(self.config))
self.session = session
@property
def url(self):
return self._url
@url.setter
def url(self, value):
pr = urlparse(value)
self._url = pr.geturl()
@property
def resource_list(self, api_version):
cached_attr = "_cached_resource_list"
if not hasattr(self, cached_attr):
r = self.get(version=api_version)
r.raise_for_status()
setattr(self, cached_attr, r.json())
return getattr(self, cached_attr)
    def get_kwargs(self, **kwargs):
        """
        Creates a full URL to request based on arguments.

        :Parameters:
           - `kwargs`: All keyword arguments to build a kubernetes API endpoint
        """
        version = kwargs.pop("version", "v1")
        if version == "v1":
            # the core API group lives under /api; named groups under /apis
            base = kwargs.pop("base", "/api")
        elif "/" in version:
            base = kwargs.pop("base", "/apis")
        else:
            # bare non-v1 version string: the caller must say where it mounts
            if "base" not in kwargs:
                raise TypeError("unknown API version; base kwarg must be specified.")
            base = kwargs.pop("base")
        bits = [base, version]
        # Overwrite (default) namespace from context if it was set
        if "namespace" in kwargs:
            n = kwargs.pop("namespace")
            if n is not None:
                if n:
                    namespace = n
                else:
                    # falsy-but-not-None (e.g. "") means: use the
                    # configured default namespace
                    namespace = self.config.namespace
                if namespace:
                    bits.extend([
                        "namespaces",
                        namespace,
                    ])
        url = kwargs.get("url", "")
        if url.startswith("/"):
            # posixpath.join would discard earlier components on an
            # absolute path; strip the leading slash first
            url = url[1:]
        bits.append(url)
        kwargs["url"] = self.url + posixpath.join(*bits)
        return kwargs
def raise_for_status(self, resp):
try:
resp.raise_for_status()
except Exception:
# attempt to provide a more specific exception based around what
# Kubernetes returned as the error.
if resp.headers["content-type"] == "application/json":
payload = resp.json()
if payload["kind"] == "Status":
raise HTTPError(resp.status_code, payload["message"])
raise
def request(self, *args, **kwargs):
"""
Makes an API request based on arguments.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.request(*args, **self.get_kwargs(**kwargs))
def get(self, *args, **kwargs):
"""
Executes an HTTP GET.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.get(*args, **self.get_kwargs(**kwargs))
def options(self, *args, **kwargs):
"""
Executes an HTTP OPTIONS.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.options(*args, **self.get_kwargs(**kwargs))
def head(self, *args, **kwargs):
"""
Executes an HTTP HEAD.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.head(*args, **self.get_kwargs(**kwargs))
def post(self, *args, **kwargs):
"""
Executes an HTTP POST.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.post(*args, **self.get_kwargs(**kwargs))
def put(self, *args, **kwargs):
"""
Executes an HTTP PUT.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.put(*args, **self.get_kwargs(**kwargs))
def patch(self, *args, **kwargs):
"""
Executes an HTTP PATCH.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.patch(*args, **self.get_kwargs(**kwargs))
def delete(self, *args, **kwargs):
"""
Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.delete(*args, **self.get_kwargs(**kwargs))
|
kelproject/pykube | pykube/http.py | HTTPClient.get_kwargs | python | def get_kwargs(self, **kwargs):
version = kwargs.pop("version", "v1")
if version == "v1":
base = kwargs.pop("base", "/api")
elif "/" in version:
base = kwargs.pop("base", "/apis")
else:
if "base" not in kwargs:
raise TypeError("unknown API version; base kwarg must be specified.")
base = kwargs.pop("base")
bits = [base, version]
# Overwrite (default) namespace from context if it was set
if "namespace" in kwargs:
n = kwargs.pop("namespace")
if n is not None:
if n:
namespace = n
else:
namespace = self.config.namespace
if namespace:
bits.extend([
"namespaces",
namespace,
])
url = kwargs.get("url", "")
if url.startswith("/"):
url = url[1:]
bits.append(url)
kwargs["url"] = self.url + posixpath.join(*bits)
return kwargs | Creates a full URL to request based on arguments.
:Parametes:
- `kwargs`: All keyword arguments to build a kubernetes API endpoint | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L195-L230 | null | class HTTPClient(object):
"""
Client for interfacing with the Kubernetes API.
"""
_session = None
def __init__(self, config):
"""
Creates a new instance of the HTTPClient.
:Parameters:
- `config`: The configuration instance
"""
self.config = config
self.url = self.config.cluster["server"]
session = requests.Session()
session.mount("https://", KubernetesHTTPAdapter(self.config))
session.mount("http://", KubernetesHTTPAdapter(self.config))
self.session = session
@property
def url(self):
return self._url
@url.setter
def url(self, value):
pr = urlparse(value)
self._url = pr.geturl()
@property
def version(self):
"""
Get Kubernetes API version
"""
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"])
def resource_list(self, api_version):
cached_attr = "_cached_resource_list"
if not hasattr(self, cached_attr):
r = self.get(version=api_version)
r.raise_for_status()
setattr(self, cached_attr, r.json())
return getattr(self, cached_attr)
def raise_for_status(self, resp):
try:
resp.raise_for_status()
except Exception:
# attempt to provide a more specific exception based around what
# Kubernetes returned as the error.
if resp.headers["content-type"] == "application/json":
payload = resp.json()
if payload["kind"] == "Status":
raise HTTPError(resp.status_code, payload["message"])
raise
def request(self, *args, **kwargs):
"""
Makes an API request based on arguments.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.request(*args, **self.get_kwargs(**kwargs))
def get(self, *args, **kwargs):
"""
Executes an HTTP GET.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.get(*args, **self.get_kwargs(**kwargs))
def options(self, *args, **kwargs):
"""
Executes an HTTP OPTIONS.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.options(*args, **self.get_kwargs(**kwargs))
def head(self, *args, **kwargs):
"""
Executes an HTTP HEAD.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.head(*args, **self.get_kwargs(**kwargs))
def post(self, *args, **kwargs):
"""
Executes an HTTP POST.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.post(*args, **self.get_kwargs(**kwargs))
def put(self, *args, **kwargs):
"""
Executes an HTTP PUT.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.put(*args, **self.get_kwargs(**kwargs))
def patch(self, *args, **kwargs):
"""
Executes an HTTP PATCH.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.patch(*args, **self.get_kwargs(**kwargs))
def delete(self, *args, **kwargs):
"""
Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.delete(*args, **self.get_kwargs(**kwargs))
|
kelproject/pykube | pykube/http.py | HTTPClient.request | python | def request(self, *args, **kwargs):
return self.session.request(*args, **self.get_kwargs(**kwargs)) | Makes an API request based on arguments.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L244-L252 | [
"def get_kwargs(self, **kwargs):\n \"\"\"\n Creates a full URL to request based on arguments.\n\n :Parametes:\n - `kwargs`: All keyword arguments to build a kubernetes API endpoint\n \"\"\"\n version = kwargs.pop(\"version\", \"v1\")\n if version == \"v1\":\n base = kwargs.pop(\"base\... | class HTTPClient(object):
"""
Client for interfacing with the Kubernetes API.
"""
_session = None
def __init__(self, config):
"""
Creates a new instance of the HTTPClient.
:Parameters:
- `config`: The configuration instance
"""
self.config = config
self.url = self.config.cluster["server"]
session = requests.Session()
session.mount("https://", KubernetesHTTPAdapter(self.config))
session.mount("http://", KubernetesHTTPAdapter(self.config))
self.session = session
@property
def url(self):
return self._url
@url.setter
def url(self, value):
pr = urlparse(value)
self._url = pr.geturl()
@property
def version(self):
"""
Get Kubernetes API version
"""
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"])
def resource_list(self, api_version):
cached_attr = "_cached_resource_list"
if not hasattr(self, cached_attr):
r = self.get(version=api_version)
r.raise_for_status()
setattr(self, cached_attr, r.json())
return getattr(self, cached_attr)
def get_kwargs(self, **kwargs):
"""
Creates a full URL to request based on arguments.
:Parametes:
- `kwargs`: All keyword arguments to build a kubernetes API endpoint
"""
version = kwargs.pop("version", "v1")
if version == "v1":
base = kwargs.pop("base", "/api")
elif "/" in version:
base = kwargs.pop("base", "/apis")
else:
if "base" not in kwargs:
raise TypeError("unknown API version; base kwarg must be specified.")
base = kwargs.pop("base")
bits = [base, version]
# Overwrite (default) namespace from context if it was set
if "namespace" in kwargs:
n = kwargs.pop("namespace")
if n is not None:
if n:
namespace = n
else:
namespace = self.config.namespace
if namespace:
bits.extend([
"namespaces",
namespace,
])
url = kwargs.get("url", "")
if url.startswith("/"):
url = url[1:]
bits.append(url)
kwargs["url"] = self.url + posixpath.join(*bits)
return kwargs
def raise_for_status(self, resp):
try:
resp.raise_for_status()
except Exception:
# attempt to provide a more specific exception based around what
# Kubernetes returned as the error.
if resp.headers["content-type"] == "application/json":
payload = resp.json()
if payload["kind"] == "Status":
raise HTTPError(resp.status_code, payload["message"])
raise
def get(self, *args, **kwargs):
"""
Executes an HTTP GET.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.get(*args, **self.get_kwargs(**kwargs))
def options(self, *args, **kwargs):
"""
Executes an HTTP OPTIONS.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.options(*args, **self.get_kwargs(**kwargs))
def head(self, *args, **kwargs):
"""
Executes an HTTP HEAD.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.head(*args, **self.get_kwargs(**kwargs))
def post(self, *args, **kwargs):
"""
Executes an HTTP POST.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.post(*args, **self.get_kwargs(**kwargs))
def put(self, *args, **kwargs):
"""
Executes an HTTP PUT.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.put(*args, **self.get_kwargs(**kwargs))
def patch(self, *args, **kwargs):
"""
Executes an HTTP PATCH.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.patch(*args, **self.get_kwargs(**kwargs))
def delete(self, *args, **kwargs):
"""
Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.delete(*args, **self.get_kwargs(**kwargs))
|
kelproject/pykube | pykube/http.py | HTTPClient.get | python | def get(self, *args, **kwargs):
return self.session.get(*args, **self.get_kwargs(**kwargs)) | Executes an HTTP GET.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L254-L262 | [
"def get_kwargs(self, **kwargs):\n \"\"\"\n Creates a full URL to request based on arguments.\n\n :Parametes:\n - `kwargs`: All keyword arguments to build a kubernetes API endpoint\n \"\"\"\n version = kwargs.pop(\"version\", \"v1\")\n if version == \"v1\":\n base = kwargs.pop(\"base\... | class HTTPClient(object):
"""
Client for interfacing with the Kubernetes API.
"""
_session = None
def __init__(self, config):
"""
Creates a new instance of the HTTPClient.
:Parameters:
- `config`: The configuration instance
"""
self.config = config
self.url = self.config.cluster["server"]
session = requests.Session()
session.mount("https://", KubernetesHTTPAdapter(self.config))
session.mount("http://", KubernetesHTTPAdapter(self.config))
self.session = session
@property
def url(self):
return self._url
@url.setter
def url(self, value):
pr = urlparse(value)
self._url = pr.geturl()
@property
def version(self):
"""
Get Kubernetes API version
"""
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"])
def resource_list(self, api_version):
cached_attr = "_cached_resource_list"
if not hasattr(self, cached_attr):
r = self.get(version=api_version)
r.raise_for_status()
setattr(self, cached_attr, r.json())
return getattr(self, cached_attr)
def get_kwargs(self, **kwargs):
"""
Creates a full URL to request based on arguments.
:Parametes:
- `kwargs`: All keyword arguments to build a kubernetes API endpoint
"""
version = kwargs.pop("version", "v1")
if version == "v1":
base = kwargs.pop("base", "/api")
elif "/" in version:
base = kwargs.pop("base", "/apis")
else:
if "base" not in kwargs:
raise TypeError("unknown API version; base kwarg must be specified.")
base = kwargs.pop("base")
bits = [base, version]
# Overwrite (default) namespace from context if it was set
if "namespace" in kwargs:
n = kwargs.pop("namespace")
if n is not None:
if n:
namespace = n
else:
namespace = self.config.namespace
if namespace:
bits.extend([
"namespaces",
namespace,
])
url = kwargs.get("url", "")
if url.startswith("/"):
url = url[1:]
bits.append(url)
kwargs["url"] = self.url + posixpath.join(*bits)
return kwargs
def raise_for_status(self, resp):
try:
resp.raise_for_status()
except Exception:
# attempt to provide a more specific exception based around what
# Kubernetes returned as the error.
if resp.headers["content-type"] == "application/json":
payload = resp.json()
if payload["kind"] == "Status":
raise HTTPError(resp.status_code, payload["message"])
raise
def request(self, *args, **kwargs):
"""
Makes an API request based on arguments.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.request(*args, **self.get_kwargs(**kwargs))
def options(self, *args, **kwargs):
"""
Executes an HTTP OPTIONS.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.options(*args, **self.get_kwargs(**kwargs))
def head(self, *args, **kwargs):
"""
Executes an HTTP HEAD.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.head(*args, **self.get_kwargs(**kwargs))
def post(self, *args, **kwargs):
"""
Executes an HTTP POST.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.post(*args, **self.get_kwargs(**kwargs))
def put(self, *args, **kwargs):
"""
Executes an HTTP PUT.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.put(*args, **self.get_kwargs(**kwargs))
def patch(self, *args, **kwargs):
"""
Executes an HTTP PATCH.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.patch(*args, **self.get_kwargs(**kwargs))
def delete(self, *args, **kwargs):
"""
Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.delete(*args, **self.get_kwargs(**kwargs))
|
kelproject/pykube | pykube/http.py | HTTPClient.options | python | def options(self, *args, **kwargs):
return self.session.options(*args, **self.get_kwargs(**kwargs)) | Executes an HTTP OPTIONS.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L264-L272 | [
"def get_kwargs(self, **kwargs):\n \"\"\"\n Creates a full URL to request based on arguments.\n\n :Parametes:\n - `kwargs`: All keyword arguments to build a kubernetes API endpoint\n \"\"\"\n version = kwargs.pop(\"version\", \"v1\")\n if version == \"v1\":\n base = kwargs.pop(\"base\... | class HTTPClient(object):
"""
Client for interfacing with the Kubernetes API.
"""
_session = None
def __init__(self, config):
"""
Creates a new instance of the HTTPClient.
:Parameters:
- `config`: The configuration instance
"""
self.config = config
self.url = self.config.cluster["server"]
session = requests.Session()
session.mount("https://", KubernetesHTTPAdapter(self.config))
session.mount("http://", KubernetesHTTPAdapter(self.config))
self.session = session
@property
def url(self):
return self._url
@url.setter
def url(self, value):
pr = urlparse(value)
self._url = pr.geturl()
@property
def version(self):
"""
Get Kubernetes API version
"""
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"])
def resource_list(self, api_version):
cached_attr = "_cached_resource_list"
if not hasattr(self, cached_attr):
r = self.get(version=api_version)
r.raise_for_status()
setattr(self, cached_attr, r.json())
return getattr(self, cached_attr)
def get_kwargs(self, **kwargs):
"""
Creates a full URL to request based on arguments.
:Parametes:
- `kwargs`: All keyword arguments to build a kubernetes API endpoint
"""
version = kwargs.pop("version", "v1")
if version == "v1":
base = kwargs.pop("base", "/api")
elif "/" in version:
base = kwargs.pop("base", "/apis")
else:
if "base" not in kwargs:
raise TypeError("unknown API version; base kwarg must be specified.")
base = kwargs.pop("base")
bits = [base, version]
# Overwrite (default) namespace from context if it was set
if "namespace" in kwargs:
n = kwargs.pop("namespace")
if n is not None:
if n:
namespace = n
else:
namespace = self.config.namespace
if namespace:
bits.extend([
"namespaces",
namespace,
])
url = kwargs.get("url", "")
if url.startswith("/"):
url = url[1:]
bits.append(url)
kwargs["url"] = self.url + posixpath.join(*bits)
return kwargs
def raise_for_status(self, resp):
try:
resp.raise_for_status()
except Exception:
# attempt to provide a more specific exception based around what
# Kubernetes returned as the error.
if resp.headers["content-type"] == "application/json":
payload = resp.json()
if payload["kind"] == "Status":
raise HTTPError(resp.status_code, payload["message"])
raise
def request(self, *args, **kwargs):
"""
Makes an API request based on arguments.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.request(*args, **self.get_kwargs(**kwargs))
def get(self, *args, **kwargs):
"""
Executes an HTTP GET.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.get(*args, **self.get_kwargs(**kwargs))
def head(self, *args, **kwargs):
"""
Executes an HTTP HEAD.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.head(*args, **self.get_kwargs(**kwargs))
def post(self, *args, **kwargs):
"""
Executes an HTTP POST.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.post(*args, **self.get_kwargs(**kwargs))
def put(self, *args, **kwargs):
"""
Executes an HTTP PUT.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.put(*args, **self.get_kwargs(**kwargs))
def patch(self, *args, **kwargs):
"""
Executes an HTTP PATCH.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.patch(*args, **self.get_kwargs(**kwargs))
def delete(self, *args, **kwargs):
"""
Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.delete(*args, **self.get_kwargs(**kwargs))
|
kelproject/pykube | pykube/http.py | HTTPClient.head | python | def head(self, *args, **kwargs):
return self.session.head(*args, **self.get_kwargs(**kwargs)) | Executes an HTTP HEAD.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L274-L282 | [
"def get_kwargs(self, **kwargs):\n \"\"\"\n Creates a full URL to request based on arguments.\n\n :Parametes:\n - `kwargs`: All keyword arguments to build a kubernetes API endpoint\n \"\"\"\n version = kwargs.pop(\"version\", \"v1\")\n if version == \"v1\":\n base = kwargs.pop(\"base\... | class HTTPClient(object):
"""
Client for interfacing with the Kubernetes API.
"""
_session = None
def __init__(self, config):
"""
Creates a new instance of the HTTPClient.
:Parameters:
- `config`: The configuration instance
"""
self.config = config
self.url = self.config.cluster["server"]
session = requests.Session()
session.mount("https://", KubernetesHTTPAdapter(self.config))
session.mount("http://", KubernetesHTTPAdapter(self.config))
self.session = session
@property
def url(self):
return self._url
@url.setter
def url(self, value):
pr = urlparse(value)
self._url = pr.geturl()
@property
def version(self):
"""
Get Kubernetes API version
"""
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"])
def resource_list(self, api_version):
cached_attr = "_cached_resource_list"
if not hasattr(self, cached_attr):
r = self.get(version=api_version)
r.raise_for_status()
setattr(self, cached_attr, r.json())
return getattr(self, cached_attr)
def get_kwargs(self, **kwargs):
"""
Creates a full URL to request based on arguments.
:Parametes:
- `kwargs`: All keyword arguments to build a kubernetes API endpoint
"""
version = kwargs.pop("version", "v1")
if version == "v1":
base = kwargs.pop("base", "/api")
elif "/" in version:
base = kwargs.pop("base", "/apis")
else:
if "base" not in kwargs:
raise TypeError("unknown API version; base kwarg must be specified.")
base = kwargs.pop("base")
bits = [base, version]
# Overwrite (default) namespace from context if it was set
if "namespace" in kwargs:
n = kwargs.pop("namespace")
if n is not None:
if n:
namespace = n
else:
namespace = self.config.namespace
if namespace:
bits.extend([
"namespaces",
namespace,
])
url = kwargs.get("url", "")
if url.startswith("/"):
url = url[1:]
bits.append(url)
kwargs["url"] = self.url + posixpath.join(*bits)
return kwargs
def raise_for_status(self, resp):
try:
resp.raise_for_status()
except Exception:
# attempt to provide a more specific exception based around what
# Kubernetes returned as the error.
if resp.headers["content-type"] == "application/json":
payload = resp.json()
if payload["kind"] == "Status":
raise HTTPError(resp.status_code, payload["message"])
raise
def request(self, *args, **kwargs):
"""
Makes an API request based on arguments.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.request(*args, **self.get_kwargs(**kwargs))
def get(self, *args, **kwargs):
"""
Executes an HTTP GET.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.get(*args, **self.get_kwargs(**kwargs))
def options(self, *args, **kwargs):
"""
Executes an HTTP OPTIONS.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.options(*args, **self.get_kwargs(**kwargs))
def post(self, *args, **kwargs):
"""
Executes an HTTP POST.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.post(*args, **self.get_kwargs(**kwargs))
def put(self, *args, **kwargs):
"""
Executes an HTTP PUT.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.put(*args, **self.get_kwargs(**kwargs))
def patch(self, *args, **kwargs):
"""
Executes an HTTP PATCH.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.patch(*args, **self.get_kwargs(**kwargs))
def delete(self, *args, **kwargs):
"""
Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.delete(*args, **self.get_kwargs(**kwargs))
|
kelproject/pykube | pykube/http.py | HTTPClient.post | python | def post(self, *args, **kwargs):
return self.session.post(*args, **self.get_kwargs(**kwargs)) | Executes an HTTP POST.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L284-L292 | [
"def get_kwargs(self, **kwargs):\n \"\"\"\n Creates a full URL to request based on arguments.\n\n :Parametes:\n - `kwargs`: All keyword arguments to build a kubernetes API endpoint\n \"\"\"\n version = kwargs.pop(\"version\", \"v1\")\n if version == \"v1\":\n base = kwargs.pop(\"base\... | class HTTPClient(object):
"""
Client for interfacing with the Kubernetes API.
"""
_session = None
def __init__(self, config):
"""
Creates a new instance of the HTTPClient.
:Parameters:
- `config`: The configuration instance
"""
self.config = config
self.url = self.config.cluster["server"]
session = requests.Session()
session.mount("https://", KubernetesHTTPAdapter(self.config))
session.mount("http://", KubernetesHTTPAdapter(self.config))
self.session = session
@property
def url(self):
return self._url
@url.setter
def url(self, value):
pr = urlparse(value)
self._url = pr.geturl()
@property
def version(self):
"""
Get Kubernetes API version
"""
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"])
def resource_list(self, api_version):
cached_attr = "_cached_resource_list"
if not hasattr(self, cached_attr):
r = self.get(version=api_version)
r.raise_for_status()
setattr(self, cached_attr, r.json())
return getattr(self, cached_attr)
def get_kwargs(self, **kwargs):
"""
Creates a full URL to request based on arguments.
:Parametes:
- `kwargs`: All keyword arguments to build a kubernetes API endpoint
"""
version = kwargs.pop("version", "v1")
if version == "v1":
base = kwargs.pop("base", "/api")
elif "/" in version:
base = kwargs.pop("base", "/apis")
else:
if "base" not in kwargs:
raise TypeError("unknown API version; base kwarg must be specified.")
base = kwargs.pop("base")
bits = [base, version]
# Overwrite (default) namespace from context if it was set
if "namespace" in kwargs:
n = kwargs.pop("namespace")
if n is not None:
if n:
namespace = n
else:
namespace = self.config.namespace
if namespace:
bits.extend([
"namespaces",
namespace,
])
url = kwargs.get("url", "")
if url.startswith("/"):
url = url[1:]
bits.append(url)
kwargs["url"] = self.url + posixpath.join(*bits)
return kwargs
def raise_for_status(self, resp):
try:
resp.raise_for_status()
except Exception:
# attempt to provide a more specific exception based around what
# Kubernetes returned as the error.
if resp.headers["content-type"] == "application/json":
payload = resp.json()
if payload["kind"] == "Status":
raise HTTPError(resp.status_code, payload["message"])
raise
def request(self, *args, **kwargs):
"""
Makes an API request based on arguments.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.request(*args, **self.get_kwargs(**kwargs))
def get(self, *args, **kwargs):
"""
Executes an HTTP GET.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.get(*args, **self.get_kwargs(**kwargs))
def options(self, *args, **kwargs):
"""
Executes an HTTP OPTIONS.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.options(*args, **self.get_kwargs(**kwargs))
def head(self, *args, **kwargs):
"""
Executes an HTTP HEAD.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.head(*args, **self.get_kwargs(**kwargs))
def put(self, *args, **kwargs):
"""
Executes an HTTP PUT.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.put(*args, **self.get_kwargs(**kwargs))
def patch(self, *args, **kwargs):
"""
Executes an HTTP PATCH.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.patch(*args, **self.get_kwargs(**kwargs))
def delete(self, *args, **kwargs):
"""
Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.delete(*args, **self.get_kwargs(**kwargs))
|
kelproject/pykube | pykube/http.py | HTTPClient.put | python | def put(self, *args, **kwargs):
return self.session.put(*args, **self.get_kwargs(**kwargs)) | Executes an HTTP PUT.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L294-L302 | [
"def get_kwargs(self, **kwargs):\n \"\"\"\n Creates a full URL to request based on arguments.\n\n :Parametes:\n - `kwargs`: All keyword arguments to build a kubernetes API endpoint\n \"\"\"\n version = kwargs.pop(\"version\", \"v1\")\n if version == \"v1\":\n base = kwargs.pop(\"base\... | class HTTPClient(object):
"""
Client for interfacing with the Kubernetes API.
"""
_session = None
def __init__(self, config):
"""
Creates a new instance of the HTTPClient.
:Parameters:
- `config`: The configuration instance
"""
self.config = config
self.url = self.config.cluster["server"]
session = requests.Session()
session.mount("https://", KubernetesHTTPAdapter(self.config))
session.mount("http://", KubernetesHTTPAdapter(self.config))
self.session = session
@property
def url(self):
return self._url
@url.setter
def url(self, value):
pr = urlparse(value)
self._url = pr.geturl()
@property
def version(self):
"""
Get Kubernetes API version
"""
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"])
def resource_list(self, api_version):
cached_attr = "_cached_resource_list"
if not hasattr(self, cached_attr):
r = self.get(version=api_version)
r.raise_for_status()
setattr(self, cached_attr, r.json())
return getattr(self, cached_attr)
def get_kwargs(self, **kwargs):
"""
Creates a full URL to request based on arguments.
:Parametes:
- `kwargs`: All keyword arguments to build a kubernetes API endpoint
"""
version = kwargs.pop("version", "v1")
if version == "v1":
base = kwargs.pop("base", "/api")
elif "/" in version:
base = kwargs.pop("base", "/apis")
else:
if "base" not in kwargs:
raise TypeError("unknown API version; base kwarg must be specified.")
base = kwargs.pop("base")
bits = [base, version]
# Overwrite (default) namespace from context if it was set
if "namespace" in kwargs:
n = kwargs.pop("namespace")
if n is not None:
if n:
namespace = n
else:
namespace = self.config.namespace
if namespace:
bits.extend([
"namespaces",
namespace,
])
url = kwargs.get("url", "")
if url.startswith("/"):
url = url[1:]
bits.append(url)
kwargs["url"] = self.url + posixpath.join(*bits)
return kwargs
def raise_for_status(self, resp):
try:
resp.raise_for_status()
except Exception:
# attempt to provide a more specific exception based around what
# Kubernetes returned as the error.
if resp.headers["content-type"] == "application/json":
payload = resp.json()
if payload["kind"] == "Status":
raise HTTPError(resp.status_code, payload["message"])
raise
def request(self, *args, **kwargs):
"""
Makes an API request based on arguments.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.request(*args, **self.get_kwargs(**kwargs))
def get(self, *args, **kwargs):
"""
Executes an HTTP GET.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.get(*args, **self.get_kwargs(**kwargs))
def options(self, *args, **kwargs):
"""
Executes an HTTP OPTIONS.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.options(*args, **self.get_kwargs(**kwargs))
def head(self, *args, **kwargs):
"""
Executes an HTTP HEAD.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.head(*args, **self.get_kwargs(**kwargs))
def post(self, *args, **kwargs):
"""
Executes an HTTP POST.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.post(*args, **self.get_kwargs(**kwargs))
def patch(self, *args, **kwargs):
"""
Executes an HTTP PATCH.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.patch(*args, **self.get_kwargs(**kwargs))
def delete(self, *args, **kwargs):
"""
Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.delete(*args, **self.get_kwargs(**kwargs))
|
kelproject/pykube | pykube/http.py | HTTPClient.patch | python | def patch(self, *args, **kwargs):
return self.session.patch(*args, **self.get_kwargs(**kwargs)) | Executes an HTTP PATCH.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L304-L312 | [
"def get_kwargs(self, **kwargs):\n \"\"\"\n Creates a full URL to request based on arguments.\n\n :Parametes:\n - `kwargs`: All keyword arguments to build a kubernetes API endpoint\n \"\"\"\n version = kwargs.pop(\"version\", \"v1\")\n if version == \"v1\":\n base = kwargs.pop(\"base\... | class HTTPClient(object):
"""
Client for interfacing with the Kubernetes API.
"""
_session = None
def __init__(self, config):
"""
Creates a new instance of the HTTPClient.
:Parameters:
- `config`: The configuration instance
"""
self.config = config
self.url = self.config.cluster["server"]
session = requests.Session()
session.mount("https://", KubernetesHTTPAdapter(self.config))
session.mount("http://", KubernetesHTTPAdapter(self.config))
self.session = session
@property
def url(self):
return self._url
@url.setter
def url(self, value):
pr = urlparse(value)
self._url = pr.geturl()
@property
def version(self):
"""
Get Kubernetes API version
"""
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"])
def resource_list(self, api_version):
cached_attr = "_cached_resource_list"
if not hasattr(self, cached_attr):
r = self.get(version=api_version)
r.raise_for_status()
setattr(self, cached_attr, r.json())
return getattr(self, cached_attr)
def get_kwargs(self, **kwargs):
"""
Creates a full URL to request based on arguments.
:Parametes:
- `kwargs`: All keyword arguments to build a kubernetes API endpoint
"""
version = kwargs.pop("version", "v1")
if version == "v1":
base = kwargs.pop("base", "/api")
elif "/" in version:
base = kwargs.pop("base", "/apis")
else:
if "base" not in kwargs:
raise TypeError("unknown API version; base kwarg must be specified.")
base = kwargs.pop("base")
bits = [base, version]
# Overwrite (default) namespace from context if it was set
if "namespace" in kwargs:
n = kwargs.pop("namespace")
if n is not None:
if n:
namespace = n
else:
namespace = self.config.namespace
if namespace:
bits.extend([
"namespaces",
namespace,
])
url = kwargs.get("url", "")
if url.startswith("/"):
url = url[1:]
bits.append(url)
kwargs["url"] = self.url + posixpath.join(*bits)
return kwargs
def raise_for_status(self, resp):
try:
resp.raise_for_status()
except Exception:
# attempt to provide a more specific exception based around what
# Kubernetes returned as the error.
if resp.headers["content-type"] == "application/json":
payload = resp.json()
if payload["kind"] == "Status":
raise HTTPError(resp.status_code, payload["message"])
raise
def request(self, *args, **kwargs):
"""
Makes an API request based on arguments.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.request(*args, **self.get_kwargs(**kwargs))
def get(self, *args, **kwargs):
"""
Executes an HTTP GET.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.get(*args, **self.get_kwargs(**kwargs))
def options(self, *args, **kwargs):
"""
Executes an HTTP OPTIONS.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.options(*args, **self.get_kwargs(**kwargs))
def head(self, *args, **kwargs):
"""
Executes an HTTP HEAD.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.head(*args, **self.get_kwargs(**kwargs))
def post(self, *args, **kwargs):
"""
Executes an HTTP POST.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.post(*args, **self.get_kwargs(**kwargs))
def put(self, *args, **kwargs):
"""
Executes an HTTP PUT.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.put(*args, **self.get_kwargs(**kwargs))
def delete(self, *args, **kwargs):
"""
Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.delete(*args, **self.get_kwargs(**kwargs))
|
kelproject/pykube | pykube/http.py | HTTPClient.delete | python | def delete(self, *args, **kwargs):
return self.session.delete(*args, **self.get_kwargs(**kwargs)) | Executes an HTTP DELETE.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/http.py#L314-L322 | [
"def get_kwargs(self, **kwargs):\n \"\"\"\n Creates a full URL to request based on arguments.\n\n :Parametes:\n - `kwargs`: All keyword arguments to build a kubernetes API endpoint\n \"\"\"\n version = kwargs.pop(\"version\", \"v1\")\n if version == \"v1\":\n base = kwargs.pop(\"base\... | class HTTPClient(object):
"""
Client for interfacing with the Kubernetes API.
"""
_session = None
def __init__(self, config):
"""
Creates a new instance of the HTTPClient.
:Parameters:
- `config`: The configuration instance
"""
self.config = config
self.url = self.config.cluster["server"]
session = requests.Session()
session.mount("https://", KubernetesHTTPAdapter(self.config))
session.mount("http://", KubernetesHTTPAdapter(self.config))
self.session = session
@property
def url(self):
return self._url
@url.setter
def url(self, value):
pr = urlparse(value)
self._url = pr.geturl()
@property
def version(self):
"""
Get Kubernetes API version
"""
response = self.get(version="", base="/version")
response.raise_for_status()
data = response.json()
return (data["major"], data["minor"])
def resource_list(self, api_version):
cached_attr = "_cached_resource_list"
if not hasattr(self, cached_attr):
r = self.get(version=api_version)
r.raise_for_status()
setattr(self, cached_attr, r.json())
return getattr(self, cached_attr)
def get_kwargs(self, **kwargs):
"""
Creates a full URL to request based on arguments.
:Parametes:
- `kwargs`: All keyword arguments to build a kubernetes API endpoint
"""
version = kwargs.pop("version", "v1")
if version == "v1":
base = kwargs.pop("base", "/api")
elif "/" in version:
base = kwargs.pop("base", "/apis")
else:
if "base" not in kwargs:
raise TypeError("unknown API version; base kwarg must be specified.")
base = kwargs.pop("base")
bits = [base, version]
# Overwrite (default) namespace from context if it was set
if "namespace" in kwargs:
n = kwargs.pop("namespace")
if n is not None:
if n:
namespace = n
else:
namespace = self.config.namespace
if namespace:
bits.extend([
"namespaces",
namespace,
])
url = kwargs.get("url", "")
if url.startswith("/"):
url = url[1:]
bits.append(url)
kwargs["url"] = self.url + posixpath.join(*bits)
return kwargs
def raise_for_status(self, resp):
try:
resp.raise_for_status()
except Exception:
# attempt to provide a more specific exception based around what
# Kubernetes returned as the error.
if resp.headers["content-type"] == "application/json":
payload = resp.json()
if payload["kind"] == "Status":
raise HTTPError(resp.status_code, payload["message"])
raise
def request(self, *args, **kwargs):
"""
Makes an API request based on arguments.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.request(*args, **self.get_kwargs(**kwargs))
def get(self, *args, **kwargs):
"""
Executes an HTTP GET.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.get(*args, **self.get_kwargs(**kwargs))
def options(self, *args, **kwargs):
"""
Executes an HTTP OPTIONS.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.options(*args, **self.get_kwargs(**kwargs))
def head(self, *args, **kwargs):
"""
Executes an HTTP HEAD.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.head(*args, **self.get_kwargs(**kwargs))
def post(self, *args, **kwargs):
"""
Executes an HTTP POST.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.post(*args, **self.get_kwargs(**kwargs))
def put(self, *args, **kwargs):
"""
Executes an HTTP PUT.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.put(*args, **self.get_kwargs(**kwargs))
def patch(self, *args, **kwargs):
"""
Executes an HTTP PATCH.
:Parameters:
- `args`: Non-keyword arguments
- `kwargs`: Keyword arguments
"""
return self.session.patch(*args, **self.get_kwargs(**kwargs))
|
kelproject/pykube | pykube/config.py | KubeConfig.from_file | python | def from_file(cls, filename, **kwargs):
filename = os.path.expanduser(filename)
if not os.path.isfile(filename):
raise exceptions.PyKubeError("Configuration file {} not found".format(filename))
with open(filename) as f:
doc = yaml.safe_load(f.read())
self = cls(doc, **kwargs)
self.filename = filename
return self | Creates an instance of the KubeConfig class from a kubeconfig file.
:Parameters:
- `filename`: The full path to the configuration file | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L63-L77 | null | class KubeConfig(object):
"""
Main configuration class.
"""
@classmethod
def from_service_account(cls, path="/var/run/secrets/kubernetes.io/serviceaccount", **kwargs):
with open(os.path.join(path, "token")) as fp:
token = fp.read()
host = os.environ.get("PYKUBE_KUBERNETES_SERVICE_HOST")
if host is None:
host = os.environ["KUBERNETES_SERVICE_HOST"]
port = os.environ.get("PYKUBE_KUBERNETES_SERVICE_PORT")
if port is None:
port = os.environ["KUBERNETES_SERVICE_PORT"]
doc = {
"clusters": [
{
"name": "self",
"cluster": {
"server": "https://{}:{}".format(host, port),
"certificate-authority": os.path.join(path, "ca.crt"),
},
},
],
"users": [
{
"name": "self",
"user": {
"token": token,
},
},
],
"contexts": [
{
"name": "self",
"context": {
"cluster": "self",
"user": "self",
},
}
],
"current-context": "self",
}
self = cls(doc, **kwargs)
return self
@classmethod
@classmethod
def from_url(cls, url, **kwargs):
"""
Creates an instance of the KubeConfig class from a single URL (useful
for interacting with kubectl proxy).
"""
doc = {
"clusters": [
{
"name": "self",
"cluster": {
"server": url,
},
},
],
"contexts": [
{
"name": "self",
"context": {
"cluster": "self",
},
}
],
"current-context": "self",
}
self = cls(doc, **kwargs)
return self
def __init__(self, doc, current_context=None):
"""
Creates an instance of the KubeConfig class.
"""
self.doc = doc
self._current_context = None
if current_context is not None:
self.set_current_context(current_context)
elif "current-context" in doc and doc["current-context"]:
self.set_current_context(doc["current-context"])
def set_current_context(self, value):
"""
Sets the context to the provided value.
:Parameters:
- `value`: The value for the current context
"""
self._current_context = value
@property
def current_context(self):
if self._current_context is None:
raise exceptions.PyKubeError("current context not set; call set_current_context")
return self._current_context
@property
def clusters(self):
"""
Returns known clusters by exposing as a read-only property.
"""
if not hasattr(self, "_clusters"):
cs = {}
for cr in self.doc["clusters"]:
cs[cr["name"]] = c = copy.deepcopy(cr["cluster"])
if "server" not in c:
c["server"] = "http://localhost"
BytesOrFile.maybe_set(c, "certificate-authority")
self._clusters = cs
return self._clusters
@property
def users(self):
"""
Returns known users by exposing as a read-only property.
"""
if not hasattr(self, "_users"):
us = {}
if "users" in self.doc:
for ur in self.doc["users"]:
us[ur["name"]] = u = copy.deepcopy(ur["user"])
BytesOrFile.maybe_set(u, "client-certificate")
BytesOrFile.maybe_set(u, "client-key")
self._users = us
return self._users
@property
def contexts(self):
"""
Returns known contexts by exposing as a read-only property.
"""
if not hasattr(self, "_contexts"):
cs = {}
for cr in self.doc["contexts"]:
cs[cr["name"]] = copy.deepcopy(cr["context"])
self._contexts = cs
return self._contexts
@property
def cluster(self):
"""
Returns the current selected cluster by exposing as a
read-only property.
"""
return self.clusters[self.contexts[self.current_context]["cluster"]]
@property
def user(self):
"""
Returns the current user set by current context
"""
return self.users.get(self.contexts[self.current_context].get("user", ""), {})
@property
def namespace(self):
"""
Returns the current context namespace by exposing as a read-only property.
"""
return self.contexts[self.current_context].get("namespace", "default")
def persist_doc(self):
if not hasattr(self, "filename") or not self.filename:
# Config was provided as string, not way to persit it
return
with open(self.filename, "w") as f:
yaml.safe_dump(self.doc, f, encoding='utf-8',
allow_unicode=True, default_flow_style=False)
def reload(self):
if hasattr(self, "_users"):
delattr(self, "_users")
if hasattr(self, "_contexts"):
delattr(self, "_contexts")
if hasattr(self, "_clusters"):
delattr(self, "_clusters")
|
kelproject/pykube | pykube/config.py | KubeConfig.from_url | python | def from_url(cls, url, **kwargs):
doc = {
"clusters": [
{
"name": "self",
"cluster": {
"server": url,
},
},
],
"contexts": [
{
"name": "self",
"context": {
"cluster": "self",
},
}
],
"current-context": "self",
}
self = cls(doc, **kwargs)
return self | Creates an instance of the KubeConfig class from a single URL (useful
for interacting with kubectl proxy). | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L80-L105 | null | class KubeConfig(object):
"""
Main configuration class.
"""
@classmethod
def from_service_account(cls, path="/var/run/secrets/kubernetes.io/serviceaccount", **kwargs):
with open(os.path.join(path, "token")) as fp:
token = fp.read()
host = os.environ.get("PYKUBE_KUBERNETES_SERVICE_HOST")
if host is None:
host = os.environ["KUBERNETES_SERVICE_HOST"]
port = os.environ.get("PYKUBE_KUBERNETES_SERVICE_PORT")
if port is None:
port = os.environ["KUBERNETES_SERVICE_PORT"]
doc = {
"clusters": [
{
"name": "self",
"cluster": {
"server": "https://{}:{}".format(host, port),
"certificate-authority": os.path.join(path, "ca.crt"),
},
},
],
"users": [
{
"name": "self",
"user": {
"token": token,
},
},
],
"contexts": [
{
"name": "self",
"context": {
"cluster": "self",
"user": "self",
},
}
],
"current-context": "self",
}
self = cls(doc, **kwargs)
return self
@classmethod
def from_file(cls, filename, **kwargs):
"""
Creates an instance of the KubeConfig class from a kubeconfig file.
:Parameters:
- `filename`: The full path to the configuration file
"""
filename = os.path.expanduser(filename)
if not os.path.isfile(filename):
raise exceptions.PyKubeError("Configuration file {} not found".format(filename))
with open(filename) as f:
doc = yaml.safe_load(f.read())
self = cls(doc, **kwargs)
self.filename = filename
return self
@classmethod
def __init__(self, doc, current_context=None):
"""
Creates an instance of the KubeConfig class.
"""
self.doc = doc
self._current_context = None
if current_context is not None:
self.set_current_context(current_context)
elif "current-context" in doc and doc["current-context"]:
self.set_current_context(doc["current-context"])
def set_current_context(self, value):
"""
Sets the context to the provided value.
:Parameters:
- `value`: The value for the current context
"""
self._current_context = value
@property
def current_context(self):
if self._current_context is None:
raise exceptions.PyKubeError("current context not set; call set_current_context")
return self._current_context
@property
def clusters(self):
"""
Returns known clusters by exposing as a read-only property.
"""
if not hasattr(self, "_clusters"):
cs = {}
for cr in self.doc["clusters"]:
cs[cr["name"]] = c = copy.deepcopy(cr["cluster"])
if "server" not in c:
c["server"] = "http://localhost"
BytesOrFile.maybe_set(c, "certificate-authority")
self._clusters = cs
return self._clusters
@property
def users(self):
"""
Returns known users by exposing as a read-only property.
"""
if not hasattr(self, "_users"):
us = {}
if "users" in self.doc:
for ur in self.doc["users"]:
us[ur["name"]] = u = copy.deepcopy(ur["user"])
BytesOrFile.maybe_set(u, "client-certificate")
BytesOrFile.maybe_set(u, "client-key")
self._users = us
return self._users
@property
def contexts(self):
"""
Returns known contexts by exposing as a read-only property.
"""
if not hasattr(self, "_contexts"):
cs = {}
for cr in self.doc["contexts"]:
cs[cr["name"]] = copy.deepcopy(cr["context"])
self._contexts = cs
return self._contexts
@property
def cluster(self):
"""
Returns the current selected cluster by exposing as a
read-only property.
"""
return self.clusters[self.contexts[self.current_context]["cluster"]]
@property
def user(self):
"""
Returns the current user set by current context
"""
return self.users.get(self.contexts[self.current_context].get("user", ""), {})
@property
def namespace(self):
"""
Returns the current context namespace by exposing as a read-only property.
"""
return self.contexts[self.current_context].get("namespace", "default")
def persist_doc(self):
if not hasattr(self, "filename") or not self.filename:
# Config was provided as string, not way to persit it
return
with open(self.filename, "w") as f:
yaml.safe_dump(self.doc, f, encoding='utf-8',
allow_unicode=True, default_flow_style=False)
def reload(self):
if hasattr(self, "_users"):
delattr(self, "_users")
if hasattr(self, "_contexts"):
delattr(self, "_contexts")
if hasattr(self, "_clusters"):
delattr(self, "_clusters")
|
kelproject/pykube | pykube/config.py | KubeConfig.clusters | python | def clusters(self):
if not hasattr(self, "_clusters"):
cs = {}
for cr in self.doc["clusters"]:
cs[cr["name"]] = c = copy.deepcopy(cr["cluster"])
if "server" not in c:
c["server"] = "http://localhost"
BytesOrFile.maybe_set(c, "certificate-authority")
self._clusters = cs
return self._clusters | Returns known clusters by exposing as a read-only property. | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L134-L146 | [
"def maybe_set(cls, d, key):\n file_key = key\n data_key = \"{}-data\".format(key)\n if data_key in d:\n d[file_key] = cls(data=d[data_key])\n del d[data_key]\n elif file_key in d:\n d[file_key] = cls(filename=d[file_key])\n"
] | class KubeConfig(object):
"""
Main configuration class.
"""
@classmethod
def from_service_account(cls, path="/var/run/secrets/kubernetes.io/serviceaccount", **kwargs):
with open(os.path.join(path, "token")) as fp:
token = fp.read()
host = os.environ.get("PYKUBE_KUBERNETES_SERVICE_HOST")
if host is None:
host = os.environ["KUBERNETES_SERVICE_HOST"]
port = os.environ.get("PYKUBE_KUBERNETES_SERVICE_PORT")
if port is None:
port = os.environ["KUBERNETES_SERVICE_PORT"]
doc = {
"clusters": [
{
"name": "self",
"cluster": {
"server": "https://{}:{}".format(host, port),
"certificate-authority": os.path.join(path, "ca.crt"),
},
},
],
"users": [
{
"name": "self",
"user": {
"token": token,
},
},
],
"contexts": [
{
"name": "self",
"context": {
"cluster": "self",
"user": "self",
},
}
],
"current-context": "self",
}
self = cls(doc, **kwargs)
return self
@classmethod
def from_file(cls, filename, **kwargs):
"""
Creates an instance of the KubeConfig class from a kubeconfig file.
:Parameters:
- `filename`: The full path to the configuration file
"""
filename = os.path.expanduser(filename)
if not os.path.isfile(filename):
raise exceptions.PyKubeError("Configuration file {} not found".format(filename))
with open(filename) as f:
doc = yaml.safe_load(f.read())
self = cls(doc, **kwargs)
self.filename = filename
return self
@classmethod
def from_url(cls, url, **kwargs):
"""
Creates an instance of the KubeConfig class from a single URL (useful
for interacting with kubectl proxy).
"""
doc = {
"clusters": [
{
"name": "self",
"cluster": {
"server": url,
},
},
],
"contexts": [
{
"name": "self",
"context": {
"cluster": "self",
},
}
],
"current-context": "self",
}
self = cls(doc, **kwargs)
return self
def __init__(self, doc, current_context=None):
"""
Creates an instance of the KubeConfig class.
"""
self.doc = doc
self._current_context = None
if current_context is not None:
self.set_current_context(current_context)
elif "current-context" in doc and doc["current-context"]:
self.set_current_context(doc["current-context"])
def set_current_context(self, value):
"""
Sets the context to the provided value.
:Parameters:
- `value`: The value for the current context
"""
self._current_context = value
@property
def current_context(self):
if self._current_context is None:
raise exceptions.PyKubeError("current context not set; call set_current_context")
return self._current_context
@property
@property
def users(self):
"""
Returns known users by exposing as a read-only property.
"""
if not hasattr(self, "_users"):
us = {}
if "users" in self.doc:
for ur in self.doc["users"]:
us[ur["name"]] = u = copy.deepcopy(ur["user"])
BytesOrFile.maybe_set(u, "client-certificate")
BytesOrFile.maybe_set(u, "client-key")
self._users = us
return self._users
@property
def contexts(self):
"""
Returns known contexts by exposing as a read-only property.
"""
if not hasattr(self, "_contexts"):
cs = {}
for cr in self.doc["contexts"]:
cs[cr["name"]] = copy.deepcopy(cr["context"])
self._contexts = cs
return self._contexts
@property
def cluster(self):
"""
Returns the current selected cluster by exposing as a
read-only property.
"""
return self.clusters[self.contexts[self.current_context]["cluster"]]
@property
def user(self):
"""
Returns the current user set by current context
"""
return self.users.get(self.contexts[self.current_context].get("user", ""), {})
@property
def namespace(self):
"""
Returns the current context namespace by exposing as a read-only property.
"""
return self.contexts[self.current_context].get("namespace", "default")
def persist_doc(self):
if not hasattr(self, "filename") or not self.filename:
# Config was provided as string, not way to persit it
return
with open(self.filename, "w") as f:
yaml.safe_dump(self.doc, f, encoding='utf-8',
allow_unicode=True, default_flow_style=False)
def reload(self):
if hasattr(self, "_users"):
delattr(self, "_users")
if hasattr(self, "_contexts"):
delattr(self, "_contexts")
if hasattr(self, "_clusters"):
delattr(self, "_clusters")
|
kelproject/pykube | pykube/config.py | KubeConfig.users | python | def users(self):
if not hasattr(self, "_users"):
us = {}
if "users" in self.doc:
for ur in self.doc["users"]:
us[ur["name"]] = u = copy.deepcopy(ur["user"])
BytesOrFile.maybe_set(u, "client-certificate")
BytesOrFile.maybe_set(u, "client-key")
self._users = us
return self._users | Returns known users by exposing as a read-only property. | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L149-L161 | [
"def maybe_set(cls, d, key):\n file_key = key\n data_key = \"{}-data\".format(key)\n if data_key in d:\n d[file_key] = cls(data=d[data_key])\n del d[data_key]\n elif file_key in d:\n d[file_key] = cls(filename=d[file_key])\n"
] | class KubeConfig(object):
"""
Main configuration class.
"""
@classmethod
def from_service_account(cls, path="/var/run/secrets/kubernetes.io/serviceaccount", **kwargs):
with open(os.path.join(path, "token")) as fp:
token = fp.read()
host = os.environ.get("PYKUBE_KUBERNETES_SERVICE_HOST")
if host is None:
host = os.environ["KUBERNETES_SERVICE_HOST"]
port = os.environ.get("PYKUBE_KUBERNETES_SERVICE_PORT")
if port is None:
port = os.environ["KUBERNETES_SERVICE_PORT"]
doc = {
"clusters": [
{
"name": "self",
"cluster": {
"server": "https://{}:{}".format(host, port),
"certificate-authority": os.path.join(path, "ca.crt"),
},
},
],
"users": [
{
"name": "self",
"user": {
"token": token,
},
},
],
"contexts": [
{
"name": "self",
"context": {
"cluster": "self",
"user": "self",
},
}
],
"current-context": "self",
}
self = cls(doc, **kwargs)
return self
@classmethod
def from_file(cls, filename, **kwargs):
"""
Creates an instance of the KubeConfig class from a kubeconfig file.
:Parameters:
- `filename`: The full path to the configuration file
"""
filename = os.path.expanduser(filename)
if not os.path.isfile(filename):
raise exceptions.PyKubeError("Configuration file {} not found".format(filename))
with open(filename) as f:
doc = yaml.safe_load(f.read())
self = cls(doc, **kwargs)
self.filename = filename
return self
@classmethod
def from_url(cls, url, **kwargs):
"""
Creates an instance of the KubeConfig class from a single URL (useful
for interacting with kubectl proxy).
"""
doc = {
"clusters": [
{
"name": "self",
"cluster": {
"server": url,
},
},
],
"contexts": [
{
"name": "self",
"context": {
"cluster": "self",
},
}
],
"current-context": "self",
}
self = cls(doc, **kwargs)
return self
def __init__(self, doc, current_context=None):
"""
Creates an instance of the KubeConfig class.
"""
self.doc = doc
self._current_context = None
if current_context is not None:
self.set_current_context(current_context)
elif "current-context" in doc and doc["current-context"]:
self.set_current_context(doc["current-context"])
def set_current_context(self, value):
"""
Sets the context to the provided value.
:Parameters:
- `value`: The value for the current context
"""
self._current_context = value
@property
def current_context(self):
if self._current_context is None:
raise exceptions.PyKubeError("current context not set; call set_current_context")
return self._current_context
@property
def clusters(self):
"""
Returns known clusters by exposing as a read-only property.
"""
if not hasattr(self, "_clusters"):
cs = {}
for cr in self.doc["clusters"]:
cs[cr["name"]] = c = copy.deepcopy(cr["cluster"])
if "server" not in c:
c["server"] = "http://localhost"
BytesOrFile.maybe_set(c, "certificate-authority")
self._clusters = cs
return self._clusters
@property
@property
def contexts(self):
"""
Returns known contexts by exposing as a read-only property.
"""
if not hasattr(self, "_contexts"):
cs = {}
for cr in self.doc["contexts"]:
cs[cr["name"]] = copy.deepcopy(cr["context"])
self._contexts = cs
return self._contexts
@property
def cluster(self):
"""
Returns the current selected cluster by exposing as a
read-only property.
"""
return self.clusters[self.contexts[self.current_context]["cluster"]]
@property
def user(self):
"""
Returns the current user set by current context
"""
return self.users.get(self.contexts[self.current_context].get("user", ""), {})
@property
def namespace(self):
"""
Returns the current context namespace by exposing as a read-only property.
"""
return self.contexts[self.current_context].get("namespace", "default")
def persist_doc(self):
if not hasattr(self, "filename") or not self.filename:
# Config was provided as string, not way to persit it
return
with open(self.filename, "w") as f:
yaml.safe_dump(self.doc, f, encoding='utf-8',
allow_unicode=True, default_flow_style=False)
def reload(self):
if hasattr(self, "_users"):
delattr(self, "_users")
if hasattr(self, "_contexts"):
delattr(self, "_contexts")
if hasattr(self, "_clusters"):
delattr(self, "_clusters")
|
kelproject/pykube | pykube/config.py | KubeConfig.contexts | python | def contexts(self):
if not hasattr(self, "_contexts"):
cs = {}
for cr in self.doc["contexts"]:
cs[cr["name"]] = copy.deepcopy(cr["context"])
self._contexts = cs
return self._contexts | Returns known contexts by exposing as a read-only property. | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L164-L173 | null | class KubeConfig(object):
"""
Main configuration class.
"""
@classmethod
def from_service_account(cls, path="/var/run/secrets/kubernetes.io/serviceaccount", **kwargs):
with open(os.path.join(path, "token")) as fp:
token = fp.read()
host = os.environ.get("PYKUBE_KUBERNETES_SERVICE_HOST")
if host is None:
host = os.environ["KUBERNETES_SERVICE_HOST"]
port = os.environ.get("PYKUBE_KUBERNETES_SERVICE_PORT")
if port is None:
port = os.environ["KUBERNETES_SERVICE_PORT"]
doc = {
"clusters": [
{
"name": "self",
"cluster": {
"server": "https://{}:{}".format(host, port),
"certificate-authority": os.path.join(path, "ca.crt"),
},
},
],
"users": [
{
"name": "self",
"user": {
"token": token,
},
},
],
"contexts": [
{
"name": "self",
"context": {
"cluster": "self",
"user": "self",
},
}
],
"current-context": "self",
}
self = cls(doc, **kwargs)
return self
@classmethod
def from_file(cls, filename, **kwargs):
"""
Creates an instance of the KubeConfig class from a kubeconfig file.
:Parameters:
- `filename`: The full path to the configuration file
"""
filename = os.path.expanduser(filename)
if not os.path.isfile(filename):
raise exceptions.PyKubeError("Configuration file {} not found".format(filename))
with open(filename) as f:
doc = yaml.safe_load(f.read())
self = cls(doc, **kwargs)
self.filename = filename
return self
@classmethod
def from_url(cls, url, **kwargs):
"""
Creates an instance of the KubeConfig class from a single URL (useful
for interacting with kubectl proxy).
"""
doc = {
"clusters": [
{
"name": "self",
"cluster": {
"server": url,
},
},
],
"contexts": [
{
"name": "self",
"context": {
"cluster": "self",
},
}
],
"current-context": "self",
}
self = cls(doc, **kwargs)
return self
def __init__(self, doc, current_context=None):
"""
Creates an instance of the KubeConfig class.
"""
self.doc = doc
self._current_context = None
if current_context is not None:
self.set_current_context(current_context)
elif "current-context" in doc and doc["current-context"]:
self.set_current_context(doc["current-context"])
def set_current_context(self, value):
"""
Sets the context to the provided value.
:Parameters:
- `value`: The value for the current context
"""
self._current_context = value
@property
def current_context(self):
if self._current_context is None:
raise exceptions.PyKubeError("current context not set; call set_current_context")
return self._current_context
@property
def clusters(self):
"""
Returns known clusters by exposing as a read-only property.
"""
if not hasattr(self, "_clusters"):
cs = {}
for cr in self.doc["clusters"]:
cs[cr["name"]] = c = copy.deepcopy(cr["cluster"])
if "server" not in c:
c["server"] = "http://localhost"
BytesOrFile.maybe_set(c, "certificate-authority")
self._clusters = cs
return self._clusters
@property
def users(self):
"""
Returns known users by exposing as a read-only property.
"""
if not hasattr(self, "_users"):
us = {}
if "users" in self.doc:
for ur in self.doc["users"]:
us[ur["name"]] = u = copy.deepcopy(ur["user"])
BytesOrFile.maybe_set(u, "client-certificate")
BytesOrFile.maybe_set(u, "client-key")
self._users = us
return self._users
@property
@property
def cluster(self):
"""
Returns the current selected cluster by exposing as a
read-only property.
"""
return self.clusters[self.contexts[self.current_context]["cluster"]]
@property
def user(self):
"""
Returns the current user set by current context
"""
return self.users.get(self.contexts[self.current_context].get("user", ""), {})
@property
def namespace(self):
"""
Returns the current context namespace by exposing as a read-only property.
"""
return self.contexts[self.current_context].get("namespace", "default")
def persist_doc(self):
if not hasattr(self, "filename") or not self.filename:
# Config was provided as string, not way to persit it
return
with open(self.filename, "w") as f:
yaml.safe_dump(self.doc, f, encoding='utf-8',
allow_unicode=True, default_flow_style=False)
def reload(self):
if hasattr(self, "_users"):
delattr(self, "_users")
if hasattr(self, "_contexts"):
delattr(self, "_contexts")
if hasattr(self, "_clusters"):
delattr(self, "_clusters")
|
kelproject/pykube | pykube/config.py | KubeConfig.user | python | def user(self):
return self.users.get(self.contexts[self.current_context].get("user", ""), {}) | Returns the current user set by current context | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L184-L188 | null | class KubeConfig(object):
"""
Main configuration class.
"""
@classmethod
def from_service_account(cls, path="/var/run/secrets/kubernetes.io/serviceaccount", **kwargs):
with open(os.path.join(path, "token")) as fp:
token = fp.read()
host = os.environ.get("PYKUBE_KUBERNETES_SERVICE_HOST")
if host is None:
host = os.environ["KUBERNETES_SERVICE_HOST"]
port = os.environ.get("PYKUBE_KUBERNETES_SERVICE_PORT")
if port is None:
port = os.environ["KUBERNETES_SERVICE_PORT"]
doc = {
"clusters": [
{
"name": "self",
"cluster": {
"server": "https://{}:{}".format(host, port),
"certificate-authority": os.path.join(path, "ca.crt"),
},
},
],
"users": [
{
"name": "self",
"user": {
"token": token,
},
},
],
"contexts": [
{
"name": "self",
"context": {
"cluster": "self",
"user": "self",
},
}
],
"current-context": "self",
}
self = cls(doc, **kwargs)
return self
@classmethod
def from_file(cls, filename, **kwargs):
"""
Creates an instance of the KubeConfig class from a kubeconfig file.
:Parameters:
- `filename`: The full path to the configuration file
"""
filename = os.path.expanduser(filename)
if not os.path.isfile(filename):
raise exceptions.PyKubeError("Configuration file {} not found".format(filename))
with open(filename) as f:
doc = yaml.safe_load(f.read())
self = cls(doc, **kwargs)
self.filename = filename
return self
@classmethod
def from_url(cls, url, **kwargs):
"""
Creates an instance of the KubeConfig class from a single URL (useful
for interacting with kubectl proxy).
"""
doc = {
"clusters": [
{
"name": "self",
"cluster": {
"server": url,
},
},
],
"contexts": [
{
"name": "self",
"context": {
"cluster": "self",
},
}
],
"current-context": "self",
}
self = cls(doc, **kwargs)
return self
def __init__(self, doc, current_context=None):
"""
Creates an instance of the KubeConfig class.
"""
self.doc = doc
self._current_context = None
if current_context is not None:
self.set_current_context(current_context)
elif "current-context" in doc and doc["current-context"]:
self.set_current_context(doc["current-context"])
def set_current_context(self, value):
"""
Sets the context to the provided value.
:Parameters:
- `value`: The value for the current context
"""
self._current_context = value
@property
def current_context(self):
if self._current_context is None:
raise exceptions.PyKubeError("current context not set; call set_current_context")
return self._current_context
@property
def clusters(self):
"""
Returns known clusters by exposing as a read-only property.
"""
if not hasattr(self, "_clusters"):
cs = {}
for cr in self.doc["clusters"]:
cs[cr["name"]] = c = copy.deepcopy(cr["cluster"])
if "server" not in c:
c["server"] = "http://localhost"
BytesOrFile.maybe_set(c, "certificate-authority")
self._clusters = cs
return self._clusters
@property
def users(self):
"""
Returns known users by exposing as a read-only property.
"""
if not hasattr(self, "_users"):
us = {}
if "users" in self.doc:
for ur in self.doc["users"]:
us[ur["name"]] = u = copy.deepcopy(ur["user"])
BytesOrFile.maybe_set(u, "client-certificate")
BytesOrFile.maybe_set(u, "client-key")
self._users = us
return self._users
@property
def contexts(self):
"""
Returns known contexts by exposing as a read-only property.
"""
if not hasattr(self, "_contexts"):
cs = {}
for cr in self.doc["contexts"]:
cs[cr["name"]] = copy.deepcopy(cr["context"])
self._contexts = cs
return self._contexts
@property
def cluster(self):
"""
Returns the current selected cluster by exposing as a
read-only property.
"""
return self.clusters[self.contexts[self.current_context]["cluster"]]
@property
@property
def namespace(self):
"""
Returns the current context namespace by exposing as a read-only property.
"""
return self.contexts[self.current_context].get("namespace", "default")
def persist_doc(self):
if not hasattr(self, "filename") or not self.filename:
# Config was provided as string, not way to persit it
return
with open(self.filename, "w") as f:
yaml.safe_dump(self.doc, f, encoding='utf-8',
allow_unicode=True, default_flow_style=False)
def reload(self):
if hasattr(self, "_users"):
delattr(self, "_users")
if hasattr(self, "_contexts"):
delattr(self, "_contexts")
if hasattr(self, "_clusters"):
delattr(self, "_clusters")
|
kelproject/pykube | pykube/config.py | BytesOrFile.bytes | python | def bytes(self):
if self._filename:
with open(self._filename, "rb") as f:
return f.read()
else:
return self._bytes | Returns the provided data as bytes. | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L250-L258 | null | class BytesOrFile(object):
"""
Implements the same interface for files and byte input.
"""
@classmethod
def maybe_set(cls, d, key):
file_key = key
data_key = "{}-data".format(key)
if data_key in d:
d[file_key] = cls(data=d[data_key])
del d[data_key]
elif file_key in d:
d[file_key] = cls(filename=d[file_key])
def __init__(self, filename=None, data=None):
"""
Creates a new instance of BytesOrFile.
:Parameters:
- `filename`: A full path to a file
- `data`: base64 encoded bytes
"""
self._filename = None
self._bytes = None
if filename is not None and data is not None:
raise TypeError("filename or data kwarg must be specified, not both")
elif filename is not None:
if not os.path.isfile(filename):
raise exceptions.PyKubeError("'{}' file does not exist".format(filename))
self._filename = filename
elif data is not None:
self._bytes = base64.b64decode(data)
else:
raise TypeError("filename or data kwarg must be specified")
def filename(self):
    """
    Returns the provided data as a file location.

    A file-backed instance returns its path directly; a data-backed one
    writes the bytes to a fresh temporary file (not deleted on close)
    and returns that file's path.
    """
    if self._filename:
        return self._filename
    tmp = tempfile.NamedTemporaryFile(delete=False)
    # Close the handle before handing out the path so the content is
    # flushed and readable by the caller.
    with tmp as f:
        f.write(self._bytes)
    return tmp.name
|
kelproject/pykube | pykube/config.py | BytesOrFile.filename | python | def filename(self):
if self._filename:
return self._filename
else:
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(self._bytes)
return f.name | Returns the provided data as a file location. | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/config.py#L260-L269 | null | class BytesOrFile(object):
"""
Implements the same interface for files and byte input.
"""
@classmethod
def maybe_set(cls, d, key):
    # Replace d[key] (a file path) or d["<key>-data"] (inline base64
    # content) with a cls instance, mutating d in place. The "-data"
    # form takes precedence and its raw entry is deleted once wrapped;
    # missing keys leave d untouched.
    file_key = key
    data_key = "{}-data".format(key)
    if data_key in d:
        d[file_key] = cls(data=d[data_key])
        del d[data_key]
    elif file_key in d:
        d[file_key] = cls(filename=d[file_key])
def __init__(self, filename=None, data=None):
    """
    Creates a new instance of BytesOrFile.

    Exactly one of `filename` / `data` must be supplied.

    :Parameters:
       - `filename`: A full path to a file
       - `data`: base64 encoded bytes
    """
    self._filename = None
    self._bytes = None
    if filename is not None and data is not None:
        raise TypeError("filename or data kwarg must be specified, not both")
    elif filename is not None:
        # Fail fast on a missing file rather than on first read.
        if not os.path.isfile(filename):
            raise exceptions.PyKubeError("'{}' file does not exist".format(filename))
        self._filename = filename
    elif data is not None:
        # Stored decoded so readers never have to re-decode.
        self._bytes = base64.b64decode(data)
    else:
        raise TypeError("filename or data kwarg must be specified")
def bytes(self):
    """
    Returns the provided data as bytes.

    File-backed instances read the file from disk on every call;
    data-backed instances return the already-decoded bytes.
    """
    if not self._filename:
        return self._bytes
    with open(self._filename, "rb") as fh:
        return fh.read()
|
kelproject/pykube | pykube/objects.py | object_factory | python | def object_factory(api, api_version, kind):
resource_list = api.resource_list(api_version)
resource = next((resource for resource in resource_list["resources"] if resource["kind"] == kind), None)
base = NamespacedAPIObject if resource["namespaced"] else APIObject
return type(kind, (base,), {
"version": api_version,
"endpoint": resource["name"],
"kind": kind
}) | Dynamically builds a Python class for the given Kubernetes object in an API.
For example:
api = pykube.HTTPClient(...)
NetworkPolicy = pykube.object_factory(api, "networking.k8s.io/v1", "NetworkPolicy")
This enables construction of any Kubernetes object kind without explicit support
from pykube.
Currently, the HTTPClient passed to this function will not be bound to the returned type.
It is planned to fix this, but in the mean time pass it as you would normally. | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/objects.py#L138-L160 | null | import copy
import json
import os.path as op
from inspect import getmro
import six
from six.moves.urllib.parse import urlencode
from .exceptions import ObjectDoesNotExist
from .mixins import ReplicatedMixin, ScalableMixin
from .query import Query
from .utils import obj_merge
class ObjectManager(object):
    """Descriptor exposed as ``objects`` on APIObject subclasses.

    Calling ``Model.objects(api)`` returns a ``Query`` bound to that
    model class (and, for namespaced models, to a namespace).
    """

    def __call__(self, api, namespace=None):
        # Namespaced resource types default to the client's configured
        # namespace when none is given explicitly.
        if namespace is None and NamespacedAPIObject in getmro(self.api_obj_class):
            namespace = api.config.namespace
        return Query(api, self.api_obj_class, namespace=namespace)

    def __get__(self, obj, api_obj_class):
        # Class-level access only (e.g. Pod.objects); instance access is
        # a programming error.
        assert obj is None, "cannot invoke objects on resource object."
        self.api_obj_class = api_obj_class
        return self
@six.python_2_unicode_compatible
class APIObject(object):
    """Wrapper around a single API resource dict.

    ``self.obj`` holds the resource; ``self._original_obj`` keeps a deep
    copy taken at load time so ``update()`` can send a merge patch. All
    HTTP traffic goes through the client stored in ``self.api``.
    """

    objects = ObjectManager()  # query entry point, e.g. Pod.objects(api)
    base = None
    namespace = None

    def __init__(self, api, obj):
        self.api = api
        self.set_obj(obj)

    def set_obj(self, obj):
        # Snapshot the object so update() can diff against it later.
        self.obj = obj
        self._original_obj = copy.deepcopy(obj)

    def __repr__(self):
        return "<{kind} {name}>".format(kind=self.kind, name=self.name)

    def __str__(self):
        return self.name

    @property
    def name(self):
        return self.obj["metadata"]["name"]

    @property
    def metadata(self):
        return self.obj["metadata"]

    @property
    def labels(self):
        return self.obj["metadata"].get("labels", {})

    @property
    def annotations(self):
        return self.obj["metadata"].get("annotations", {})

    def api_kwargs(self, **kwargs):
        """Build the keyword arguments for an HTTP call on this object."""
        kw = {}
        # Construct url for api request
        obj_list = kwargs.pop("obj_list", False)
        if obj_list:
            # Collection URL (used e.g. by create()).
            kw["url"] = self.endpoint
        else:
            operation = kwargs.pop("operation", "")
            kw["url"] = op.normpath(op.join(self.endpoint, self.name, operation))
        params = kwargs.pop("params", None)
        if params is not None:
            query_string = urlencode(params)
            kw["url"] = "{}{}".format(kw["url"], "?{}".format(query_string) if query_string else "")
        if self.base:
            kw["base"] = self.base
        kw["version"] = self.version
        if self.namespace is not None:
            kw["namespace"] = self.namespace
        kw.update(kwargs)
        return kw

    def exists(self, ensure=False):
        """Return True if the object exists; with ensure=True, raise instead
        of returning False."""
        r = self.api.get(**self.api_kwargs())
        if r.status_code not in {200, 404}:
            self.api.raise_for_status(r)
        if not r.ok:
            if ensure:
                raise ObjectDoesNotExist("{} does not exist.".format(self.name))
            else:
                return False
        return True

    def create(self):
        # POST to the collection URL; refresh local state from the response.
        r = self.api.post(**self.api_kwargs(data=json.dumps(self.obj), obj_list=True))
        self.api.raise_for_status(r)
        self.set_obj(r.json())

    def reload(self):
        # Re-fetch the object and replace the local copy.
        r = self.api.get(**self.api_kwargs())
        self.api.raise_for_status(r)
        self.set_obj(r.json())

    def watch(self):
        # Watch just this object by filtering on its name.
        return self.__class__.objects(
            self.api,
            namespace=self.namespace
        ).filter(field_selector={
            "metadata.name": self.name
        }).watch()

    def update(self):
        # Merge local edits over the original snapshot, then send a
        # JSON merge patch.
        self.obj = obj_merge(self.obj, self._original_obj)
        r = self.api.patch(**self.api_kwargs(
            headers={"Content-Type": "application/merge-patch+json"},
            data=json.dumps(self.obj),
        ))
        self.api.raise_for_status(r)
        self.set_obj(r.json())

    def delete(self):
        # A 404 is tolerated: deleting an already-gone object is not an error.
        r = self.api.delete(**self.api_kwargs())
        if r.status_code != 404:
            self.api.raise_for_status(r)
class NamespacedAPIObject(APIObject):
    """API object that lives inside a namespace."""

    @property
    def namespace(self):
        # Prefer the namespace recorded on the object itself; fall back
        # to the client's configured namespace.
        if self.obj["metadata"].get("namespace"):
            return self.obj["metadata"]["namespace"]
        else:
            return self.api.config.namespace
# Simple resource bindings: each class only pins the API version, REST
# endpoint and kind; behavior comes entirely from the base classes.
class ConfigMap(NamespacedAPIObject):
    version = "v1"
    endpoint = "configmaps"
    kind = "ConfigMap"


class CronJob(NamespacedAPIObject):
    version = "batch/v2alpha1"
    endpoint = "cronjobs"
    kind = "CronJob"


class DaemonSet(NamespacedAPIObject):
    version = "extensions/v1beta1"
    endpoint = "daemonsets"
    kind = "DaemonSet"
class Deployment(NamespacedAPIObject, ReplicatedMixin, ScalableMixin):
    version = "extensions/v1beta1"
    endpoint = "deployments"
    kind = "Deployment"

    @property
    def ready(self):
        # Ready once the controller has observed the latest spec
        # generation and every replica has been updated.
        return (
            self.obj["status"]["observedGeneration"] >= self.obj["metadata"]["generation"] and
            self.obj["status"]["updatedReplicas"] == self.replicas
        )

    def rollout_undo(self, target_revision=None):
        """Produces same action as kubectl rollout undo deployment command.
        Input variable is revision to rollback to (in kubectl, --to-revision)
        """
        if target_revision is None:
            # No explicit revision given — an empty rollbackTo is sent
            # (presumably meaning "previous revision"; confirm upstream).
            revision = {}
        else:
            revision = {
                "revision": target_revision
            }

        params = {
            "kind": "DeploymentRollback",
            "apiVersion": self.version,
            "name": self.name,
            "rollbackTo": revision
        }
        kwargs = {
            "version": self.version,
            "namespace": self.namespace,
            "operation": "rollback",
        }
        r = self.api.post(**self.api_kwargs(data=json.dumps(params), **kwargs))
        # NOTE(review): other methods route errors through
        # self.api.raise_for_status(r); this uses the response's own
        # raise_for_status — confirm whether that is intentional.
        r.raise_for_status()
        return r.text
# More thin resource bindings (version / endpoint / kind only).
class Endpoint(NamespacedAPIObject):
    version = "v1"
    endpoint = "endpoints"
    kind = "Endpoint"


class Event(NamespacedAPIObject):
    version = "v1"
    endpoint = "events"
    kind = "Event"


class LimitRange(NamespacedAPIObject):
    version = "v1"
    endpoint = "limitranges"
    kind = "LimitRange"


class ResourceQuota(NamespacedAPIObject):
    version = "v1"
    endpoint = "resourcequotas"
    kind = "ResourceQuota"


class ServiceAccount(NamespacedAPIObject):
    version = "v1"
    endpoint = "serviceaccounts"
    kind = "ServiceAccount"


class Ingress(NamespacedAPIObject):
    version = "extensions/v1beta1"
    endpoint = "ingresses"
    kind = "Ingress"


# Cluster-scoped (no namespace in its URL).
class ThirdPartyResource(APIObject):
    version = "extensions/v1beta1"
    endpoint = "thirdpartyresources"
    kind = "ThirdPartyResource"
class Job(NamespacedAPIObject, ScalableMixin):
    version = "batch/v1"
    endpoint = "jobs"
    kind = "Job"
    # NOTE(review): presumably consumed by ScalableMixin so that jobs are
    # scaled via `parallelism` rather than `replicas` — confirm in mixins.
    scalable_attr = "parallelism"

    @property
    def parallelism(self):
        return self.obj["spec"]["parallelism"]

    @parallelism.setter
    def parallelism(self, value):
        # Local mutation only; callers must update() to persist it.
        self.obj["spec"]["parallelism"] = value
class Namespace(APIObject):
    version = "v1"
    endpoint = "namespaces"
    kind = "Namespace"


class Node(APIObject):
    version = "v1"
    endpoint = "nodes"
    kind = "Node"

    @property
    def unschedulable(self):
        # An absent key means the node is schedulable.
        if 'unschedulable' in self.obj["spec"]:
            return self.obj["spec"]["unschedulable"]
        return False

    @unschedulable.setter
    def unschedulable(self, value):
        self.obj["spec"]["unschedulable"] = value
        # Unlike most setters here, this immediately persists the change.
        self.update()

    def cordon(self):
        # Mark the node unschedulable (analogue of `kubectl cordon`).
        self.unschedulable = True

    def uncordon(self):
        self.unschedulable = False
class Pod(NamespacedAPIObject):
    version = "v1"
    endpoint = "pods"
    kind = "Pod"

    @property
    def ready(self):
        # True when the pod's "Ready" condition exists and reports "True".
        cs = self.obj["status"].get("conditions", [])
        condition = next((c for c in cs if c["type"] == "Ready"), None)
        return condition is not None and condition["status"] == "True"

    def logs(self, container=None, pretty=None, previous=False,
             since_seconds=None, since_time=None, timestamps=False,
             tail_lines=None, limit_bytes=None):
        """
        Produces the same result as calling kubectl logs pod/<pod-name>.
        Check parameters meaning at
        http://kubernetes.io/docs/api-reference/v1/operations/,
        part 'read log of the specified Pod'. The result is plain text.
        """
        log_call = "log"
        params = {}
        if container is not None:
            params["container"] = container
        if pretty is not None:
            params["pretty"] = pretty
        if previous:
            params["previous"] = "true"
        # sinceSeconds / sinceTime are mutually exclusive: when both are
        # supplied, neither is sent.
        if since_seconds is not None and since_time is None:
            params["sinceSeconds"] = int(since_seconds)
        elif since_time is not None and since_seconds is None:
            params["sinceTime"] = since_time
        if timestamps:
            params["timestamps"] = "true"
        if tail_lines is not None:
            params["tailLines"] = int(tail_lines)
        if limit_bytes is not None:
            params["limitBytes"] = int(limit_bytes)

        query_string = urlencode(params)
        log_call += "?{}".format(query_string) if query_string else ""
        kwargs = {
            "version": self.version,
            "namespace": self.namespace,
            "operation": log_call,
        }
        r = self.api.get(**self.api_kwargs(**kwargs))
        r.raise_for_status()
        return r.text
class ReplicationController(NamespacedAPIObject, ReplicatedMixin, ScalableMixin):
    version = "v1"
    endpoint = "replicationcontrollers"
    kind = "ReplicationController"

    @property
    def ready(self):
        # Controller has seen the latest generation and all replicas
        # report ready.
        return (
            self.obj['status']['observedGeneration'] >= self.obj['metadata']['generation'] and
            self.obj['status']['readyReplicas'] == self.replicas
        )


class ReplicaSet(NamespacedAPIObject, ReplicatedMixin, ScalableMixin):
    version = "extensions/v1beta1"
    endpoint = "replicasets"
    kind = "ReplicaSet"


class Secret(NamespacedAPIObject):
    version = "v1"
    endpoint = "secrets"
    kind = "Secret"


class Service(NamespacedAPIObject):
    version = "v1"
    endpoint = "services"
    kind = "Service"
# Remaining thin bindings. Classes deriving from APIObject directly are
# cluster-scoped; NamespacedAPIObject subclasses live in a namespace.
class PersistentVolume(APIObject):
    version = "v1"
    endpoint = "persistentvolumes"
    kind = "PersistentVolume"


class PersistentVolumeClaim(NamespacedAPIObject):
    version = "v1"
    endpoint = "persistentvolumeclaims"
    kind = "PersistentVolumeClaim"


class HorizontalPodAutoscaler(NamespacedAPIObject):
    version = "autoscaling/v1"
    endpoint = "horizontalpodautoscalers"
    kind = "HorizontalPodAutoscaler"


class PetSet(NamespacedAPIObject):
    version = "apps/v1alpha1"
    endpoint = "petsets"
    kind = "PetSet"


class StatefulSet(NamespacedAPIObject, ReplicatedMixin, ScalableMixin):
    version = "apps/v1beta1"
    endpoint = "statefulsets"
    kind = "StatefulSet"


class Role(NamespacedAPIObject):
    version = "rbac.authorization.k8s.io/v1alpha1"
    endpoint = "roles"
    kind = "Role"


class RoleBinding(NamespacedAPIObject):
    version = "rbac.authorization.k8s.io/v1alpha1"
    endpoint = "rolebindings"
    kind = "RoleBinding"


class ClusterRole(APIObject):
    version = "rbac.authorization.k8s.io/v1alpha1"
    endpoint = "clusterroles"
    kind = "ClusterRole"


class ClusterRoleBinding(APIObject):
    version = "rbac.authorization.k8s.io/v1alpha1"
    endpoint = "clusterrolebindings"
    kind = "ClusterRoleBinding"


class PodSecurityPolicy(APIObject):
    version = "extensions/v1beta1"
    endpoint = "podsecuritypolicies"
    kind = "PodSecurityPolicy"
|
kelproject/pykube | pykube/objects.py | Deployment.rollout_undo | python | def rollout_undo(self, target_revision=None):
if target_revision is None:
revision = {}
else:
revision = {
"revision": target_revision
}
params = {
"kind": "DeploymentRollback",
"apiVersion": self.version,
"name": self.name,
"rollbackTo": revision
}
kwargs = {
"version": self.version,
"namespace": self.namespace,
"operation": "rollback",
}
r = self.api.post(**self.api_kwargs(data=json.dumps(params), **kwargs))
r.raise_for_status()
return r.text | Produces same action as kubectl rollout undo deployment command.
Input variable is revision to rollback to (in kubectl, --to-revision) | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/objects.py#L197-L222 | null | class Deployment(NamespacedAPIObject, ReplicatedMixin, ScalableMixin):
version = "extensions/v1beta1"
endpoint = "deployments"
kind = "Deployment"
@property
def ready(self):
    # Deployment is "ready" once the controller has observed the latest
    # spec generation and every replica has been updated.
    return (
        self.obj["status"]["observedGeneration"] >= self.obj["metadata"]["generation"] and
        self.obj["status"]["updatedReplicas"] == self.replicas
    )
|
kelproject/pykube | pykube/objects.py | Pod.logs | python | def logs(self, container=None, pretty=None, previous=False,
since_seconds=None, since_time=None, timestamps=False,
tail_lines=None, limit_bytes=None):
log_call = "log"
params = {}
if container is not None:
params["container"] = container
if pretty is not None:
params["pretty"] = pretty
if previous:
params["previous"] = "true"
if since_seconds is not None and since_time is None:
params["sinceSeconds"] = int(since_seconds)
elif since_time is not None and since_seconds is None:
params["sinceTime"] = since_time
if timestamps:
params["timestamps"] = "true"
if tail_lines is not None:
params["tailLines"] = int(tail_lines)
if limit_bytes is not None:
params["limitBytes"] = int(limit_bytes)
query_string = urlencode(params)
log_call += "?{}".format(query_string) if query_string else ""
kwargs = {
"version": self.version,
"namespace": self.namespace,
"operation": log_call,
}
r = self.api.get(**self.api_kwargs(**kwargs))
r.raise_for_status()
return r.text | Produces the same result as calling kubectl logs pod/<pod-name>.
Check parameters meaning at
http://kubernetes.io/docs/api-reference/v1/operations/,
part 'read log of the specified Pod'. The result is plain text. | train | https://github.com/kelproject/pykube/blob/e8a46298a592ad9037587afb707ac75b3114eff9/pykube/objects.py#L333-L370 | null | class Pod(NamespacedAPIObject):
version = "v1"
endpoint = "pods"
kind = "Pod"
@property
def ready(self):
    # True when the pod's "Ready" condition exists and reports "True".
    cs = self.obj["status"].get("conditions", [])
    condition = next((c for c in cs if c["type"] == "Ready"), None)
    return condition is not None and condition["status"] == "True"
|
coleifer/peewee | playhouse/sqlite_ext.py | ClosureTable | python | def ClosureTable(model_class, foreign_key=None, referencing_class=None,
referencing_key=None):
if referencing_class is None:
referencing_class = model_class
if foreign_key is None:
for field_obj in model_class._meta.refs:
if field_obj.rel_model is model_class:
foreign_key = field_obj
break
else:
raise ValueError('Unable to find self-referential foreign key.')
source_key = model_class._meta.primary_key
if referencing_key is None:
referencing_key = source_key
class BaseClosureTable(VirtualModel):
depth = VirtualField(IntegerField)
id = VirtualField(IntegerField)
idcolumn = VirtualField(TextField)
parentcolumn = VirtualField(TextField)
root = VirtualField(IntegerField)
tablename = VirtualField(TextField)
class Meta:
extension_module = 'transitive_closure'
@classmethod
def descendants(cls, node, depth=None, include_node=False):
query = (model_class
.select(model_class, cls.depth.alias('depth'))
.join(cls, on=(source_key == cls.id))
.where(cls.root == node)
.objects())
if depth is not None:
query = query.where(cls.depth == depth)
elif not include_node:
query = query.where(cls.depth > 0)
return query
@classmethod
def ancestors(cls, node, depth=None, include_node=False):
query = (model_class
.select(model_class, cls.depth.alias('depth'))
.join(cls, on=(source_key == cls.root))
.where(cls.id == node)
.objects())
if depth:
query = query.where(cls.depth == depth)
elif not include_node:
query = query.where(cls.depth > 0)
return query
@classmethod
def siblings(cls, node, include_node=False):
if referencing_class is model_class:
# self-join
fk_value = node.__data__.get(foreign_key.name)
query = model_class.select().where(foreign_key == fk_value)
else:
# siblings as given in reference_class
siblings = (referencing_class
.select(referencing_key)
.join(cls, on=(foreign_key == cls.root))
.where((cls.id == node) & (cls.depth == 1)))
# the according models
query = (model_class
.select()
.where(source_key << siblings)
.objects())
if not include_node:
query = query.where(source_key != node)
return query
class Meta:
database = referencing_class._meta.database
options = {
'tablename': referencing_class._meta.table_name,
'idcolumn': referencing_key.column_name,
'parentcolumn': foreign_key.column_name}
primary_key = False
name = '%sClosure' % model_class.__name__
return type(name, (BaseClosureTable,), {'Meta': Meta}) | Model factory for the transitive closure extension. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/sqlite_ext.py#L719-L807 | null | import json
import math
import re
import struct
import sys
from peewee import *
from peewee import ColumnBase
from peewee import EnclosedNodeList
from peewee import Entity
from peewee import Expression
from peewee import Node
from peewee import NodeList
from peewee import OP
from peewee import VirtualField
from peewee import merge_dict
from peewee import sqlite3
try:
from playhouse._sqlite_ext import (
backup,
backup_to_file,
Blob,
ConnectionHelper,
register_bloomfilter,
register_hash_functions,
register_rank_functions,
sqlite_get_db_status,
sqlite_get_status,
TableFunction,
ZeroBlob,
)
CYTHON_SQLITE_EXTENSIONS = True
except ImportError:
CYTHON_SQLITE_EXTENSIONS = False
if sys.version_info[0] == 3:
basestring = str
FTS3_MATCHINFO = 'pcx'
FTS4_MATCHINFO = 'pcnalx'
if sqlite3 is not None:
FTS_VERSION = 4 if sqlite3.sqlite_version_info[:3] >= (3, 7, 4) else 3
else:
FTS_VERSION = 3
FTS5_MIN_SQLITE_VERSION = (3, 9, 0)
class RowIDField(AutoField):
    """Auto field mapped onto SQLite's implicit ``rowid`` column."""
    auto_increment = True
    column_name = name = required_name = 'rowid'

    def bind(self, model, name, *args):
        # Enforce that the model attribute is literally named "rowid".
        if name != self.required_name:
            raise ValueError('%s must be named "%s".' %
                             (type(self), self.required_name))
        super(RowIDField, self).bind(model, name, *args)
class DocIDField(RowIDField):
    """Same as RowIDField but named ``docid`` (the FTS3/4 convention)."""
    column_name = name = required_name = 'docid'


class AutoIncrementField(AutoField):
    def ddl(self, ctx):
        # Append the AUTOINCREMENT keyword to the normal AutoField DDL.
        node_list = super(AutoIncrementField, self).ddl(ctx)
        return NodeList((node_list, SQL('AUTOINCREMENT')))
class JSONPath(ColumnBase):
    """Lazily-built reference to a location inside a JSONField value.

    Instances are immutable: indexing returns a new JSONPath with one
    more path component. Renders as json_extract(field, '$...') in SQL.
    """

    def __init__(self, field, path=None):
        super(JSONPath, self).__init__()
        self._field = field
        self._path = path or ()

    @property
    def path(self):
        # JSON1-style path string, e.g. '$.items[0].name'.
        return Value('$%s' % ''.join(self._path))

    def __getitem__(self, idx):
        # Integers address array elements, anything else object keys.
        if isinstance(idx, int):
            item = '[%s]' % idx
        else:
            item = '.%s' % idx
        return JSONPath(self._field, self._path + (item,))

    def set(self, value, as_json=None):
        # Containers are serialized so SQLite stores real JSON rather
        # than a Python repr string.
        if as_json or isinstance(value, (list, dict)):
            value = fn.json(self._field._json_dumps(value))
        return fn.json_set(self._field, self.path, value)

    def update(self, value):
        # Merge-style update via SQLite's json_patch().
        return self.set(fn.json_patch(self, self._field._json_dumps(value)))

    def remove(self):
        return fn.json_remove(self._field, self.path)

    def json_type(self):
        return fn.json_type(self._field, self.path)

    def length(self):
        return fn.json_array_length(self._field, self.path)

    def children(self):
        return fn.json_each(self._field, self.path)

    def tree(self):
        return fn.json_tree(self._field, self.path)

    def __sql__(self, ctx):
        # With no path components, render the bare field itself.
        return ctx.sql(fn.json_extract(self._field, self.path)
                       if self._path else self._field)
class JSONField(TextField):
    """Text field whose value is (de)serialized as JSON.

    Custom `json_dumps` / `json_loads` callables may be supplied; they
    default to the stdlib json module.
    """
    field_type = 'JSON'

    def __init__(self, json_dumps=None, json_loads=None, **kwargs):
        self._json_dumps = json_dumps or json.dumps
        self._json_loads = json_loads or json.loads
        super(JSONField, self).__init__(**kwargs)

    def python_value(self, value):
        # Values that fail to parse are handed back unchanged.
        if value is not None:
            try:
                return self._json_loads(value)
            except (TypeError, ValueError):
                return value

    def db_value(self, value):
        # SQL nodes pass through untouched; everything else is dumped.
        if value is not None:
            if not isinstance(value, Node):
                value = fn.json(self._json_dumps(value))
            return value

    def _e(op):
        # Factory producing rich-comparison methods; only invoked below
        # during class construction.
        def inner(self, rhs):
            if isinstance(rhs, (list, dict)):
                rhs = Value(rhs, converter=self.db_value, unpack=False)
            return Expression(self, op, rhs)
        return inner
    __eq__ = _e(OP.EQ)
    __ne__ = _e(OP.NE)
    __gt__ = _e(OP.GT)
    __ge__ = _e(OP.GTE)
    __lt__ = _e(OP.LT)
    __le__ = _e(OP.LTE)
    __hash__ = Field.__hash__

    def __getitem__(self, item):
        # field['key'][0] builds a JSONPath into the stored document.
        return JSONPath(self)[item]

    def set(self, value, as_json=None):
        return JSONPath(self).set(value, as_json)

    def update(self, data):
        return JSONPath(self).update(data)

    def remove(self):
        return JSONPath(self).remove()

    def json_type(self):
        return fn.json_type(self)

    def length(self):
        return fn.json_array_length(self)

    def children(self):
        """
        Schema of `json_each` and `json_tree`:

        key,
        value,
        type TEXT (object, array, string, etc),
        atom (value for primitive/scalar types, NULL for array and object)
        id INTEGER (unique identifier for element)
        parent INTEGER (unique identifier of parent element or NULL)
        fullkey TEXT (full path describing element)
        path TEXT (path to the container of the current element)
        json JSON hidden (1st input parameter to function)
        root TEXT hidden (2nd input parameter, path at which to start)
        """
        return fn.json_each(self)

    def tree(self):
        return fn.json_tree(self)
class SearchField(Field):
    """Column of an FTS virtual table; always nullable, and only
    `unindexed` / `column_name` may be customized."""

    def __init__(self, unindexed=False, column_name=None, **k):
        # Reject any other Field kwargs explicitly rather than silently
        # ignoring them.
        if k:
            raise ValueError('SearchField does not accept these keyword '
                             'arguments: %s.' % sorted(k))
        super(SearchField, self).__init__(unindexed=unindexed,
                                          column_name=column_name, null=True)

    def match(self, term):
        # Build a "<column> MATCH term" expression.
        return match(self, term)
class VirtualTableSchemaManager(SchemaManager):
    """SchemaManager that emits CREATE VIRTUAL TABLE statements for
    VirtualModel subclasses, delegating to the default behavior for
    ordinary models."""

    def _create_virtual_table(self, safe=True, **options):
        options = self.model.clean_options(
            merge_dict(self.model._meta.options, options))

        # Structure:
        # CREATE VIRTUAL TABLE <model>
        # USING <extension_module>
        # ([prefix_arguments, ...] fields, ... [arguments, ...], [options...])
        ctx = self._create_context()
        ctx.literal('CREATE VIRTUAL TABLE ')
        if safe:
            ctx.literal('IF NOT EXISTS ')
        (ctx
         .sql(self.model)
         .literal(' USING '))

        ext_module = self.model._meta.extension_module
        if isinstance(ext_module, Node):
            # The extension module is itself a SQL node; render it as-is.
            return ctx.sql(ext_module)

        ctx.sql(SQL(ext_module)).literal(' ')
        arguments = []
        meta = self.model._meta

        if meta.prefix_arguments:
            arguments.extend([SQL(a) for a in meta.prefix_arguments])

        # Constraints, data-types, foreign and primary keys are all omitted.
        for field in meta.sorted_fields:
            if isinstance(field, (RowIDField)) or field._hidden:
                continue
            field_def = [Entity(field.column_name)]
            if field.unindexed:
                field_def.append(SQL('UNINDEXED'))
            arguments.append(NodeList(field_def))

        if meta.arguments:
            arguments.extend([SQL(a) for a in meta.arguments])

        if options:
            arguments.extend(self._create_table_option_sql(options))
        return ctx.sql(EnclosedNodeList(arguments))

    def _create_table(self, safe=True, **options):
        # Only VirtualModel subclasses get the virtual-table DDL path.
        if issubclass(self.model, VirtualModel):
            return self._create_virtual_table(safe, **options)
        return super(VirtualTableSchemaManager, self)._create_table(
            safe, **options)
class VirtualModel(Model):
    """Base class for models backed by a SQLite virtual table."""

    class Meta:
        arguments = None
        extension_module = None
        prefix_arguments = None
        primary_key = False
        schema_manager_class = VirtualTableSchemaManager

    @classmethod
    def clean_options(cls, options):
        # Hook for subclasses to normalize CREATE VIRTUAL TABLE options.
        return options
class BaseFTSModel(VirtualModel):
    """Shared table-option post-processing for the FTS model classes."""

    @classmethod
    def clean_options(cls, options):
        content = options.get('content')
        prefix = options.get('prefix')
        tokenize = options.get('tokenize')

        if isinstance(content, basestring) and content == '':
            # Special-case content-less full-text search tables.
            options['content'] = "''"
        elif isinstance(content, Field):
            # Special-case to ensure fields are fully-qualified.
            options['content'] = Entity(content.model._meta.table_name,
                                        content.column_name)

        if prefix:
            # Accept either a list/tuple of prefix lengths or a string.
            if isinstance(prefix, (list, tuple)):
                prefix = ','.join([str(i) for i in prefix])
            options['prefix'] = "'%s'" % prefix.strip("' ")

        if tokenize and cls._meta.extension_module.lower() == 'fts5':
            # Tokenizers need to be in quoted string for FTS5, but not for FTS3
            # or FTS4.
            options['tokenize'] = '"%s"' % tokenize
        return options
class FTSModel(BaseFTSModel):
    """
    VirtualModel class for creating tables that use either the FTS3 or FTS4
    search extensions. Peewee automatically determines which version of the
    FTS extension is supported and will use FTS4 if possible.
    """
    # FTS3/4 uses "docid" in the same way a normal table uses "rowid".
    docid = DocIDField()

    class Meta:
        extension_module = 'FTS%s' % FTS_VERSION

    @classmethod
    def _fts_cmd(cls, cmd):
        # FTS special commands are issued as INSERTs into the table's own
        # hidden command column. `cmd` is always an internal literal, not
        # user input.
        tbl = cls._meta.table_name
        res = cls._meta.database.execute_sql(
            "INSERT INTO %s(%s) VALUES('%s');" % (tbl, tbl, cmd))
        return res.fetchone()

    @classmethod
    def optimize(cls):
        return cls._fts_cmd('optimize')

    @classmethod
    def rebuild(cls):
        return cls._fts_cmd('rebuild')

    @classmethod
    def integrity_check(cls):
        return cls._fts_cmd('integrity-check')

    @classmethod
    def merge(cls, blocks=200, segments=8):
        return cls._fts_cmd('merge=%s,%s' % (blocks, segments))

    @classmethod
    def automerge(cls, state=True):
        return cls._fts_cmd('automerge=%s' % (state and '1' or '0'))

    @classmethod
    def match(cls, term):
        """
        Generate a `MATCH` expression appropriate for searching this table.
        """
        return match(cls._meta.entity, term)

    # Ranking helpers; each wraps matchinfo() output with a scoring UDF.
    @classmethod
    def rank(cls, *weights):
        matchinfo = fn.matchinfo(cls._meta.entity, FTS3_MATCHINFO)
        return fn.fts_rank(matchinfo, *weights)

    @classmethod
    def bm25(cls, *weights):
        match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
        return fn.fts_bm25(match_info, *weights)

    @classmethod
    def bm25f(cls, *weights):
        match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
        return fn.fts_bm25f(match_info, *weights)

    @classmethod
    def lucene(cls, *weights):
        match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
        return fn.fts_lucene(match_info, *weights)

    @classmethod
    def _search(cls, term, weights, with_score, score_alias, score_fn,
                explicit_ordering):
        # Shared implementation behind search/search_bm25/etc.
        if not weights:
            rank = score_fn()
        elif isinstance(weights, dict):
            weight_args = []
            for field in cls._meta.sorted_fields:
                # Attempt to get the specified weight of the field by looking
                # it up using it's field instance followed by name.
                field_weight = weights.get(field, weights.get(field.name, 1.0))
                weight_args.append(field_weight)
            rank = score_fn(*weight_args)
        else:
            rank = score_fn(*weights)

        selection = ()
        order_by = rank
        if with_score:
            selection = (cls, rank.alias(score_alias))
        if with_score and not explicit_ordering:
            # Order by the alias to avoid computing the score twice.
            order_by = SQL(score_alias)
        return (cls
                .select(*selection)
                .where(cls.match(term))
                .order_by(order_by))

    @classmethod
    def search(cls, term, weights=None, with_score=False, score_alias='score',
               explicit_ordering=False):
        """Full-text search using selected `term`."""
        return cls._search(
            term,
            weights,
            with_score,
            score_alias,
            cls.rank,
            explicit_ordering)

    @classmethod
    def search_bm25(cls, term, weights=None, with_score=False,
                    score_alias='score', explicit_ordering=False):
        """Full-text search for selected `term` using BM25 algorithm."""
        return cls._search(
            term,
            weights,
            with_score,
            score_alias,
            cls.bm25,
            explicit_ordering)

    @classmethod
    def search_bm25f(cls, term, weights=None, with_score=False,
                     score_alias='score', explicit_ordering=False):
        """Full-text search for selected `term` using the BM25f algorithm."""
        return cls._search(
            term,
            weights,
            with_score,
            score_alias,
            cls.bm25f,
            explicit_ordering)

    @classmethod
    def search_lucene(cls, term, weights=None, with_score=False,
                      score_alias='score', explicit_ordering=False):
        """Full-text search for selected `term` using the Lucene scoring
        function."""
        return cls._search(
            term,
            weights,
            with_score,
            score_alias,
            cls.lucene,
            explicit_ordering)
# Character sets used by FTS5Model.validate_query / clean_query below:
# only the characters in _alphanum are allowed in an unquoted FTS5 query
# token; every other ASCII character is considered invalid.
_alphabet = 'abcdefghijklmnopqrstuvwxyz'
_alphanum = (set('\t ,"(){}*:_+0123456789') |
             set(_alphabet) |
             set(_alphabet.upper()) |
             set((chr(26),)))
_invalid_ascii = set(chr(p) for p in range(128) if chr(p) not in _alphanum)
# One query token per match: either a run of non-space, non-quote
# characters or a double-quoted string (with backslash escapes).
_quote_re = re.compile('(?:[^\s"]|"(?:\\.|[^"])*")+')
class FTS5Model(BaseFTSModel):
"""
Requires SQLite >= 3.9.0.
Table options:
content: table name of external content, or empty string for "contentless"
content_rowid: column name of external content primary key
prefix: integer(s). Ex: '2' or '2 3 4'
tokenize: porter, unicode61, ascii. Ex: 'porter unicode61'
The unicode tokenizer supports the following parameters:
* remove_diacritics (1 or 0, default is 1)
* tokenchars (string of characters, e.g. '-_'
* separators (string of characters)
Parameters are passed as alternating parameter name and value, so:
{'tokenize': "unicode61 remove_diacritics 0 tokenchars '-_'"}
Content-less tables:
If you don't need the full-text content in it's original form, you can
specify a content-less table. Searches and auxiliary functions will work
as usual, but the only values returned when SELECT-ing can be rowid. Also
content-less tables do not support UPDATE or DELETE.
External content tables:
You can set up triggers to sync these, e.g.
-- Create a table. And an external content fts5 table to index it.
CREATE TABLE tbl(a INTEGER PRIMARY KEY, b);
CREATE VIRTUAL TABLE ft USING fts5(b, content='tbl', content_rowid='a');
-- Triggers to keep the FTS index up to date.
CREATE TRIGGER tbl_ai AFTER INSERT ON tbl BEGIN
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
CREATE TRIGGER tbl_ad AFTER DELETE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
END;
CREATE TRIGGER tbl_au AFTER UPDATE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
Built-in auxiliary functions:
* bm25(tbl[, weight_0, ... weight_n])
* highlight(tbl, col_idx, prefix, suffix)
* snippet(tbl, col_idx, prefix, suffix, ?, max_tokens)
"""
# FTS5 does not support declared primary keys, but we can use the
# implicit rowid.
rowid = RowIDField()
class Meta:
extension_module = 'fts5'
_error_messages = {
'field_type': ('Besides the implicit `rowid` column, all columns must '
'be instances of SearchField'),
'index': 'Secondary indexes are not supported for FTS5 models',
'pk': 'FTS5 models must use the default `rowid` primary key',
}
@classmethod
def validate_model(cls):
# Perform FTS5-specific validation and options post-processing.
if cls._meta.primary_key.name != 'rowid':
raise ImproperlyConfigured(cls._error_messages['pk'])
for field in cls._meta.fields.values():
if not isinstance(field, (SearchField, RowIDField)):
raise ImproperlyConfigured(cls._error_messages['field_type'])
if cls._meta.indexes:
raise ImproperlyConfigured(cls._error_messages['index'])
@classmethod
def fts5_installed(cls):
if sqlite3.sqlite_version_info[:3] < FTS5_MIN_SQLITE_VERSION:
return False
# Test in-memory DB to determine if the FTS5 extension is installed.
tmp_db = sqlite3.connect(':memory:')
try:
tmp_db.execute('CREATE VIRTUAL TABLE fts5test USING fts5 (data);')
except:
try:
tmp_db.enable_load_extension(True)
tmp_db.load_extension('fts5')
except:
return False
else:
cls._meta.database.load_extension('fts5')
finally:
tmp_db.close()
return True
@staticmethod
def validate_query(query):
"""
Simple helper function to indicate whether a search query is a
valid FTS5 query. Note: this simply looks at the characters being
used, and is not guaranteed to catch all problematic queries.
"""
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
continue
if set(token) & _invalid_ascii:
return False
return True
@staticmethod
def clean_query(query, replace=chr(26)):
"""
Clean a query of invalid tokens.
"""
accum = []
any_invalid = False
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
accum.append(token)
continue
token_set = set(token)
invalid_for_token = token_set & _invalid_ascii
if invalid_for_token:
any_invalid = True
for c in invalid_for_token:
token = token.replace(c, replace)
accum.append(token)
if any_invalid:
return ' '.join(accum)
return query
    @classmethod
    def match(cls, term):
        """
        Generate a `MATCH` expression appropriate for searching this table.
        """
        return match(cls._meta.entity, term)
    @classmethod
    def rank(cls, *args):
        # With explicit weights, delegate to bm25(); otherwise use the
        # built-in FTS5 "rank" column.
        return cls.bm25(*args) if args else SQL('rank')
    @classmethod
    def bm25(cls, *weights):
        # FTS5 provides bm25() as a built-in auxiliary function.
        return fn.bm25(cls._meta.entity, *weights)
    @classmethod
    def search(cls, term, weights=None, with_score=False, score_alias='score',
               explicit_ordering=False):
        """Full-text search using selected `term`."""
        return cls.search_bm25(
            FTS5Model.clean_query(term),
            weights,
            with_score,
            score_alias,
            explicit_ordering)
    @classmethod
    def search_bm25(cls, term, weights=None, with_score=False,
                    score_alias='score', explicit_ordering=False):
        """
        Full-text search using selected `term`.

        `weights` may be a dict mapping field (or field name) to weight,
        or a plain sequence of positional weights. When `with_score` is
        true the rank is selected under `score_alias` and, unless
        `explicit_ordering` is set, ordering references that alias.
        """
        if not weights:
            rank = SQL('rank')
        elif isinstance(weights, dict):
            weight_args = []
            # Only indexed search fields participate in ranking; missing
            # entries default to a weight of 1.0.
            for field in cls._meta.sorted_fields:
                if isinstance(field, SearchField) and not field.unindexed:
                    weight_args.append(
                        weights.get(field, weights.get(field.name, 1.0)))
            rank = fn.bm25(cls._meta.entity, *weight_args)
        else:
            rank = fn.bm25(cls._meta.entity, *weights)
        selection = ()
        order_by = rank
        if with_score:
            selection = (cls, rank.alias(score_alias))
        if with_score and not explicit_ordering:
            order_by = SQL(score_alias)
        return (cls
                .select(*selection)
                .where(cls.match(FTS5Model.clean_query(term)))
                .order_by(order_by))
    @classmethod
    def _fts_cmd_sql(cls, cmd, **extra_params):
        # Build the FTS5 special-command INSERT, e.g.:
        #   INSERT INTO tbl (tbl, rank) VALUES ('merge', 4)
        tbl = cls._meta.entity
        columns = [tbl]
        values = [cmd]
        for key, value in extra_params.items():
            columns.append(Entity(key))
            values.append(value)
        return NodeList((
            SQL('INSERT INTO'),
            cls._meta.entity,
            EnclosedNodeList(columns),
            SQL('VALUES'),
            EnclosedNodeList(values)))
    @classmethod
    def _fts_cmd(cls, cmd, **extra_params):
        # Execute an FTS5 special command against the model's database.
        query = cls._fts_cmd_sql(cmd, **extra_params)
        return cls._meta.database.execute(query)
    @classmethod
    def automerge(cls, level):
        """Set the FTS5 automerge level (0 disables, max 16)."""
        if not (0 <= level <= 16):
            raise ValueError('level must be between 0 and 16')
        return cls._fts_cmd('automerge', rank=level)
    @classmethod
    def merge(cls, npages):
        """Merge up to `npages` of b-tree segment pages."""
        return cls._fts_cmd('merge', rank=npages)
    @classmethod
    def set_pgsz(cls, pgsz):
        """Set the FTS5 index page size."""
        return cls._fts_cmd('pgsz', rank=pgsz)
    @classmethod
    def set_rank(cls, rank_expression):
        """Configure the rank function used by the `rank` column."""
        return cls._fts_cmd('rank', rank=rank_expression)
    @classmethod
    def delete_all(cls):
        """Delete all rows from the full-text index."""
        return cls._fts_cmd('delete-all')
    @classmethod
    def VocabModel(cls, table_type='row', table=None):
        """
        Return (creating and caching on first use) a VirtualModel bound to
        the fts5vocab virtual table for this FTS5 index.

        `table_type` selects the vocab table flavor ('row', 'col' or
        'instance'); `table` optionally overrides the vocab table name.
        """
        if table_type not in ('row', 'col', 'instance'):
            raise ValueError('table_type must be either "row", "col" or '
                             '"instance".')
        attr = '_vocab_model_%s' % table_type
        # One cached vocab model per table_type, stored on the class.
        if not hasattr(cls, attr):
            class Meta:
                database = cls._meta.database
                table_name = table or cls._meta.table_name + '_v'
                extension_module = fn.fts5vocab(
                    cls._meta.entity,
                    SQL(table_type))
            attrs = {
                'term': VirtualField(TextField),
                'doc': IntegerField(),
                'cnt': IntegerField(),
                'rowid': RowIDField(),
                'Meta': Meta,
            }
            # 'col' and 'instance' variants expose extra columns.
            if table_type == 'col':
                attrs['col'] = VirtualField(TextField)
            elif table_type == 'instance':
                attrs['offset'] = VirtualField(IntegerField)
            class_name = '%sVocab' % cls.__name__
            setattr(cls, attr, type(class_name, (VirtualModel,), attrs))
        return getattr(cls, attr)
class LSMTable(VirtualModel):
    """
    VirtualModel for the SQLite lsm1 key/value extension. Supports
    dict-like access via get_by_id / set_by_id / delete_by_id, including
    slice and expression lookups.
    """
    class Meta:
        extension_module = 'lsm1'
        filename = None
    @classmethod
    def clean_options(cls, options):
        filename = cls._meta.filename
        if not filename:
            raise ValueError('LSM1 extension requires that you specify a '
                             'filename for the LSM database.')
        else:
            # Quote the filename unless it is already quoted.
            if len(filename) >= 2 and filename[0] != '"':
                filename = '"%s"' % filename
        if not cls._meta.primary_key:
            raise ValueError('LSM1 models must specify a primary-key field.')
        key = cls._meta.primary_key
        if isinstance(key, AutoField):
            raise ValueError('LSM1 models must explicitly declare a primary '
                             'key field.')
        if not isinstance(key, (TextField, BlobField, IntegerField)):
            raise ValueError('LSM1 key must be a TextField, BlobField, or '
                             'IntegerField.')
        key._hidden = True
        # Map the key field to the lsm1 key data-type argument.
        if isinstance(key, IntegerField):
            data_type = 'UINT'
        elif isinstance(key, BlobField):
            data_type = 'BLOB'
        else:
            data_type = 'TEXT'
        cls._meta.prefix_arguments = [filename, '"%s"' % key.name, data_type]
        # Does the key map to a scalar value, or a tuple of values?
        if len(cls._meta.sorted_fields) == 2:
            cls._meta._value_field = cls._meta.sorted_fields[1]
        else:
            cls._meta._value_field = None
        return options
    @classmethod
    def load_extension(cls, path='lsm.so'):
        # Load the lsm1 run-time loadable extension into the database.
        cls._meta.database.load_extension(path)
    @staticmethod
    def slice_to_expr(key, idx):
        # Translate a Python slice into a range expression on the key.
        # Returns None for an unbounded slice [:], meaning "no filter".
        if idx.start is not None and idx.stop is not None:
            return key.between(idx.start, idx.stop)
        elif idx.start is not None:
            return key >= idx.start
        elif idx.stop is not None:
            return key <= idx.stop
    @staticmethod
    def _apply_lookup_to_query(query, key, lookup):
        # Returns (query, is_single_row_lookup).
        if isinstance(lookup, slice):
            expr = LSMTable.slice_to_expr(key, lookup)
            if expr is not None:
                query = query.where(expr)
            return query, False
        elif isinstance(lookup, Expression):
            return query.where(lookup), False
        else:
            return query.where(key == lookup), True
    @classmethod
    def get_by_id(cls, pk):
        query, is_single = cls._apply_lookup_to_query(
            cls.select().namedtuples(),
            cls._meta.primary_key,
            pk)
        if is_single:
            try:
                row = query.get()
            except cls.DoesNotExist:
                # Dict-like semantics: missing key -> KeyError.
                raise KeyError(pk)
            return row[1] if cls._meta._value_field is not None else row
        else:
            return query
    @classmethod
    def set_by_id(cls, key, value):
        if cls._meta._value_field is not None:
            data = {cls._meta._value_field: value}
        elif isinstance(value, tuple):
            # Positional values map onto the non-key fields in order.
            data = {}
            for field, fval in zip(cls._meta.sorted_fields[1:], value):
                data[field] = fval
        elif isinstance(value, dict):
            data = value
        elif isinstance(value, cls):
            # NOTE(review): uses value.__dict__ for model instances --
            # verify this matches how peewee stores row data.
            data = value.__dict__
        data[cls._meta.primary_key] = key
        cls.replace(data).execute()
    @classmethod
    def delete_by_id(cls, pk):
        # Unlike get_by_id, a missing single key does not raise KeyError;
        # the DELETE simply affects zero rows.
        query, is_single = cls._apply_lookup_to_query(
            cls.delete(),
            cls._meta.primary_key,
            pk)
        return query.execute()
# Register a MATCH operator with peewee's operator table for use in
# full-text search expressions.
OP.MATCH = 'MATCH'
def _sqlite_regexp(regex, value):
    """User-defined REGEXP function: True if `regex` matches `value`."""
    return bool(re.search(regex, value))
class SqliteExtDatabase(SqliteDatabase):
    """
    SqliteDatabase subclass that registers the extra user-defined
    functions (ranking, hashing, regexp, bloom-filter, json_contains)
    provided by this module, preferring the C implementations when the
    playhouse._sqlite_ext shared library is available.
    """
    def __init__(self, database, c_extensions=None, rank_functions=True,
                 hash_functions=False, regexp_function=False,
                 bloomfilter=False, json_contains=False, *args, **kwargs):
        super(SqliteExtDatabase, self).__init__(database, *args, **kwargs)
        self._row_factory = None
        # c_extensions=True demands the C library; None means "use if found".
        if c_extensions and not CYTHON_SQLITE_EXTENSIONS:
            raise ImproperlyConfigured('SqliteExtDatabase initialized with '
                                       'C extensions, but shared library was '
                                       'not found!')
        prefer_c = CYTHON_SQLITE_EXTENSIONS and (c_extensions is not False)
        if rank_functions:
            if prefer_c:
                register_rank_functions(self)
            else:
                # Pure-python fallbacks for the ranking functions.
                self.register_function(bm25, 'fts_bm25')
                self.register_function(rank, 'fts_rank')
                self.register_function(bm25, 'fts_bm25f')  # Fall back to bm25.
                self.register_function(bm25, 'fts_lucene')
        if hash_functions:
            if not prefer_c:
                raise ValueError('C extension required to register hash '
                                 'functions.')
            register_hash_functions(self)
        if regexp_function:
            self.register_function(_sqlite_regexp, 'regexp', 2)
        if bloomfilter:
            if not prefer_c:
                raise ValueError('C extension required to use bloomfilter.')
            register_bloomfilter(self)
        if json_contains:
            self.register_function(_json_contains, 'json_contains')
        self._c_extensions = prefer_c
    def _add_conn_hooks(self, conn):
        # Apply the custom row factory (if any) to each new connection.
        super(SqliteExtDatabase, self)._add_conn_hooks(conn)
        if self._row_factory:
            conn.row_factory = self._row_factory
    def row_factory(self, fn):
        # Set a sqlite3 row factory applied to subsequent connections.
        self._row_factory = fn
if CYTHON_SQLITE_EXTENSIONS:
    # Flags for sqlite3_status() -- process-wide counters.
    SQLITE_STATUS_MEMORY_USED = 0
    SQLITE_STATUS_PAGECACHE_USED = 1
    SQLITE_STATUS_PAGECACHE_OVERFLOW = 2
    SQLITE_STATUS_SCRATCH_USED = 3
    SQLITE_STATUS_SCRATCH_OVERFLOW = 4
    SQLITE_STATUS_MALLOC_SIZE = 5
    SQLITE_STATUS_PARSER_STACK = 6
    SQLITE_STATUS_PAGECACHE_SIZE = 7
    SQLITE_STATUS_SCRATCH_SIZE = 8
    SQLITE_STATUS_MALLOC_COUNT = 9
    # Flags for sqlite3_db_status() -- per-connection counters.
    SQLITE_DBSTATUS_LOOKASIDE_USED = 0
    SQLITE_DBSTATUS_CACHE_USED = 1
    SQLITE_DBSTATUS_SCHEMA_USED = 2
    SQLITE_DBSTATUS_STMT_USED = 3
    SQLITE_DBSTATUS_LOOKASIDE_HIT = 4
    SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE = 5
    SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL = 6
    SQLITE_DBSTATUS_CACHE_HIT = 7
    SQLITE_DBSTATUS_CACHE_MISS = 8
    SQLITE_DBSTATUS_CACHE_WRITE = 9
    SQLITE_DBSTATUS_DEFERRED_FKS = 10
    #SQLITE_DBSTATUS_CACHE_USED_SHARED = 11
    def __status__(flag, return_highwater=False):
        """
        Expose a sqlite3_status() call for a particular flag as a property of
        the Database object.

        When `return_highwater` is true only the highwater value is
        returned, otherwise the (current, highwater) pair.
        """
        def getter(self):
            result = sqlite_get_status(flag)
            return result[1] if return_highwater else result
        return property(getter)
    def __dbstatus__(flag, return_highwater=False, return_current=False):
        """
        Expose a sqlite3_dbstatus() call for a particular flag as a property of
        the Database instance. Unlike sqlite3_status(), the dbstatus properties
        pertain to the current connection.

        Returns the current value, the highwater value, or the
        (current, highwater) pair depending on the flags.
        """
        def getter(self):
            if self._state.conn is None:
                raise ImproperlyConfigured('database connection not opened.')
            result = sqlite_get_db_status(self._state.conn, flag)
            if return_current:
                return result[0]
            return result[1] if return_highwater else result
        return property(getter)
    class CSqliteExtDatabase(SqliteExtDatabase):
        """
        SqliteExtDatabase subclass backed by the C ConnectionHelper,
        adding commit/rollback/update hooks, online backup, incremental
        blob I/O, and sqlite3_status / sqlite3_db_status properties.
        """
        def __init__(self, *args, **kwargs):
            self._conn_helper = None
            self._commit_hook = self._rollback_hook = self._update_hook = None
            self._replace_busy_handler = False
            super(CSqliteExtDatabase, self).__init__(*args, **kwargs)
        def init(self, database, replace_busy_handler=False, **kwargs):
            super(CSqliteExtDatabase, self).init(database, **kwargs)
            self._replace_busy_handler = replace_busy_handler
        def _close(self, conn):
            # Detach any registered hooks before closing the connection.
            if self._commit_hook:
                self._conn_helper.set_commit_hook(None)
            if self._rollback_hook:
                self._conn_helper.set_rollback_hook(None)
            if self._update_hook:
                self._conn_helper.set_update_hook(None)
            return super(CSqliteExtDatabase, self)._close(conn)
        def _add_conn_hooks(self, conn):
            # (Re-)apply configured hooks to every new connection.
            super(CSqliteExtDatabase, self)._add_conn_hooks(conn)
            self._conn_helper = ConnectionHelper(conn)
            if self._commit_hook is not None:
                self._conn_helper.set_commit_hook(self._commit_hook)
            if self._rollback_hook is not None:
                self._conn_helper.set_rollback_hook(self._rollback_hook)
            if self._update_hook is not None:
                self._conn_helper.set_update_hook(self._update_hook)
            if self._replace_busy_handler:
                # Busy handler timeout is specified in milliseconds.
                timeout = self._timeout or 5
                self._conn_helper.set_busy_handler(timeout * 1000)
        def on_commit(self, fn):
            # Usable as a decorator: @db.on_commit
            self._commit_hook = fn
            if not self.is_closed():
                self._conn_helper.set_commit_hook(fn)
            return fn
        def on_rollback(self, fn):
            # Usable as a decorator: @db.on_rollback
            self._rollback_hook = fn
            if not self.is_closed():
                self._conn_helper.set_rollback_hook(fn)
            return fn
        def on_update(self, fn):
            # Usable as a decorator: @db.on_update
            self._update_hook = fn
            if not self.is_closed():
                self._conn_helper.set_update_hook(fn)
            return fn
        def changes(self):
            return self._conn_helper.changes()
        @property
        def last_insert_rowid(self):
            return self._conn_helper.last_insert_rowid()
        @property
        def autocommit(self):
            return self._conn_helper.autocommit()
        def backup(self, destination, pages=None, name=None, progress=None):
            # Online backup into another open database.
            return backup(self.connection(), destination.connection(),
                          pages=pages, name=name, progress=progress)
        def backup_to_file(self, filename, pages=None, name=None,
                           progress=None):
            # Online backup directly to a file on disk.
            return backup_to_file(self.connection(), filename, pages=pages,
                                  name=name, progress=progress)
        def blob_open(self, table, column, rowid, read_only=False):
            # Open an incremental blob handle on the given cell.
            return Blob(self, table, column, rowid, read_only)
        # Status properties.
        memory_used = __status__(SQLITE_STATUS_MEMORY_USED)
        malloc_size = __status__(SQLITE_STATUS_MALLOC_SIZE, True)
        malloc_count = __status__(SQLITE_STATUS_MALLOC_COUNT)
        pagecache_used = __status__(SQLITE_STATUS_PAGECACHE_USED)
        pagecache_overflow = __status__(SQLITE_STATUS_PAGECACHE_OVERFLOW)
        pagecache_size = __status__(SQLITE_STATUS_PAGECACHE_SIZE, True)
        scratch_used = __status__(SQLITE_STATUS_SCRATCH_USED)
        scratch_overflow = __status__(SQLITE_STATUS_SCRATCH_OVERFLOW)
        scratch_size = __status__(SQLITE_STATUS_SCRATCH_SIZE, True)
        # Connection status properties.
        lookaside_used = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_USED)
        lookaside_hit = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_HIT, True)
        lookaside_miss = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE,
                                      True)
        lookaside_miss_full = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL,
                                           True)
        cache_used = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED, False, True)
        #cache_used_shared = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED_SHARED,
        #                                 False, True)
        schema_used = __dbstatus__(SQLITE_DBSTATUS_SCHEMA_USED, False, True)
        statement_used = __dbstatus__(SQLITE_DBSTATUS_STMT_USED, False, True)
        cache_hit = __dbstatus__(SQLITE_DBSTATUS_CACHE_HIT, False, True)
        cache_miss = __dbstatus__(SQLITE_DBSTATUS_CACHE_MISS, False, True)
        cache_write = __dbstatus__(SQLITE_DBSTATUS_CACHE_WRITE, False, True)
def match(lhs, rhs):
    """Build an SQL "lhs MATCH rhs" expression for full-text search."""
    return Expression(lhs, OP.MATCH, rhs)
def _parse_match_info(buf):
    # See http://sqlite.org/fts3.html#matchinfo
    # The matchinfo blob is a flat array of native-endian unsigned
    # 32-bit integers; decode it into a list of ints.
    return [value for (value,) in struct.iter_unpack('@I', buf)]
def get_weights(ncol, raw_weights):
    """
    Return a list of `ncol` per-column weights.

    With no raw weights every column gets a weight of 1. Otherwise the
    given weights apply positionally and unspecified columns default to 0.
    """
    if not raw_weights:
        return [1] * ncol
    weights = [0] * ncol
    for idx, weight in enumerate(raw_weights):
        weights[idx] = weight
    return weights
# Ranking implementation, which parses matchinfo.
def rank(raw_match_info, *raw_weights):
    """
    Score a row from its FTS matchinfo blob (format 'pcx'); registered as
    the 'fts_rank' SQL function. Returns a negated score so that ORDER BY
    ascending yields best matches first.
    """
    # Handle match_info called w/default args 'pcx' - based on the example rank
    # function http://sqlite.org/fts3.html#appendix_a
    match_info = _parse_match_info(raw_match_info)
    score = 0.0
    # p = number of phrases in the query, c = number of columns.
    p, c = match_info[:2]
    weights = get_weights(c, raw_weights)
    # matchinfo X value corresponds to, for each phrase in the search query, a
    # list of 3 values for each column in the search table.
    # So if we have a two-phrase search query and three columns of data, the
    # following would be the layout:
    # p0 : c0=[0, 1, 2],   c1=[3, 4, 5],    c2=[6, 7, 8]
    # p1 : c0=[9, 10, 11], c1=[12, 13, 14], c2=[15, 16, 17]
    for phrase_num in range(p):
        phrase_info_idx = 2 + (phrase_num * c * 3)
        for col_num in range(c):
            weight = weights[col_num]
            if not weight:
                continue
            col_idx = phrase_info_idx + (col_num * 3)
            # The idea is that we count the number of times the phrase appears
            # in this column of the current row, compared to how many times it
            # appears in this column across all rows. The ratio of these values
            # provides a rough way to score based on "high value" terms.
            row_hits = match_info[col_idx]
            all_rows_hits = match_info[col_idx + 1]
            if row_hits > 0:
                score += weight * (float(row_hits) / all_rows_hits)
    # Negated so ascending sort puts the best match first.
    return -score
# Okapi BM25 ranking implementation (FTS4 only).
def bm25(raw_match_info, *args):
    """
    Usage:

        # Format string *must* be pcnalx
        # Second parameter to bm25 specifies the index of the column, on
        # the table being queries.
        bm25(matchinfo(document_tbl, 'pcnalx'), 1) AS rank

    Registered as the 'fts_bm25' SQL function; `args` are optional
    per-column weights. Returns a negated score so ascending ORDER BY
    yields best matches first.
    """
    match_info = _parse_match_info(raw_match_info)
    # Standard Okapi BM25 free parameters.
    K = 1.2
    B = 0.75
    score = 0.0
    P_O, C_O, N_O, A_O = range(4)  # Offsets into the matchinfo buffer.
    term_count = match_info[P_O]  # n
    col_count = match_info[C_O]
    total_docs = match_info[N_O]  # N
    L_O = A_O + col_count
    X_O = L_O + col_count
    weights = get_weights(col_count, args)
    for i in range(term_count):
        for j in range(col_count):
            weight = weights[j]
            if weight == 0:
                continue
            x = X_O + (3 * (j + i * col_count))
            term_frequency = float(match_info[x])  # f(qi, D)
            docs_with_term = float(match_info[x + 2])  # n(qi)
            # log( (N - n(qi) + 0.5) / (n(qi) + 0.5) )
            idf = math.log(
                    (total_docs - docs_with_term + 0.5) /
                    (docs_with_term + 0.5))
            # Clamp non-positive IDF (term in most documents) to a small
            # positive value so common terms still contribute slightly.
            if idf <= 0.0:
                idf = 1e-6
            doc_length = float(match_info[L_O + j])  # |D|
            avg_length = float(match_info[A_O + j]) or 1.  # avgdl
            ratio = doc_length / avg_length
            num = term_frequency * (K + 1)
            b_part = 1 - B + (B * ratio)
            denom = term_frequency + (K * b_part)
            pc_score = idf * (num / denom)
            score += (pc_score * weight)
    # Negated so ascending sort puts the best match first.
    return -score
def _json_contains(src_json, obj_json):
    """
    Return True if the JSON document `src_json` contains `obj_json`.

    Dicts match when each obj key exists in src and its value matches
    recursively; lists match positionally on a prefix of src; scalars
    match by membership (against containers) or equality. Returns False
    for invalid JSON input.
    """
    try:
        obj = json.loads(obj_json)
        src = json.loads(src_json)
    except:
        # Invalid JSON!
        return False

    def contained(obj, src):
        if isinstance(src, dict):
            if isinstance(obj, dict):
                return all(key in src and contained(value, src[key])
                           for key, value in obj.items())
            elif isinstance(obj, list):
                # Every list item must be a key of the src object.
                return all(item in src for item in obj)
            return obj in src
        elif isinstance(src, list):
            if isinstance(obj, dict):
                return False
            elif isinstance(obj, list):
                # Positional prefix match; obj longer than src fails.
                try:
                    return all(contained(obj[i], src[i])
                               for i in range(len(obj)))
                except IndexError:
                    return False
            return obj in src
        return obj == src

    return contained(obj, src)
|
coleifer/peewee | playhouse/sqlite_ext.py | bm25 | python | def bm25(raw_match_info, *args):
match_info = _parse_match_info(raw_match_info)
K = 1.2
B = 0.75
score = 0.0
P_O, C_O, N_O, A_O = range(4) # Offsets into the matchinfo buffer.
term_count = match_info[P_O] # n
col_count = match_info[C_O]
total_docs = match_info[N_O] # N
L_O = A_O + col_count
X_O = L_O + col_count
weights = get_weights(col_count, args)
for i in range(term_count):
for j in range(col_count):
weight = weights[j]
if weight == 0:
continue
x = X_O + (3 * (j + i * col_count))
term_frequency = float(match_info[x]) # f(qi, D)
docs_with_term = float(match_info[x + 2]) # n(qi)
# log( (N - n(qi) + 0.5) / (n(qi) + 0.5) )
idf = math.log(
(total_docs - docs_with_term + 0.5) /
(docs_with_term + 0.5))
if idf <= 0.0:
idf = 1e-6
doc_length = float(match_info[L_O + j]) # |D|
avg_length = float(match_info[A_O + j]) or 1. # avgdl
ratio = doc_length / avg_length
num = term_frequency * (K + 1)
b_part = 1 - B + (B * ratio)
denom = term_frequency + (K * b_part)
pc_score = idf * (num / denom)
score += (pc_score * weight)
return -score | Usage:
# Format string *must* be pcnalx
# Second parameter to bm25 specifies the index of the column, on
# the table being queries.
bm25(matchinfo(document_tbl, 'pcnalx'), 1) AS rank | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/sqlite_ext.py#L1172-L1223 | [
"def _parse_match_info(buf):\n # See http://sqlite.org/fts3.html#matchinfo\n bufsize = len(buf) # Length in bytes.\n return [struct.unpack('@I', buf[i:i+4])[0] for i in range(0, bufsize, 4)]\n",
"def get_weights(ncol, raw_weights):\n if not raw_weights:\n return [1] * ncol\n else:\n ... | import json
import math
import re
import struct
import sys
from peewee import *
from peewee import ColumnBase
from peewee import EnclosedNodeList
from peewee import Entity
from peewee import Expression
from peewee import Node
from peewee import NodeList
from peewee import OP
from peewee import VirtualField
from peewee import merge_dict
from peewee import sqlite3
try:
from playhouse._sqlite_ext import (
backup,
backup_to_file,
Blob,
ConnectionHelper,
register_bloomfilter,
register_hash_functions,
register_rank_functions,
sqlite_get_db_status,
sqlite_get_status,
TableFunction,
ZeroBlob,
)
CYTHON_SQLITE_EXTENSIONS = True
except ImportError:
CYTHON_SQLITE_EXTENSIONS = False
# Python 2/3 compatibility shim: `basestring` does not exist on Python 3.
if sys.version_info[0] == 3:
    basestring = str
# matchinfo() format strings consumed by the rank/bm25 SQL functions.
FTS3_MATCHINFO = 'pcx'
FTS4_MATCHINFO = 'pcnalx'
# FTS4 requires SQLite >= 3.7.4; otherwise fall back to FTS3.
if sqlite3 is not None:
    FTS_VERSION = 4 if sqlite3.sqlite_version_info[:3] >= (3, 7, 4) else 3
else:
    FTS_VERSION = 3
# Minimum SQLite version that can provide the FTS5 extension.
FTS5_MIN_SQLITE_VERSION = (3, 9, 0)
class RowIDField(AutoField):
    """
    Primary-key field mapping to SQLite's implicit `rowid` column.
    The field must be declared with the attribute name 'rowid'.
    """
    auto_increment = True
    column_name = name = required_name = 'rowid'
    def bind(self, model, name, *args):
        # Enforce the required attribute name at model-definition time.
        if name != self.required_name:
            raise ValueError('%s must be named "%s".' %
                             (type(self), self.required_name))
        super(RowIDField, self).bind(model, name, *args)
class DocIDField(RowIDField):
    """RowIDField variant for FTS3/FTS4 tables, which use 'docid'."""
    column_name = name = required_name = 'docid'
class AutoIncrementField(AutoField):
    """AutoField that emits SQLite's AUTOINCREMENT keyword in its DDL."""
    def ddl(self, ctx):
        node_list = super(AutoIncrementField, self).ddl(ctx)
        return NodeList((node_list, SQL('AUTOINCREMENT')))
class JSONPath(ColumnBase):
    """
    Represents a JSON-path expression rooted at a JSONField; indexing
    with [] extends the path and the helper methods build calls to the
    SQLite json1 functions (json_set, json_extract, etc).
    """
    def __init__(self, field, path=None):
        super(JSONPath, self).__init__()
        self._field = field
        self._path = path or ()
    @property
    def path(self):
        # Render the accumulated path as a json1 '$...' path string.
        return Value('$%s' % ''.join(self._path))
    def __getitem__(self, idx):
        # Integer -> array subscript; anything else -> object member.
        if isinstance(idx, int):
            item = '[%s]' % idx
        else:
            item = '.%s' % idx
        return JSONPath(self._field, self._path + (item,))
    def set(self, value, as_json=None):
        # Containers (and as_json=True) are serialized and tagged with
        # json() so json1 stores them as JSON rather than text.
        if as_json or isinstance(value, (list, dict)):
            value = fn.json(self._field._json_dumps(value))
        return fn.json_set(self._field, self.path, value)
    def update(self, value):
        # RFC-7396-style merge-patch at this path.
        return self.set(fn.json_patch(self, self._field._json_dumps(value)))
    def remove(self):
        return fn.json_remove(self._field, self.path)
    def json_type(self):
        return fn.json_type(self._field, self.path)
    def length(self):
        return fn.json_array_length(self._field, self.path)
    def children(self):
        return fn.json_each(self._field, self.path)
    def tree(self):
        return fn.json_tree(self._field, self.path)
    def __sql__(self, ctx):
        # A bare (empty-path) JSONPath refers to the field itself.
        return ctx.sql(fn.json_extract(self._field, self.path)
                       if self._path else self._field)
class JSONField(TextField):
    """
    Field for storing JSON documents in a text column, with helpers for
    the SQLite json1 extension. Values are serialized on write and
    deserialized on read using the provided (or default) json functions.
    """
    field_type = 'JSON'
    def __init__(self, json_dumps=None, json_loads=None, **kwargs):
        self._json_dumps = json_dumps or json.dumps
        self._json_loads = json_loads or json.loads
        super(JSONField, self).__init__(**kwargs)
    def python_value(self, value):
        # Deserialize on read; fall back to the raw value if it is not
        # valid JSON (or not a string).
        if value is not None:
            try:
                return self._json_loads(value)
            except (TypeError, ValueError):
                return value
    def db_value(self, value):
        # Serialize on write unless the value is already a SQL node.
        if value is not None:
            if not isinstance(value, Node):
                value = fn.json(self._json_dumps(value))
            return value
    def _e(op):
        # Factory for comparison operators that JSON-serialize a
        # list/dict right-hand side before comparing.
        def inner(self, rhs):
            if isinstance(rhs, (list, dict)):
                rhs = Value(rhs, converter=self.db_value, unpack=False)
            return Expression(self, op, rhs)
        return inner
    __eq__ = _e(OP.EQ)
    __ne__ = _e(OP.NE)
    __gt__ = _e(OP.GT)
    __ge__ = _e(OP.GTE)
    __lt__ = _e(OP.LT)
    __le__ = _e(OP.LTE)
    __hash__ = Field.__hash__
    def __getitem__(self, item):
        # field['key'] / field[0] -> JSONPath expression.
        return JSONPath(self)[item]
    def set(self, value, as_json=None):
        return JSONPath(self).set(value, as_json)
    def update(self, data):
        return JSONPath(self).update(data)
    def remove(self):
        return JSONPath(self).remove()
    def json_type(self):
        return fn.json_type(self)
    def length(self):
        return fn.json_array_length(self)
    def children(self):
        """
        Schema of `json_each` and `json_tree`:

        key,
        value,
        type TEXT (object, array, string, etc),
        atom (value for primitive/scalar types, NULL for array and object)
        id INTEGER (unique identifier for element)
        parent INTEGER (unique identifier of parent element or NULL)
        fullkey TEXT (full path describing element)
        path TEXT (path to the container of the current element)
        json JSON hidden (1st input parameter to function)
        root TEXT hidden (2nd input parameter, path at which to start)
        """
        return fn.json_each(self)
    def tree(self):
        return fn.json_tree(self)
class SearchField(Field):
    """
    Field type for columns on full-text search virtual tables. Only the
    `unindexed` and `column_name` options are meaningful; anything else
    is rejected because FTS columns carry no constraints or types.
    """
    def __init__(self, unindexed=False, column_name=None, **k):
        if k:
            raise ValueError('SearchField does not accept these keyword '
                             'arguments: %s.' % sorted(k))
        super(SearchField, self).__init__(unindexed=unindexed,
                                          column_name=column_name, null=True)
    def match(self, term):
        # Build a "column MATCH term" expression.
        return match(self, term)
class VirtualTableSchemaManager(SchemaManager):
    """
    SchemaManager that knows how to emit CREATE VIRTUAL TABLE statements
    for VirtualModel subclasses; non-virtual models fall through to the
    standard CREATE TABLE logic.
    """
    def _create_virtual_table(self, safe=True, **options):
        options = self.model.clean_options(
            merge_dict(self.model._meta.options, options))
        # Structure:
        # CREATE VIRTUAL TABLE <model>
        # USING <extension_module>
        # ([prefix_arguments, ...] fields, ... [arguments, ...], [options...])
        ctx = self._create_context()
        ctx.literal('CREATE VIRTUAL TABLE ')
        if safe:
            ctx.literal('IF NOT EXISTS ')
        (ctx
         .sql(self.model)
         .literal(' USING '))
        ext_module = self.model._meta.extension_module
        if isinstance(ext_module, Node):
            # Extension module expressed as a SQL node (e.g. fts5vocab(...)).
            return ctx.sql(ext_module)
        ctx.sql(SQL(ext_module)).literal(' ')
        arguments = []
        meta = self.model._meta
        if meta.prefix_arguments:
            arguments.extend([SQL(a) for a in meta.prefix_arguments])
        # Constraints, data-types, foreign and primary keys are all omitted.
        for field in meta.sorted_fields:
            if isinstance(field, (RowIDField)) or field._hidden:
                continue
            field_def = [Entity(field.column_name)]
            if field.unindexed:
                field_def.append(SQL('UNINDEXED'))
            arguments.append(NodeList(field_def))
        if meta.arguments:
            arguments.extend([SQL(a) for a in meta.arguments])
        if options:
            arguments.extend(self._create_table_option_sql(options))
        return ctx.sql(EnclosedNodeList(arguments))
    def _create_table(self, safe=True, **options):
        # Dispatch virtual models to the CREATE VIRTUAL TABLE builder.
        if issubclass(self.model, VirtualModel):
            return self._create_virtual_table(safe, **options)
        return super(VirtualTableSchemaManager, self)._create_table(
            safe, **options)
class VirtualModel(Model):
    """
    Base Model for SQLite virtual tables. Subclasses set
    Meta.extension_module (and optionally prefix/suffix arguments) and
    may override clean_options() to validate/normalize table options.
    """
    class Meta:
        arguments = None
        extension_module = None
        prefix_arguments = None
        primary_key = False
        schema_manager_class = VirtualTableSchemaManager
    @classmethod
    def clean_options(cls, options):
        # Hook for subclasses; default is a no-op pass-through.
        return options
class BaseFTSModel(VirtualModel):
    """
    Shared option post-processing for the FTS3/FTS4/FTS5 virtual models:
    normalizes the `content`, `prefix` and `tokenize` table options.
    """
    @classmethod
    def clean_options(cls, options):
        content = options.get('content')
        prefix = options.get('prefix')
        tokenize = options.get('tokenize')
        if isinstance(content, basestring) and content == '':
            # Special-case content-less full-text search tables.
            options['content'] = "''"
        elif isinstance(content, Field):
            # Special-case to ensure fields are fully-qualified.
            options['content'] = Entity(content.model._meta.table_name,
                                        content.column_name)
        if prefix:
            # Accept a list/tuple of prefix lengths or a pre-built string.
            if isinstance(prefix, (list, tuple)):
                prefix = ','.join([str(i) for i in prefix])
            options['prefix'] = "'%s'" % prefix.strip("' ")
        if tokenize and cls._meta.extension_module.lower() == 'fts5':
            # Tokenizers need to be in quoted string for FTS5, but not for FTS3
            # or FTS4.
            options['tokenize'] = '"%s"' % tokenize
        return options
class FTSModel(BaseFTSModel):
    """
    VirtualModel class for creating tables that use either the FTS3 or FTS4
    search extensions. Peewee automatically determines which version of the
    FTS extension is supported and will use FTS4 if possible.
    """
    # FTS3/4 uses "docid" in the same way a normal table uses "rowid".
    docid = DocIDField()
    class Meta:
        extension_module = 'FTS%s' % FTS_VERSION
    @classmethod
    def _fts_cmd(cls, cmd):
        # Issue an FTS special command, e.g. INSERT INTO t(t) VALUES('optimize').
        # NOTE(review): `cmd` is interpolated directly into the SQL; all
        # callers pass fixed literals, so no untrusted input reaches here.
        tbl = cls._meta.table_name
        res = cls._meta.database.execute_sql(
            "INSERT INTO %s(%s) VALUES('%s');" % (tbl, tbl, cmd))
        return res.fetchone()
    @classmethod
    def optimize(cls):
        """Merge all index b-trees into a single segment."""
        return cls._fts_cmd('optimize')
    @classmethod
    def rebuild(cls):
        """Rebuild the full-text index from the content table."""
        return cls._fts_cmd('rebuild')
    @classmethod
    def integrity_check(cls):
        return cls._fts_cmd('integrity-check')
    @classmethod
    def merge(cls, blocks=200, segments=8):
        return cls._fts_cmd('merge=%s,%s' % (blocks, segments))
    @classmethod
    def automerge(cls, state=True):
        return cls._fts_cmd('automerge=%s' % (state and '1' or '0'))
    @classmethod
    def match(cls, term):
        """
        Generate a `MATCH` expression appropriate for searching this table.
        """
        return match(cls._meta.entity, term)
    @classmethod
    def rank(cls, *weights):
        # Simple hit-ratio ranking over matchinfo('pcx').
        matchinfo = fn.matchinfo(cls._meta.entity, FTS3_MATCHINFO)
        return fn.fts_rank(matchinfo, *weights)
    @classmethod
    def bm25(cls, *weights):
        # Okapi BM25 ranking over matchinfo('pcnalx') -- FTS4 only.
        match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
        return fn.fts_bm25(match_info, *weights)
    @classmethod
    def bm25f(cls, *weights):
        match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
        return fn.fts_bm25f(match_info, *weights)
    @classmethod
    def lucene(cls, *weights):
        match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
        return fn.fts_lucene(match_info, *weights)
    @classmethod
    def _search(cls, term, weights, with_score, score_alias, score_fn,
                explicit_ordering):
        # Common driver for the search_* helpers; `score_fn` is one of
        # rank/bm25/bm25f/lucene above.
        if not weights:
            rank = score_fn()
        elif isinstance(weights, dict):
            weight_args = []
            for field in cls._meta.sorted_fields:
                # Attempt to get the specified weight of the field by looking
                # it up using it's field instance followed by name.
                field_weight = weights.get(field, weights.get(field.name, 1.0))
                weight_args.append(field_weight)
            rank = score_fn(*weight_args)
        else:
            rank = score_fn(*weights)
        selection = ()
        order_by = rank
        if with_score:
            selection = (cls, rank.alias(score_alias))
        if with_score and not explicit_ordering:
            order_by = SQL(score_alias)
        return (cls
                .select(*selection)
                .where(cls.match(term))
                .order_by(order_by))
    @classmethod
    def search(cls, term, weights=None, with_score=False, score_alias='score',
               explicit_ordering=False):
        """Full-text search using selected `term`."""
        return cls._search(
            term,
            weights,
            with_score,
            score_alias,
            cls.rank,
            explicit_ordering)
    @classmethod
    def search_bm25(cls, term, weights=None, with_score=False,
                    score_alias='score', explicit_ordering=False):
        """Full-text search for selected `term` using BM25 algorithm."""
        return cls._search(
            term,
            weights,
            with_score,
            score_alias,
            cls.bm25,
            explicit_ordering)
    @classmethod
    def search_bm25f(cls, term, weights=None, with_score=False,
                     score_alias='score', explicit_ordering=False):
        """Full-text search for selected `term` using BM25f algorithm."""
        return cls._search(
            term,
            weights,
            with_score,
            score_alias,
            cls.bm25f,
            explicit_ordering)
    @classmethod
    def search_lucene(cls, term, weights=None, with_score=False,
                      score_alias='score', explicit_ordering=False):
        """Full-text search for selected `term` using Lucene-style scoring."""
        return cls._search(
            term,
            weights,
            with_score,
            score_alias,
            cls.lucene,
            explicit_ordering)
# Character classes used by FTS5Model.validate_query / clean_query to
# detect ASCII characters that the FTS5 query parser cannot handle.
_alphabet = 'abcdefghijklmnopqrstuvwxyz'
_alphanum = (set('\t ,"(){}*:_+0123456789') |
             set(_alphabet) |
             set(_alphabet.upper()) |
             set((chr(26),)))  # chr(26) = substitution char used by clean_query.
_invalid_ascii = set(chr(p) for p in range(128) if chr(p) not in _alphanum)
# Matches a bare token or a double-quoted phrase (with backslash escapes).
# Raw string fixes the invalid '\s' escape sequence in the original
# non-raw literal (a SyntaxWarning on modern Pythons); the compiled
# pattern is byte-for-byte identical.
_quote_re = re.compile(r'(?:[^\s"]|"(?:\.|[^"])*")+')
class FTS5Model(BaseFTSModel):
"""
Requires SQLite >= 3.9.0.
Table options:
content: table name of external content, or empty string for "contentless"
content_rowid: column name of external content primary key
prefix: integer(s). Ex: '2' or '2 3 4'
tokenize: porter, unicode61, ascii. Ex: 'porter unicode61'
The unicode tokenizer supports the following parameters:
* remove_diacritics (1 or 0, default is 1)
* tokenchars (string of characters, e.g. '-_'
* separators (string of characters)
Parameters are passed as alternating parameter name and value, so:
{'tokenize': "unicode61 remove_diacritics 0 tokenchars '-_'"}
Content-less tables:
If you don't need the full-text content in it's original form, you can
specify a content-less table. Searches and auxiliary functions will work
as usual, but the only values returned when SELECT-ing can be rowid. Also
content-less tables do not support UPDATE or DELETE.
External content tables:
You can set up triggers to sync these, e.g.
-- Create a table. And an external content fts5 table to index it.
CREATE TABLE tbl(a INTEGER PRIMARY KEY, b);
CREATE VIRTUAL TABLE ft USING fts5(b, content='tbl', content_rowid='a');
-- Triggers to keep the FTS index up to date.
CREATE TRIGGER tbl_ai AFTER INSERT ON tbl BEGIN
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
CREATE TRIGGER tbl_ad AFTER DELETE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
END;
CREATE TRIGGER tbl_au AFTER UPDATE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
Built-in auxiliary functions:
* bm25(tbl[, weight_0, ... weight_n])
* highlight(tbl, col_idx, prefix, suffix)
* snippet(tbl, col_idx, prefix, suffix, ?, max_tokens)
"""
# FTS5 does not support declared primary keys, but we can use the
# implicit rowid.
rowid = RowIDField()
class Meta:
extension_module = 'fts5'
_error_messages = {
'field_type': ('Besides the implicit `rowid` column, all columns must '
'be instances of SearchField'),
'index': 'Secondary indexes are not supported for FTS5 models',
'pk': 'FTS5 models must use the default `rowid` primary key',
}
@classmethod
def validate_model(cls):
# Perform FTS5-specific validation and options post-processing.
if cls._meta.primary_key.name != 'rowid':
raise ImproperlyConfigured(cls._error_messages['pk'])
for field in cls._meta.fields.values():
if not isinstance(field, (SearchField, RowIDField)):
raise ImproperlyConfigured(cls._error_messages['field_type'])
if cls._meta.indexes:
raise ImproperlyConfigured(cls._error_messages['index'])
@classmethod
def fts5_installed(cls):
if sqlite3.sqlite_version_info[:3] < FTS5_MIN_SQLITE_VERSION:
return False
# Test in-memory DB to determine if the FTS5 extension is installed.
tmp_db = sqlite3.connect(':memory:')
try:
tmp_db.execute('CREATE VIRTUAL TABLE fts5test USING fts5 (data);')
except:
try:
tmp_db.enable_load_extension(True)
tmp_db.load_extension('fts5')
except:
return False
else:
cls._meta.database.load_extension('fts5')
finally:
tmp_db.close()
return True
@staticmethod
def validate_query(query):
"""
Simple helper function to indicate whether a search query is a
valid FTS5 query. Note: this simply looks at the characters being
used, and is not guaranteed to catch all problematic queries.
"""
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
continue
if set(token) & _invalid_ascii:
return False
return True
    @staticmethod
    def clean_query(query, replace=chr(26)):
        """
        Clean a query of invalid tokens.

        Characters from the invalid-ASCII set found in unquoted tokens
        are replaced by `replace` (the SUB control character, chr(26),
        by default). Quoted phrases pass through untouched. If nothing
        needed replacing, the original string is returned unchanged;
        otherwise the cleaned tokens are re-joined with single spaces.
        """
        accum = []
        any_invalid = False
        tokens = _quote_re.findall(query)
        for token in tokens:
            # Quoted phrases are preserved verbatim.
            if token.startswith('"') and token.endswith('"'):
                accum.append(token)
                continue
            token_set = set(token)
            invalid_for_token = token_set & _invalid_ascii
            if invalid_for_token:
                any_invalid = True
                # Replace each offending character within the token.
                for c in invalid_for_token:
                    token = token.replace(c, replace)
            accum.append(token)
        if any_invalid:
            return ' '.join(accum)
        return query
    @classmethod
    def match(cls, term):
        """
        Generate a `MATCH` expression appropriate for searching this table.

        Delegates to the module-level match() helper, producing
        `<table> MATCH <term>`.
        """
        return match(cls._meta.entity, term)
    @classmethod
    def rank(cls, *args):
        # With explicit weights, rank via the bm25() auxiliary function;
        # otherwise reference FTS5's built-in hidden `rank` column.
        return cls.bm25(*args) if args else SQL('rank')
    @classmethod
    def bm25(cls, *weights):
        # bm25() is a built-in FTS5 auxiliary function; optional
        # positional weights are passed through per column.
        return fn.bm25(cls._meta.entity, *weights)
@classmethod
def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
"""Full-text search using selected `term`."""
return cls.search_bm25(
FTS5Model.clean_query(term),
weights,
with_score,
score_alias,
explicit_ordering)
    @classmethod
    def search_bm25(cls, term, weights=None, with_score=False,
                    score_alias='score', explicit_ordering=False):
        """Full-text search using selected `term`.

        `weights` may be None (use FTS5's built-in rank column), a dict
        mapping field instances or names to weights, or a sequence of
        positional weights passed straight to bm25(). When `with_score`
        is true the score is selected under `score_alias`, and unless
        `explicit_ordering` is set the ordering references that alias
        rather than repeating the bm25() expression.
        """
        if not weights:
            rank = SQL('rank')
        elif isinstance(weights, dict):
            weight_args = []
            for field in cls._meta.sorted_fields:
                # Only indexed search fields participate in ranking.
                if isinstance(field, SearchField) and not field.unindexed:
                    weight_args.append(
                        weights.get(field, weights.get(field.name, 1.0)))
            rank = fn.bm25(cls._meta.entity, *weight_args)
        else:
            rank = fn.bm25(cls._meta.entity, *weights)
        selection = ()
        order_by = rank
        if with_score:
            selection = (cls, rank.alias(score_alias))
        if with_score and not explicit_ordering:
            order_by = SQL(score_alias)
        return (cls
                .select(*selection)
                .where(cls.match(FTS5Model.clean_query(term)))
                .order_by(order_by))
    @classmethod
    def _fts_cmd_sql(cls, cmd, **extra_params):
        # Build the special "command" INSERT used to control an FTS5
        # table, e.g. INSERT INTO tbl(tbl, rank) VALUES ('automerge', 4).
        tbl = cls._meta.entity
        columns = [tbl]
        values = [cmd]
        for key, value in extra_params.items():
            columns.append(Entity(key))
            values.append(value)
        return NodeList((
            SQL('INSERT INTO'),
            cls._meta.entity,
            EnclosedNodeList(columns),
            SQL('VALUES'),
            EnclosedNodeList(values)))
    @classmethod
    def _fts_cmd(cls, cmd, **extra_params):
        # Execute an FTS5 command INSERT against the bound database.
        query = cls._fts_cmd_sql(cmd, **extra_params)
        return cls._meta.database.execute(query)
    @classmethod
    def automerge(cls, level):
        """Set the FTS5 automerge level (0-16); 0 disables automerging."""
        if not (0 <= level <= 16):
            raise ValueError('level must be between 0 and 16')
        return cls._fts_cmd('automerge', rank=level)
    @classmethod
    def merge(cls, npages):
        # Issue the FTS5 'merge' command with the given page count.
        return cls._fts_cmd('merge', rank=npages)
    @classmethod
    def set_pgsz(cls, pgsz):
        # Issue the FTS5 'pgsz' command to set the index page size.
        return cls._fts_cmd('pgsz', rank=pgsz)
    @classmethod
    def set_rank(cls, rank_expression):
        # Configure the ranking function backing the `rank` column.
        return cls._fts_cmd('rank', rank=rank_expression)
    @classmethod
    def delete_all(cls):
        # Issue the FTS5 'delete-all' command, emptying the index.
        return cls._fts_cmd('delete-all')
    @classmethod
    def VocabModel(cls, table_type='row', table=None):
        """Create (and memoize) a model for the fts5vocab virtual table.

        `table_type` selects the vocab flavor ('row', 'col' or
        'instance'); `table` optionally overrides the generated table
        name. The generated class is cached on `cls`, so repeated calls
        with the same `table_type` return the same model class.
        """
        if table_type not in ('row', 'col', 'instance'):
            raise ValueError('table_type must be either "row", "col" or '
                             '"instance".')
        attr = '_vocab_model_%s' % table_type
        if not hasattr(cls, attr):
            class Meta:
                database = cls._meta.database
                table_name = table or cls._meta.table_name + '_v'
                extension_module = fn.fts5vocab(
                    cls._meta.entity,
                    SQL(table_type))
            # Columns common to all three fts5vocab table types.
            attrs = {
                'term': VirtualField(TextField),
                'doc': IntegerField(),
                'cnt': IntegerField(),
                'rowid': RowIDField(),
                'Meta': Meta,
            }
            # 'col' adds the column name; 'instance' adds token offsets.
            if table_type == 'col':
                attrs['col'] = VirtualField(TextField)
            elif table_type == 'instance':
                attrs['offset'] = VirtualField(IntegerField)
            class_name = '%sVocab' % cls.__name__
            setattr(cls, attr, type(class_name, (VirtualModel,), attrs))
        return getattr(cls, attr)
def ClosureTable(model_class, foreign_key=None, referencing_class=None,
                 referencing_key=None):
    """Model factory for the transitive_closure extension.

    Builds a VirtualModel subclass bound to a closure table over
    `model_class`'s self-referential hierarchy. When `foreign_key` is
    omitted, the first self-referential foreign key on `model_class` is
    discovered automatically. `referencing_class`/`referencing_key`
    allow the closure to be maintained against a separate linking table.
    """
    if referencing_class is None:
        referencing_class = model_class
    if foreign_key is None:
        # Auto-discover the self-referential foreign key.
        for field_obj in model_class._meta.refs:
            if field_obj.rel_model is model_class:
                foreign_key = field_obj
                break
        else:
            raise ValueError('Unable to find self-referential foreign key.')
    source_key = model_class._meta.primary_key
    if referencing_key is None:
        referencing_key = source_key
    class BaseClosureTable(VirtualModel):
        # Hidden columns exposed by the transitive_closure module.
        depth = VirtualField(IntegerField)
        id = VirtualField(IntegerField)
        idcolumn = VirtualField(TextField)
        parentcolumn = VirtualField(TextField)
        root = VirtualField(IntegerField)
        tablename = VirtualField(TextField)
        class Meta:
            extension_module = 'transitive_closure'
        @classmethod
        def descendants(cls, node, depth=None, include_node=False):
            # Rows reachable downward from `node`, annotated with depth.
            query = (model_class
                     .select(model_class, cls.depth.alias('depth'))
                     .join(cls, on=(source_key == cls.id))
                     .where(cls.root == node)
                     .objects())
            if depth is not None:
                query = query.where(cls.depth == depth)
            elif not include_node:
                query = query.where(cls.depth > 0)
            return query
        @classmethod
        def ancestors(cls, node, depth=None, include_node=False):
            # Rows reachable upward from `node`, annotated with depth.
            query = (model_class
                     .select(model_class, cls.depth.alias('depth'))
                     .join(cls, on=(source_key == cls.root))
                     .where(cls.id == node)
                     .objects())
            if depth:
                query = query.where(cls.depth == depth)
            elif not include_node:
                query = query.where(cls.depth > 0)
            return query
        @classmethod
        def siblings(cls, node, include_node=False):
            if referencing_class is model_class:
                # Self-join: siblings share the same parent FK value.
                fk_value = node.__data__.get(foreign_key.name)
                query = model_class.select().where(foreign_key == fk_value)
            else:
                # Sibling keys as recorded in referencing_class.
                siblings = (referencing_class
                            .select(referencing_key)
                            .join(cls, on=(foreign_key == cls.root))
                            .where((cls.id == node) & (cls.depth == 1)))
                # The corresponding model_class rows.
                query = (model_class
                         .select()
                         .where(source_key << siblings)
                         .objects())
            if not include_node:
                query = query.where(source_key != node)
            return query
    class Meta:
        database = referencing_class._meta.database
        options = {
            'tablename': referencing_class._meta.table_name,
            'idcolumn': referencing_key.column_name,
            'parentcolumn': foreign_key.column_name}
        primary_key = False
    name = '%sClosure' % model_class.__name__
    return type(name, (BaseClosureTable,), {'Meta': Meta})
class LSMTable(VirtualModel):
    """VirtualModel for the lsm1 extension, exposing an LSM key/value
    database file as a SQLite virtual table with dict-like access.
    """
    class Meta:
        extension_module = 'lsm1'
        filename = None
    @classmethod
    def clean_options(cls, options):
        # Validate the model and build the argument list that the lsm1
        # module expects: (filename, key-column name, key data type).
        filename = cls._meta.filename
        if not filename:
            raise ValueError('LSM1 extension requires that you specify a '
                             'filename for the LSM database.')
        else:
            if len(filename) >= 2 and filename[0] != '"':
                filename = '"%s"' % filename
        if not cls._meta.primary_key:
            raise ValueError('LSM1 models must specify a primary-key field.')
        key = cls._meta.primary_key
        if isinstance(key, AutoField):
            raise ValueError('LSM1 models must explicitly declare a primary '
                             'key field.')
        if not isinstance(key, (TextField, BlobField, IntegerField)):
            raise ValueError('LSM1 key must be a TextField, BlobField, or '
                             'IntegerField.')
        key._hidden = True
        if isinstance(key, IntegerField):
            data_type = 'UINT'
        elif isinstance(key, BlobField):
            data_type = 'BLOB'
        else:
            data_type = 'TEXT'
        cls._meta.prefix_arguments = [filename, '"%s"' % key.name, data_type]
        # Does the key map to a scalar value, or a tuple of values?
        if len(cls._meta.sorted_fields) == 2:
            cls._meta._value_field = cls._meta.sorted_fields[1]
        else:
            cls._meta._value_field = None
        return options
    @classmethod
    def load_extension(cls, path='lsm.so'):
        # Load the lsm1 shared library into the bound database.
        cls._meta.database.load_extension(path)
    @staticmethod
    def slice_to_expr(key, idx):
        # Translate a Python slice over the key into a SQL expression.
        # Implicitly returns None for the unbounded slice [:].
        if idx.start is not None and idx.stop is not None:
            return key.between(idx.start, idx.stop)
        elif idx.start is not None:
            return key >= idx.start
        elif idx.stop is not None:
            return key <= idx.stop
    @staticmethod
    def _apply_lookup_to_query(query, key, lookup):
        # Return (query, is_single_row) for a lookup that may be a
        # slice, an arbitrary Expression, or a scalar key value.
        if isinstance(lookup, slice):
            expr = LSMTable.slice_to_expr(key, lookup)
            if expr is not None:
                query = query.where(expr)
            return query, False
        elif isinstance(lookup, Expression):
            return query.where(lookup), False
        else:
            return query.where(key == lookup), True
    @classmethod
    def get_by_id(cls, pk):
        query, is_single = cls._apply_lookup_to_query(
            cls.select().namedtuples(),
            cls._meta.primary_key,
            pk)
        if is_single:
            try:
                row = query.get()
            except cls.DoesNotExist:
                # Mirror dict semantics for scalar lookups.
                raise KeyError(pk)
            return row[1] if cls._meta._value_field is not None else row
        else:
            # Slice/expression lookups yield an iterable query.
            return query
    @classmethod
    def set_by_id(cls, key, value):
        # NOTE(review): if `value` is an unsupported type and there is
        # no single value field, `data` is never bound and a NameError
        # results -- confirm whether that is intended.
        if cls._meta._value_field is not None:
            data = {cls._meta._value_field: value}
        elif isinstance(value, tuple):
            data = {}
            for field, fval in zip(cls._meta.sorted_fields[1:], value):
                data[field] = fval
        elif isinstance(value, dict):
            data = value
        elif isinstance(value, cls):
            data = value.__dict__
        data[cls._meta.primary_key] = key
        cls.replace(data).execute()
    @classmethod
    def delete_by_id(cls, pk):
        # Accepts scalar keys, slices or expressions; deletes matches.
        query, is_single = cls._apply_lookup_to_query(
            cls.delete(),
            cls._meta.primary_key,
            pk)
        return query.execute()
# Register the MATCH operator so expressions can render `lhs MATCH rhs`.
OP.MATCH = 'MATCH'
def _sqlite_regexp(regex, value):
    # Backing implementation for the SQL `regexp` function: true when
    # the pattern matches anywhere in the value.
    return bool(re.search(regex, value))
class SqliteExtDatabase(SqliteDatabase):
    """SqliteDatabase subclass that registers the playhouse helper
    functions (ranking, hashing, regexp, bloom-filter, json_contains),
    preferring the faster C implementations when available.
    """
    def __init__(self, database, c_extensions=None, rank_functions=True,
                 hash_functions=False, regexp_function=False,
                 bloomfilter=False, json_contains=False, *args, **kwargs):
        super(SqliteExtDatabase, self).__init__(database, *args, **kwargs)
        self._row_factory = None
        if c_extensions and not CYTHON_SQLITE_EXTENSIONS:
            raise ImproperlyConfigured('SqliteExtDatabase initialized with '
                                       'C extensions, but shared library was '
                                       'not found!')
        # Use the C implementations unless explicitly disabled.
        prefer_c = CYTHON_SQLITE_EXTENSIONS and (c_extensions is not False)
        if rank_functions:
            if prefer_c:
                register_rank_functions(self)
            else:
                self.register_function(bm25, 'fts_bm25')
                self.register_function(rank, 'fts_rank')
                self.register_function(bm25, 'fts_bm25f')  # Fall back to bm25.
                self.register_function(bm25, 'fts_lucene')
        if hash_functions:
            if not prefer_c:
                raise ValueError('C extension required to register hash '
                                 'functions.')
            register_hash_functions(self)
        if regexp_function:
            self.register_function(_sqlite_regexp, 'regexp', 2)
        if bloomfilter:
            if not prefer_c:
                raise ValueError('C extension required to use bloomfilter.')
            register_bloomfilter(self)
        if json_contains:
            self.register_function(_json_contains, 'json_contains')
        self._c_extensions = prefer_c
    def _add_conn_hooks(self, conn):
        # Apply the registered row factory to each new connection.
        super(SqliteExtDatabase, self)._add_conn_hooks(conn)
        if self._row_factory:
            conn.row_factory = self._row_factory
    def row_factory(self, fn):
        # Register a row factory for subsequently-opened connections.
        self._row_factory = fn
if CYTHON_SQLITE_EXTENSIONS:
    # Flag values accepted by sqlite3_status() -- process-wide counters.
    SQLITE_STATUS_MEMORY_USED = 0
    SQLITE_STATUS_PAGECACHE_USED = 1
    SQLITE_STATUS_PAGECACHE_OVERFLOW = 2
    SQLITE_STATUS_SCRATCH_USED = 3
    SQLITE_STATUS_SCRATCH_OVERFLOW = 4
    SQLITE_STATUS_MALLOC_SIZE = 5
    SQLITE_STATUS_PARSER_STACK = 6
    SQLITE_STATUS_PAGECACHE_SIZE = 7
    SQLITE_STATUS_SCRATCH_SIZE = 8
    SQLITE_STATUS_MALLOC_COUNT = 9
    # Flag values accepted by sqlite3_db_status() -- per-connection.
    SQLITE_DBSTATUS_LOOKASIDE_USED = 0
    SQLITE_DBSTATUS_CACHE_USED = 1
    SQLITE_DBSTATUS_SCHEMA_USED = 2
    SQLITE_DBSTATUS_STMT_USED = 3
    SQLITE_DBSTATUS_LOOKASIDE_HIT = 4
    SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE = 5
    SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL = 6
    SQLITE_DBSTATUS_CACHE_HIT = 7
    SQLITE_DBSTATUS_CACHE_MISS = 8
    SQLITE_DBSTATUS_CACHE_WRITE = 9
    SQLITE_DBSTATUS_DEFERRED_FKS = 10
    #SQLITE_DBSTATUS_CACHE_USED_SHARED = 11
def __status__(flag, return_highwater=False):
"""
Expose a sqlite3_status() call for a particular flag as a property of
the Database object.
"""
def getter(self):
result = sqlite_get_status(flag)
return result[1] if return_highwater else result
return property(getter)
    def __dbstatus__(flag, return_highwater=False, return_current=False):
        """
        Expose a sqlite3_dbstatus() call for a particular flag as a property of
        the Database instance. Unlike sqlite3_status(), the dbstatus properties
        pertain to the current connection.
        """
        def getter(self):
            if self._state.conn is None:
                raise ImproperlyConfigured('database connection not opened.')
            # result holds (current, highwater) for this connection.
            result = sqlite_get_db_status(self._state.conn, flag)
            if return_current:
                return result[0]
            return result[1] if return_highwater else result
        return property(getter)
    class CSqliteExtDatabase(SqliteExtDatabase):
        """SqliteExtDatabase variant backed by the C extension, exposing
        commit/rollback/update hooks, online backups, blob handles and
        the sqlite3_status()/sqlite3_db_status() counters as properties.
        """
        def __init__(self, *args, **kwargs):
            self._conn_helper = None
            self._commit_hook = self._rollback_hook = self._update_hook = None
            self._replace_busy_handler = False
            super(CSqliteExtDatabase, self).__init__(*args, **kwargs)
        def init(self, database, replace_busy_handler=False, **kwargs):
            super(CSqliteExtDatabase, self).init(database, **kwargs)
            self._replace_busy_handler = replace_busy_handler
        def _close(self, conn):
            # Detach any registered hooks before closing the connection.
            if self._commit_hook:
                self._conn_helper.set_commit_hook(None)
            if self._rollback_hook:
                self._conn_helper.set_rollback_hook(None)
            if self._update_hook:
                self._conn_helper.set_update_hook(None)
            return super(CSqliteExtDatabase, self)._close(conn)
        def _add_conn_hooks(self, conn):
            super(CSqliteExtDatabase, self)._add_conn_hooks(conn)
            self._conn_helper = ConnectionHelper(conn)
            # Re-apply hooks that were registered while disconnected.
            if self._commit_hook is not None:
                self._conn_helper.set_commit_hook(self._commit_hook)
            if self._rollback_hook is not None:
                self._conn_helper.set_rollback_hook(self._rollback_hook)
            if self._update_hook is not None:
                self._conn_helper.set_update_hook(self._update_hook)
            if self._replace_busy_handler:
                timeout = self._timeout or 5
                # set_busy_handler takes milliseconds.
                self._conn_helper.set_busy_handler(timeout * 1000)
        def on_commit(self, fn):
            # Usable as a decorator: @db.on_commit
            self._commit_hook = fn
            if not self.is_closed():
                self._conn_helper.set_commit_hook(fn)
            return fn
        def on_rollback(self, fn):
            # Usable as a decorator: @db.on_rollback
            self._rollback_hook = fn
            if not self.is_closed():
                self._conn_helper.set_rollback_hook(fn)
            return fn
        def on_update(self, fn):
            # Usable as a decorator: @db.on_update
            self._update_hook = fn
            if not self.is_closed():
                self._conn_helper.set_update_hook(fn)
            return fn
        def changes(self):
            return self._conn_helper.changes()
        @property
        def last_insert_rowid(self):
            return self._conn_helper.last_insert_rowid()
        @property
        def autocommit(self):
            return self._conn_helper.autocommit()
        def backup(self, destination, pages=None, name=None, progress=None):
            # Online backup into another open database.
            return backup(self.connection(), destination.connection(),
                          pages=pages, name=name, progress=progress)
        def backup_to_file(self, filename, pages=None, name=None,
                           progress=None):
            return backup_to_file(self.connection(), filename, pages=pages,
                                  name=name, progress=progress)
        def blob_open(self, table, column, rowid, read_only=False):
            # Open an incremental-I/O handle on a single blob cell.
            return Blob(self, table, column, rowid, read_only)
        # Status properties.
        memory_used = __status__(SQLITE_STATUS_MEMORY_USED)
        malloc_size = __status__(SQLITE_STATUS_MALLOC_SIZE, True)
        malloc_count = __status__(SQLITE_STATUS_MALLOC_COUNT)
        pagecache_used = __status__(SQLITE_STATUS_PAGECACHE_USED)
        pagecache_overflow = __status__(SQLITE_STATUS_PAGECACHE_OVERFLOW)
        pagecache_size = __status__(SQLITE_STATUS_PAGECACHE_SIZE, True)
        scratch_used = __status__(SQLITE_STATUS_SCRATCH_USED)
        scratch_overflow = __status__(SQLITE_STATUS_SCRATCH_OVERFLOW)
        scratch_size = __status__(SQLITE_STATUS_SCRATCH_SIZE, True)
        # Connection status properties.
        lookaside_used = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_USED)
        lookaside_hit = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_HIT, True)
        lookaside_miss = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE,
                                      True)
        lookaside_miss_full = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL,
                                           True)
        cache_used = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED, False, True)
        #cache_used_shared = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED_SHARED,
        #                                 False, True)
        schema_used = __dbstatus__(SQLITE_DBSTATUS_SCHEMA_USED, False, True)
        statement_used = __dbstatus__(SQLITE_DBSTATUS_STMT_USED, False, True)
        cache_hit = __dbstatus__(SQLITE_DBSTATUS_CACHE_HIT, False, True)
        cache_miss = __dbstatus__(SQLITE_DBSTATUS_CACHE_MISS, False, True)
        cache_write = __dbstatus__(SQLITE_DBSTATUS_CACHE_WRITE, False, True)
def match(lhs, rhs):
    # Build the `lhs MATCH rhs` expression used by FTS virtual tables.
    return Expression(lhs, OP.MATCH, rhs)
def _parse_match_info(buf):
    """Decode a matchinfo() blob into a list of unsigned 32-bit ints.

    See http://sqlite.org/fts3.html#matchinfo
    """
    return [struct.unpack_from('@I', buf, offset)[0]
            for offset in range(0, len(buf), 4)]
def get_weights(ncol, raw_weights):
    """Return a per-column weight list of length `ncol`.

    With no weights given, every column gets weight 1. Otherwise the
    provided weights are copied positionally and any remaining columns
    default to weight 0.
    """
    if not raw_weights:
        return [1] * ncol
    weights = [0] * ncol
    for idx, value in enumerate(raw_weights):
        weights[idx] = value
    return weights
# Ranking implementation which parses matchinfo.
def rank(raw_match_info, *raw_weights):
    """Rank function for FTS3/4, driven by matchinfo() 'pcx' data.

    Each phrase/column pair contributes the weighted ratio of hits in
    the current row to hits across all rows. The score is negated so
    an ascending ORDER BY puts the best matches first. Based on the
    example rank function at http://sqlite.org/fts3.html#appendix_a
    """
    match_info = _parse_match_info(raw_match_info)
    phrase_count, col_count = match_info[:2]
    weights = get_weights(col_count, raw_weights)
    # The 'x' section of matchinfo holds, for every phrase, three
    # integers per column:
    #   [hits_in_this_row, hits_in_all_rows, docs_containing_hits]
    # With two phrases and three columns, the layout is:
    #   p0: c0=[0,1,2],   c1=[3,4,5],    c2=[6,7,8]
    #   p1: c0=[9,10,11], c1=[12,13,14], c2=[15,16,17]
    score = 0.0
    for phrase in range(phrase_count):
        base = 2 + (phrase * col_count * 3)
        for col, weight in enumerate(weights):
            if not weight:
                continue
            offset = base + (col * 3)
            # A phrase scores highly when it occurs often in this row
            # relative to its frequency across the whole corpus.
            row_hits = match_info[offset]
            total_hits = match_info[offset + 1]
            if row_hits > 0:
                score += weight * (float(row_hits) / total_hits)
    return -score
# JSON containment helper backing the `json_contains` SQL function.
def _json_contains(src_json, obj_json):
    """Return True if the JSON document `src_json` contains `obj_json`.

    Containment is checked iteratively with an explicit stack:
    * dict vs dict: every key of the candidate must exist in the source,
      with (recursively) contained values.
    * list/scalar vs dict: membership test against the source dict.
    * list vs list: element-wise, positional containment.
    * scalars: must compare equal.
    Invalid JSON in either argument yields False rather than raising.
    """
    stack = []
    try:
        stack.append((json.loads(obj_json), json.loads(src_json)))
    except (TypeError, ValueError):
        # Invalid JSON! Narrowed from a bare `except` so unrelated
        # exceptions (KeyboardInterrupt, SystemExit) propagate.
        return False
    while stack:
        obj, src = stack.pop()
        if isinstance(src, dict):
            if isinstance(obj, dict):
                for key in obj:
                    if key not in src:
                        return False
                    stack.append((obj[key], src[key]))
            elif isinstance(obj, list):
                for item in obj:
                    if item not in src:
                        return False
            elif obj not in src:
                return False
        elif isinstance(src, list):
            if isinstance(obj, dict):
                return False
            elif isinstance(obj, list):
                try:
                    # Compare positionally; a longer candidate cannot be
                    # contained and triggers the IndexError below.
                    for i in range(len(obj)):
                        stack.append((obj[i], src[i]))
                except IndexError:
                    return False
            elif obj not in src:
                return False
        elif obj != src:
            return False
    return True
|
coleifer/peewee | playhouse/sqlite_ext.py | FTSModel.search | python | def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
return cls._search(
term,
weights,
with_score,
score_alias,
cls.rank,
explicit_ordering) | Full-text search using selected `term`. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/sqlite_ext.py#L391-L400 | null | class FTSModel(BaseFTSModel):
"""
VirtualModel class for creating tables that use either the FTS3 or FTS4
search extensions. Peewee automatically determines which version of the
FTS extension is supported and will use FTS4 if possible.
"""
# FTS3/4 uses "docid" in the same way a normal table uses "rowid".
docid = DocIDField()
class Meta:
extension_module = 'FTS%s' % FTS_VERSION
@classmethod
def _fts_cmd(cls, cmd):
tbl = cls._meta.table_name
res = cls._meta.database.execute_sql(
"INSERT INTO %s(%s) VALUES('%s');" % (tbl, tbl, cmd))
return res.fetchone()
@classmethod
def optimize(cls):
return cls._fts_cmd('optimize')
@classmethod
def rebuild(cls):
return cls._fts_cmd('rebuild')
@classmethod
def integrity_check(cls):
return cls._fts_cmd('integrity-check')
@classmethod
def merge(cls, blocks=200, segments=8):
return cls._fts_cmd('merge=%s,%s' % (blocks, segments))
@classmethod
def automerge(cls, state=True):
return cls._fts_cmd('automerge=%s' % (state and '1' or '0'))
@classmethod
def match(cls, term):
"""
Generate a `MATCH` expression appropriate for searching this table.
"""
return match(cls._meta.entity, term)
@classmethod
def rank(cls, *weights):
matchinfo = fn.matchinfo(cls._meta.entity, FTS3_MATCHINFO)
return fn.fts_rank(matchinfo, *weights)
@classmethod
def bm25(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_bm25(match_info, *weights)
@classmethod
def bm25f(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_bm25f(match_info, *weights)
@classmethod
def lucene(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_lucene(match_info, *weights)
@classmethod
def _search(cls, term, weights, with_score, score_alias, score_fn,
explicit_ordering):
if not weights:
rank = score_fn()
elif isinstance(weights, dict):
weight_args = []
for field in cls._meta.sorted_fields:
# Attempt to get the specified weight of the field by looking
# it up using it's field instance followed by name.
field_weight = weights.get(field, weights.get(field.name, 1.0))
weight_args.append(field_weight)
rank = score_fn(*weight_args)
else:
rank = score_fn(*weights)
selection = ()
order_by = rank
if with_score:
selection = (cls, rank.alias(score_alias))
if with_score and not explicit_ordering:
order_by = SQL(score_alias)
return (cls
.select(*selection)
.where(cls.match(term))
.order_by(order_by))
@classmethod
@classmethod
def search_bm25(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.bm25,
explicit_ordering)
@classmethod
def search_bm25f(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.bm25f,
explicit_ordering)
@classmethod
def search_lucene(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.lucene,
explicit_ordering)
|
coleifer/peewee | playhouse/sqlite_ext.py | FTSModel.search_bm25 | python | def search_bm25(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
return cls._search(
term,
weights,
with_score,
score_alias,
cls.bm25,
explicit_ordering) | Full-text search for selected `term` using BM25 algorithm. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/sqlite_ext.py#L403-L412 | null | class FTSModel(BaseFTSModel):
"""
VirtualModel class for creating tables that use either the FTS3 or FTS4
search extensions. Peewee automatically determines which version of the
FTS extension is supported and will use FTS4 if possible.
"""
# FTS3/4 uses "docid" in the same way a normal table uses "rowid".
docid = DocIDField()
class Meta:
extension_module = 'FTS%s' % FTS_VERSION
@classmethod
def _fts_cmd(cls, cmd):
tbl = cls._meta.table_name
res = cls._meta.database.execute_sql(
"INSERT INTO %s(%s) VALUES('%s');" % (tbl, tbl, cmd))
return res.fetchone()
@classmethod
def optimize(cls):
return cls._fts_cmd('optimize')
@classmethod
def rebuild(cls):
return cls._fts_cmd('rebuild')
@classmethod
def integrity_check(cls):
return cls._fts_cmd('integrity-check')
@classmethod
def merge(cls, blocks=200, segments=8):
return cls._fts_cmd('merge=%s,%s' % (blocks, segments))
@classmethod
def automerge(cls, state=True):
return cls._fts_cmd('automerge=%s' % (state and '1' or '0'))
@classmethod
def match(cls, term):
"""
Generate a `MATCH` expression appropriate for searching this table.
"""
return match(cls._meta.entity, term)
@classmethod
def rank(cls, *weights):
matchinfo = fn.matchinfo(cls._meta.entity, FTS3_MATCHINFO)
return fn.fts_rank(matchinfo, *weights)
@classmethod
def bm25(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_bm25(match_info, *weights)
@classmethod
def bm25f(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_bm25f(match_info, *weights)
@classmethod
def lucene(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_lucene(match_info, *weights)
@classmethod
def _search(cls, term, weights, with_score, score_alias, score_fn,
explicit_ordering):
if not weights:
rank = score_fn()
elif isinstance(weights, dict):
weight_args = []
for field in cls._meta.sorted_fields:
# Attempt to get the specified weight of the field by looking
# it up using it's field instance followed by name.
field_weight = weights.get(field, weights.get(field.name, 1.0))
weight_args.append(field_weight)
rank = score_fn(*weight_args)
else:
rank = score_fn(*weights)
selection = ()
order_by = rank
if with_score:
selection = (cls, rank.alias(score_alias))
if with_score and not explicit_ordering:
order_by = SQL(score_alias)
return (cls
.select(*selection)
.where(cls.match(term))
.order_by(order_by))
@classmethod
def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
"""Full-text search using selected `term`."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.rank,
explicit_ordering)
@classmethod
@classmethod
def search_bm25f(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.bm25f,
explicit_ordering)
@classmethod
def search_lucene(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.lucene,
explicit_ordering)
|
coleifer/peewee | playhouse/sqlite_ext.py | FTSModel.search_bm25f | python | def search_bm25f(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
return cls._search(
term,
weights,
with_score,
score_alias,
cls.bm25f,
explicit_ordering) | Full-text search for selected `term` using BM25 algorithm. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/sqlite_ext.py#L415-L424 | null | class FTSModel(BaseFTSModel):
"""
VirtualModel class for creating tables that use either the FTS3 or FTS4
search extensions. Peewee automatically determines which version of the
FTS extension is supported and will use FTS4 if possible.
"""
# FTS3/4 uses "docid" in the same way a normal table uses "rowid".
docid = DocIDField()
class Meta:
extension_module = 'FTS%s' % FTS_VERSION
@classmethod
def _fts_cmd(cls, cmd):
tbl = cls._meta.table_name
res = cls._meta.database.execute_sql(
"INSERT INTO %s(%s) VALUES('%s');" % (tbl, tbl, cmd))
return res.fetchone()
@classmethod
def optimize(cls):
return cls._fts_cmd('optimize')
@classmethod
def rebuild(cls):
return cls._fts_cmd('rebuild')
@classmethod
def integrity_check(cls):
return cls._fts_cmd('integrity-check')
@classmethod
def merge(cls, blocks=200, segments=8):
return cls._fts_cmd('merge=%s,%s' % (blocks, segments))
@classmethod
def automerge(cls, state=True):
return cls._fts_cmd('automerge=%s' % (state and '1' or '0'))
@classmethod
def match(cls, term):
"""
Generate a `MATCH` expression appropriate for searching this table.
"""
return match(cls._meta.entity, term)
@classmethod
def rank(cls, *weights):
matchinfo = fn.matchinfo(cls._meta.entity, FTS3_MATCHINFO)
return fn.fts_rank(matchinfo, *weights)
@classmethod
def bm25(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_bm25(match_info, *weights)
@classmethod
def bm25f(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_bm25f(match_info, *weights)
@classmethod
def lucene(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_lucene(match_info, *weights)
@classmethod
def _search(cls, term, weights, with_score, score_alias, score_fn,
explicit_ordering):
if not weights:
rank = score_fn()
elif isinstance(weights, dict):
weight_args = []
for field in cls._meta.sorted_fields:
# Attempt to get the specified weight of the field by looking
# it up using it's field instance followed by name.
field_weight = weights.get(field, weights.get(field.name, 1.0))
weight_args.append(field_weight)
rank = score_fn(*weight_args)
else:
rank = score_fn(*weights)
selection = ()
order_by = rank
if with_score:
selection = (cls, rank.alias(score_alias))
if with_score and not explicit_ordering:
order_by = SQL(score_alias)
return (cls
.select(*selection)
.where(cls.match(term))
.order_by(order_by))
@classmethod
def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
"""Full-text search using selected `term`."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.rank,
explicit_ordering)
@classmethod
def search_bm25(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.bm25,
explicit_ordering)
@classmethod
@classmethod
def search_lucene(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.lucene,
explicit_ordering)
|
coleifer/peewee | playhouse/sqlite_ext.py | FTSModel.search_lucene | python | def search_lucene(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
return cls._search(
term,
weights,
with_score,
score_alias,
cls.lucene,
explicit_ordering) | Full-text search for selected `term` using BM25 algorithm. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/sqlite_ext.py#L427-L436 | null | class FTSModel(BaseFTSModel):
"""
VirtualModel class for creating tables that use either the FTS3 or FTS4
search extensions. Peewee automatically determines which version of the
FTS extension is supported and will use FTS4 if possible.
"""
# FTS3/4 uses "docid" in the same way a normal table uses "rowid".
docid = DocIDField()
class Meta:
extension_module = 'FTS%s' % FTS_VERSION
@classmethod
def _fts_cmd(cls, cmd):
tbl = cls._meta.table_name
res = cls._meta.database.execute_sql(
"INSERT INTO %s(%s) VALUES('%s');" % (tbl, tbl, cmd))
return res.fetchone()
@classmethod
def optimize(cls):
return cls._fts_cmd('optimize')
@classmethod
def rebuild(cls):
return cls._fts_cmd('rebuild')
@classmethod
def integrity_check(cls):
return cls._fts_cmd('integrity-check')
@classmethod
def merge(cls, blocks=200, segments=8):
return cls._fts_cmd('merge=%s,%s' % (blocks, segments))
@classmethod
def automerge(cls, state=True):
return cls._fts_cmd('automerge=%s' % (state and '1' or '0'))
@classmethod
def match(cls, term):
"""
Generate a `MATCH` expression appropriate for searching this table.
"""
return match(cls._meta.entity, term)
@classmethod
def rank(cls, *weights):
matchinfo = fn.matchinfo(cls._meta.entity, FTS3_MATCHINFO)
return fn.fts_rank(matchinfo, *weights)
@classmethod
def bm25(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_bm25(match_info, *weights)
@classmethod
def bm25f(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_bm25f(match_info, *weights)
@classmethod
def lucene(cls, *weights):
match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
return fn.fts_lucene(match_info, *weights)
@classmethod
def _search(cls, term, weights, with_score, score_alias, score_fn,
explicit_ordering):
if not weights:
rank = score_fn()
elif isinstance(weights, dict):
weight_args = []
for field in cls._meta.sorted_fields:
# Attempt to get the specified weight of the field by looking
# it up using it's field instance followed by name.
field_weight = weights.get(field, weights.get(field.name, 1.0))
weight_args.append(field_weight)
rank = score_fn(*weight_args)
else:
rank = score_fn(*weights)
selection = ()
order_by = rank
if with_score:
selection = (cls, rank.alias(score_alias))
if with_score and not explicit_ordering:
order_by = SQL(score_alias)
return (cls
.select(*selection)
.where(cls.match(term))
.order_by(order_by))
@classmethod
def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
"""Full-text search using selected `term`."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.rank,
explicit_ordering)
@classmethod
def search_bm25(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.bm25,
explicit_ordering)
@classmethod
def search_bm25f(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search for selected `term` using BM25 algorithm."""
return cls._search(
term,
weights,
with_score,
score_alias,
cls.bm25f,
explicit_ordering)
@classmethod
|
coleifer/peewee | playhouse/sqlite_ext.py | FTS5Model.validate_query | python | def validate_query(query):
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
continue
if set(token) & _invalid_ascii:
return False
return True | Simple helper function to indicate whether a search query is a
valid FTS5 query. Note: this simply looks at the characters being
used, and is not guaranteed to catch all problematic queries. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/sqlite_ext.py#L550-L562 | null | class FTS5Model(BaseFTSModel):
"""
Requires SQLite >= 3.9.0.
Table options:
content: table name of external content, or empty string for "contentless"
content_rowid: column name of external content primary key
prefix: integer(s). Ex: '2' or '2 3 4'
tokenize: porter, unicode61, ascii. Ex: 'porter unicode61'
The unicode tokenizer supports the following parameters:
* remove_diacritics (1 or 0, default is 1)
* tokenchars (string of characters, e.g. '-_'
* separators (string of characters)
Parameters are passed as alternating parameter name and value, so:
{'tokenize': "unicode61 remove_diacritics 0 tokenchars '-_'"}
Content-less tables:
If you don't need the full-text content in it's original form, you can
specify a content-less table. Searches and auxiliary functions will work
as usual, but the only values returned when SELECT-ing can be rowid. Also
content-less tables do not support UPDATE or DELETE.
External content tables:
You can set up triggers to sync these, e.g.
-- Create a table. And an external content fts5 table to index it.
CREATE TABLE tbl(a INTEGER PRIMARY KEY, b);
CREATE VIRTUAL TABLE ft USING fts5(b, content='tbl', content_rowid='a');
-- Triggers to keep the FTS index up to date.
CREATE TRIGGER tbl_ai AFTER INSERT ON tbl BEGIN
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
CREATE TRIGGER tbl_ad AFTER DELETE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
END;
CREATE TRIGGER tbl_au AFTER UPDATE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
Built-in auxiliary functions:
* bm25(tbl[, weight_0, ... weight_n])
* highlight(tbl, col_idx, prefix, suffix)
* snippet(tbl, col_idx, prefix, suffix, ?, max_tokens)
"""
# FTS5 does not support declared primary keys, but we can use the
# implicit rowid.
rowid = RowIDField()
class Meta:
extension_module = 'fts5'
_error_messages = {
'field_type': ('Besides the implicit `rowid` column, all columns must '
'be instances of SearchField'),
'index': 'Secondary indexes are not supported for FTS5 models',
'pk': 'FTS5 models must use the default `rowid` primary key',
}
@classmethod
def validate_model(cls):
# Perform FTS5-specific validation and options post-processing.
if cls._meta.primary_key.name != 'rowid':
raise ImproperlyConfigured(cls._error_messages['pk'])
for field in cls._meta.fields.values():
if not isinstance(field, (SearchField, RowIDField)):
raise ImproperlyConfigured(cls._error_messages['field_type'])
if cls._meta.indexes:
raise ImproperlyConfigured(cls._error_messages['index'])
@classmethod
def fts5_installed(cls):
if sqlite3.sqlite_version_info[:3] < FTS5_MIN_SQLITE_VERSION:
return False
# Test in-memory DB to determine if the FTS5 extension is installed.
tmp_db = sqlite3.connect(':memory:')
try:
tmp_db.execute('CREATE VIRTUAL TABLE fts5test USING fts5 (data);')
except:
try:
tmp_db.enable_load_extension(True)
tmp_db.load_extension('fts5')
except:
return False
else:
cls._meta.database.load_extension('fts5')
finally:
tmp_db.close()
return True
@staticmethod
@staticmethod
def clean_query(query, replace=chr(26)):
"""
Clean a query of invalid tokens.
"""
accum = []
any_invalid = False
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
accum.append(token)
continue
token_set = set(token)
invalid_for_token = token_set & _invalid_ascii
if invalid_for_token:
any_invalid = True
for c in invalid_for_token:
token = token.replace(c, replace)
accum.append(token)
if any_invalid:
return ' '.join(accum)
return query
@classmethod
def match(cls, term):
"""
Generate a `MATCH` expression appropriate for searching this table.
"""
return match(cls._meta.entity, term)
@classmethod
def rank(cls, *args):
return cls.bm25(*args) if args else SQL('rank')
@classmethod
def bm25(cls, *weights):
return fn.bm25(cls._meta.entity, *weights)
@classmethod
def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
"""Full-text search using selected `term`."""
return cls.search_bm25(
FTS5Model.clean_query(term),
weights,
with_score,
score_alias,
explicit_ordering)
@classmethod
def search_bm25(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search using selected `term`."""
if not weights:
rank = SQL('rank')
elif isinstance(weights, dict):
weight_args = []
for field in cls._meta.sorted_fields:
if isinstance(field, SearchField) and not field.unindexed:
weight_args.append(
weights.get(field, weights.get(field.name, 1.0)))
rank = fn.bm25(cls._meta.entity, *weight_args)
else:
rank = fn.bm25(cls._meta.entity, *weights)
selection = ()
order_by = rank
if with_score:
selection = (cls, rank.alias(score_alias))
if with_score and not explicit_ordering:
order_by = SQL(score_alias)
return (cls
.select(*selection)
.where(cls.match(FTS5Model.clean_query(term)))
.order_by(order_by))
@classmethod
def _fts_cmd_sql(cls, cmd, **extra_params):
tbl = cls._meta.entity
columns = [tbl]
values = [cmd]
for key, value in extra_params.items():
columns.append(Entity(key))
values.append(value)
return NodeList((
SQL('INSERT INTO'),
cls._meta.entity,
EnclosedNodeList(columns),
SQL('VALUES'),
EnclosedNodeList(values)))
@classmethod
def _fts_cmd(cls, cmd, **extra_params):
query = cls._fts_cmd_sql(cmd, **extra_params)
return cls._meta.database.execute(query)
@classmethod
def automerge(cls, level):
if not (0 <= level <= 16):
raise ValueError('level must be between 0 and 16')
return cls._fts_cmd('automerge', rank=level)
@classmethod
def merge(cls, npages):
return cls._fts_cmd('merge', rank=npages)
@classmethod
def set_pgsz(cls, pgsz):
return cls._fts_cmd('pgsz', rank=pgsz)
@classmethod
def set_rank(cls, rank_expression):
return cls._fts_cmd('rank', rank=rank_expression)
@classmethod
def delete_all(cls):
return cls._fts_cmd('delete-all')
@classmethod
def VocabModel(cls, table_type='row', table=None):
if table_type not in ('row', 'col', 'instance'):
raise ValueError('table_type must be either "row", "col" or '
'"instance".')
attr = '_vocab_model_%s' % table_type
if not hasattr(cls, attr):
class Meta:
database = cls._meta.database
table_name = table or cls._meta.table_name + '_v'
extension_module = fn.fts5vocab(
cls._meta.entity,
SQL(table_type))
attrs = {
'term': VirtualField(TextField),
'doc': IntegerField(),
'cnt': IntegerField(),
'rowid': RowIDField(),
'Meta': Meta,
}
if table_type == 'col':
attrs['col'] = VirtualField(TextField)
elif table_type == 'instance':
attrs['offset'] = VirtualField(IntegerField)
class_name = '%sVocab' % cls.__name__
setattr(cls, attr, type(class_name, (VirtualModel,), attrs))
return getattr(cls, attr)
|
coleifer/peewee | playhouse/sqlite_ext.py | FTS5Model.clean_query | python | def clean_query(query, replace=chr(26)):
accum = []
any_invalid = False
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
accum.append(token)
continue
token_set = set(token)
invalid_for_token = token_set & _invalid_ascii
if invalid_for_token:
any_invalid = True
for c in invalid_for_token:
token = token.replace(c, replace)
accum.append(token)
if any_invalid:
return ' '.join(accum)
return query | Clean a query of invalid tokens. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/sqlite_ext.py#L565-L586 | null | class FTS5Model(BaseFTSModel):
"""
Requires SQLite >= 3.9.0.
Table options:
content: table name of external content, or empty string for "contentless"
content_rowid: column name of external content primary key
prefix: integer(s). Ex: '2' or '2 3 4'
tokenize: porter, unicode61, ascii. Ex: 'porter unicode61'
The unicode tokenizer supports the following parameters:
* remove_diacritics (1 or 0, default is 1)
* tokenchars (string of characters, e.g. '-_'
* separators (string of characters)
Parameters are passed as alternating parameter name and value, so:
{'tokenize': "unicode61 remove_diacritics 0 tokenchars '-_'"}
Content-less tables:
If you don't need the full-text content in it's original form, you can
specify a content-less table. Searches and auxiliary functions will work
as usual, but the only values returned when SELECT-ing can be rowid. Also
content-less tables do not support UPDATE or DELETE.
External content tables:
You can set up triggers to sync these, e.g.
-- Create a table. And an external content fts5 table to index it.
CREATE TABLE tbl(a INTEGER PRIMARY KEY, b);
CREATE VIRTUAL TABLE ft USING fts5(b, content='tbl', content_rowid='a');
-- Triggers to keep the FTS index up to date.
CREATE TRIGGER tbl_ai AFTER INSERT ON tbl BEGIN
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
CREATE TRIGGER tbl_ad AFTER DELETE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
END;
CREATE TRIGGER tbl_au AFTER UPDATE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
Built-in auxiliary functions:
* bm25(tbl[, weight_0, ... weight_n])
* highlight(tbl, col_idx, prefix, suffix)
* snippet(tbl, col_idx, prefix, suffix, ?, max_tokens)
"""
# FTS5 does not support declared primary keys, but we can use the
# implicit rowid.
rowid = RowIDField()
class Meta:
extension_module = 'fts5'
_error_messages = {
'field_type': ('Besides the implicit `rowid` column, all columns must '
'be instances of SearchField'),
'index': 'Secondary indexes are not supported for FTS5 models',
'pk': 'FTS5 models must use the default `rowid` primary key',
}
@classmethod
def validate_model(cls):
# Perform FTS5-specific validation and options post-processing.
if cls._meta.primary_key.name != 'rowid':
raise ImproperlyConfigured(cls._error_messages['pk'])
for field in cls._meta.fields.values():
if not isinstance(field, (SearchField, RowIDField)):
raise ImproperlyConfigured(cls._error_messages['field_type'])
if cls._meta.indexes:
raise ImproperlyConfigured(cls._error_messages['index'])
@classmethod
def fts5_installed(cls):
if sqlite3.sqlite_version_info[:3] < FTS5_MIN_SQLITE_VERSION:
return False
# Test in-memory DB to determine if the FTS5 extension is installed.
tmp_db = sqlite3.connect(':memory:')
try:
tmp_db.execute('CREATE VIRTUAL TABLE fts5test USING fts5 (data);')
except:
try:
tmp_db.enable_load_extension(True)
tmp_db.load_extension('fts5')
except:
return False
else:
cls._meta.database.load_extension('fts5')
finally:
tmp_db.close()
return True
@staticmethod
def validate_query(query):
"""
Simple helper function to indicate whether a search query is a
valid FTS5 query. Note: this simply looks at the characters being
used, and is not guaranteed to catch all problematic queries.
"""
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
continue
if set(token) & _invalid_ascii:
return False
return True
@staticmethod
@classmethod
def match(cls, term):
"""
Generate a `MATCH` expression appropriate for searching this table.
"""
return match(cls._meta.entity, term)
@classmethod
def rank(cls, *args):
return cls.bm25(*args) if args else SQL('rank')
@classmethod
def bm25(cls, *weights):
return fn.bm25(cls._meta.entity, *weights)
@classmethod
def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
"""Full-text search using selected `term`."""
return cls.search_bm25(
FTS5Model.clean_query(term),
weights,
with_score,
score_alias,
explicit_ordering)
@classmethod
def search_bm25(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search using selected `term`."""
if not weights:
rank = SQL('rank')
elif isinstance(weights, dict):
weight_args = []
for field in cls._meta.sorted_fields:
if isinstance(field, SearchField) and not field.unindexed:
weight_args.append(
weights.get(field, weights.get(field.name, 1.0)))
rank = fn.bm25(cls._meta.entity, *weight_args)
else:
rank = fn.bm25(cls._meta.entity, *weights)
selection = ()
order_by = rank
if with_score:
selection = (cls, rank.alias(score_alias))
if with_score and not explicit_ordering:
order_by = SQL(score_alias)
return (cls
.select(*selection)
.where(cls.match(FTS5Model.clean_query(term)))
.order_by(order_by))
@classmethod
def _fts_cmd_sql(cls, cmd, **extra_params):
tbl = cls._meta.entity
columns = [tbl]
values = [cmd]
for key, value in extra_params.items():
columns.append(Entity(key))
values.append(value)
return NodeList((
SQL('INSERT INTO'),
cls._meta.entity,
EnclosedNodeList(columns),
SQL('VALUES'),
EnclosedNodeList(values)))
@classmethod
def _fts_cmd(cls, cmd, **extra_params):
query = cls._fts_cmd_sql(cmd, **extra_params)
return cls._meta.database.execute(query)
@classmethod
def automerge(cls, level):
if not (0 <= level <= 16):
raise ValueError('level must be between 0 and 16')
return cls._fts_cmd('automerge', rank=level)
@classmethod
def merge(cls, npages):
return cls._fts_cmd('merge', rank=npages)
@classmethod
def set_pgsz(cls, pgsz):
return cls._fts_cmd('pgsz', rank=pgsz)
@classmethod
def set_rank(cls, rank_expression):
return cls._fts_cmd('rank', rank=rank_expression)
@classmethod
def delete_all(cls):
return cls._fts_cmd('delete-all')
@classmethod
def VocabModel(cls, table_type='row', table=None):
if table_type not in ('row', 'col', 'instance'):
raise ValueError('table_type must be either "row", "col" or '
'"instance".')
attr = '_vocab_model_%s' % table_type
if not hasattr(cls, attr):
class Meta:
database = cls._meta.database
table_name = table or cls._meta.table_name + '_v'
extension_module = fn.fts5vocab(
cls._meta.entity,
SQL(table_type))
attrs = {
'term': VirtualField(TextField),
'doc': IntegerField(),
'cnt': IntegerField(),
'rowid': RowIDField(),
'Meta': Meta,
}
if table_type == 'col':
attrs['col'] = VirtualField(TextField)
elif table_type == 'instance':
attrs['offset'] = VirtualField(IntegerField)
class_name = '%sVocab' % cls.__name__
setattr(cls, attr, type(class_name, (VirtualModel,), attrs))
return getattr(cls, attr)
|
coleifer/peewee | playhouse/sqlite_ext.py | FTS5Model.search | python | def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
return cls.search_bm25(
FTS5Model.clean_query(term),
weights,
with_score,
score_alias,
explicit_ordering) | Full-text search using selected `term`. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/sqlite_ext.py#L604-L612 | null | class FTS5Model(BaseFTSModel):
"""
Requires SQLite >= 3.9.0.
Table options:
content: table name of external content, or empty string for "contentless"
content_rowid: column name of external content primary key
prefix: integer(s). Ex: '2' or '2 3 4'
tokenize: porter, unicode61, ascii. Ex: 'porter unicode61'
The unicode tokenizer supports the following parameters:
* remove_diacritics (1 or 0, default is 1)
* tokenchars (string of characters, e.g. '-_'
* separators (string of characters)
Parameters are passed as alternating parameter name and value, so:
{'tokenize': "unicode61 remove_diacritics 0 tokenchars '-_'"}
Content-less tables:
If you don't need the full-text content in it's original form, you can
specify a content-less table. Searches and auxiliary functions will work
as usual, but the only values returned when SELECT-ing can be rowid. Also
content-less tables do not support UPDATE or DELETE.
External content tables:
You can set up triggers to sync these, e.g.
-- Create a table. And an external content fts5 table to index it.
CREATE TABLE tbl(a INTEGER PRIMARY KEY, b);
CREATE VIRTUAL TABLE ft USING fts5(b, content='tbl', content_rowid='a');
-- Triggers to keep the FTS index up to date.
CREATE TRIGGER tbl_ai AFTER INSERT ON tbl BEGIN
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
CREATE TRIGGER tbl_ad AFTER DELETE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
END;
CREATE TRIGGER tbl_au AFTER UPDATE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
Built-in auxiliary functions:
* bm25(tbl[, weight_0, ... weight_n])
* highlight(tbl, col_idx, prefix, suffix)
* snippet(tbl, col_idx, prefix, suffix, ?, max_tokens)
"""
# FTS5 does not support declared primary keys, but we can use the
# implicit rowid.
rowid = RowIDField()
class Meta:
extension_module = 'fts5'
_error_messages = {
'field_type': ('Besides the implicit `rowid` column, all columns must '
'be instances of SearchField'),
'index': 'Secondary indexes are not supported for FTS5 models',
'pk': 'FTS5 models must use the default `rowid` primary key',
}
@classmethod
def validate_model(cls):
# Perform FTS5-specific validation and options post-processing.
if cls._meta.primary_key.name != 'rowid':
raise ImproperlyConfigured(cls._error_messages['pk'])
for field in cls._meta.fields.values():
if not isinstance(field, (SearchField, RowIDField)):
raise ImproperlyConfigured(cls._error_messages['field_type'])
if cls._meta.indexes:
raise ImproperlyConfigured(cls._error_messages['index'])
@classmethod
def fts5_installed(cls):
if sqlite3.sqlite_version_info[:3] < FTS5_MIN_SQLITE_VERSION:
return False
# Test in-memory DB to determine if the FTS5 extension is installed.
tmp_db = sqlite3.connect(':memory:')
try:
tmp_db.execute('CREATE VIRTUAL TABLE fts5test USING fts5 (data);')
except:
try:
tmp_db.enable_load_extension(True)
tmp_db.load_extension('fts5')
except:
return False
else:
cls._meta.database.load_extension('fts5')
finally:
tmp_db.close()
return True
@staticmethod
def validate_query(query):
"""
Simple helper function to indicate whether a search query is a
valid FTS5 query. Note: this simply looks at the characters being
used, and is not guaranteed to catch all problematic queries.
"""
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
continue
if set(token) & _invalid_ascii:
return False
return True
@staticmethod
def clean_query(query, replace=chr(26)):
"""
Clean a query of invalid tokens.
"""
accum = []
any_invalid = False
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
accum.append(token)
continue
token_set = set(token)
invalid_for_token = token_set & _invalid_ascii
if invalid_for_token:
any_invalid = True
for c in invalid_for_token:
token = token.replace(c, replace)
accum.append(token)
if any_invalid:
return ' '.join(accum)
return query
@classmethod
def match(cls, term):
"""
Generate a `MATCH` expression appropriate for searching this table.
"""
return match(cls._meta.entity, term)
@classmethod
def rank(cls, *args):
return cls.bm25(*args) if args else SQL('rank')
@classmethod
def bm25(cls, *weights):
return fn.bm25(cls._meta.entity, *weights)
@classmethod
@classmethod
def search_bm25(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search using selected `term`."""
if not weights:
rank = SQL('rank')
elif isinstance(weights, dict):
weight_args = []
for field in cls._meta.sorted_fields:
if isinstance(field, SearchField) and not field.unindexed:
weight_args.append(
weights.get(field, weights.get(field.name, 1.0)))
rank = fn.bm25(cls._meta.entity, *weight_args)
else:
rank = fn.bm25(cls._meta.entity, *weights)
selection = ()
order_by = rank
if with_score:
selection = (cls, rank.alias(score_alias))
if with_score and not explicit_ordering:
order_by = SQL(score_alias)
return (cls
.select(*selection)
.where(cls.match(FTS5Model.clean_query(term)))
.order_by(order_by))
@classmethod
def _fts_cmd_sql(cls, cmd, **extra_params):
tbl = cls._meta.entity
columns = [tbl]
values = [cmd]
for key, value in extra_params.items():
columns.append(Entity(key))
values.append(value)
return NodeList((
SQL('INSERT INTO'),
cls._meta.entity,
EnclosedNodeList(columns),
SQL('VALUES'),
EnclosedNodeList(values)))
@classmethod
def _fts_cmd(cls, cmd, **extra_params):
query = cls._fts_cmd_sql(cmd, **extra_params)
return cls._meta.database.execute(query)
@classmethod
def automerge(cls, level):
if not (0 <= level <= 16):
raise ValueError('level must be between 0 and 16')
return cls._fts_cmd('automerge', rank=level)
@classmethod
def merge(cls, npages):
return cls._fts_cmd('merge', rank=npages)
@classmethod
def set_pgsz(cls, pgsz):
return cls._fts_cmd('pgsz', rank=pgsz)
@classmethod
def set_rank(cls, rank_expression):
return cls._fts_cmd('rank', rank=rank_expression)
@classmethod
def delete_all(cls):
return cls._fts_cmd('delete-all')
@classmethod
def VocabModel(cls, table_type='row', table=None):
if table_type not in ('row', 'col', 'instance'):
raise ValueError('table_type must be either "row", "col" or '
'"instance".')
attr = '_vocab_model_%s' % table_type
if not hasattr(cls, attr):
class Meta:
database = cls._meta.database
table_name = table or cls._meta.table_name + '_v'
extension_module = fn.fts5vocab(
cls._meta.entity,
SQL(table_type))
attrs = {
'term': VirtualField(TextField),
'doc': IntegerField(),
'cnt': IntegerField(),
'rowid': RowIDField(),
'Meta': Meta,
}
if table_type == 'col':
attrs['col'] = VirtualField(TextField)
elif table_type == 'instance':
attrs['offset'] = VirtualField(IntegerField)
class_name = '%sVocab' % cls.__name__
setattr(cls, attr, type(class_name, (VirtualModel,), attrs))
return getattr(cls, attr)
|
coleifer/peewee | playhouse/sqlite_ext.py | FTS5Model.search_bm25 | python | def search_bm25(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
if not weights:
rank = SQL('rank')
elif isinstance(weights, dict):
weight_args = []
for field in cls._meta.sorted_fields:
if isinstance(field, SearchField) and not field.unindexed:
weight_args.append(
weights.get(field, weights.get(field.name, 1.0)))
rank = fn.bm25(cls._meta.entity, *weight_args)
else:
rank = fn.bm25(cls._meta.entity, *weights)
selection = ()
order_by = rank
if with_score:
selection = (cls, rank.alias(score_alias))
if with_score and not explicit_ordering:
order_by = SQL(score_alias)
return (cls
.select(*selection)
.where(cls.match(FTS5Model.clean_query(term)))
.order_by(order_by)) | Full-text search using selected `term`. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/sqlite_ext.py#L615-L640 | [
"def alias(self, alias):\n if alias:\n return Alias(self, alias)\n return self\n"
] | class FTS5Model(BaseFTSModel):
"""
Requires SQLite >= 3.9.0.
Table options:
content: table name of external content, or empty string for "contentless"
content_rowid: column name of external content primary key
prefix: integer(s). Ex: '2' or '2 3 4'
tokenize: porter, unicode61, ascii. Ex: 'porter unicode61'
The unicode tokenizer supports the following parameters:
* remove_diacritics (1 or 0, default is 1)
* tokenchars (string of characters, e.g. '-_'
* separators (string of characters)
Parameters are passed as alternating parameter name and value, so:
{'tokenize': "unicode61 remove_diacritics 0 tokenchars '-_'"}
Content-less tables:
If you don't need the full-text content in it's original form, you can
specify a content-less table. Searches and auxiliary functions will work
as usual, but the only values returned when SELECT-ing can be rowid. Also
content-less tables do not support UPDATE or DELETE.
External content tables:
You can set up triggers to sync these, e.g.
-- Create a table. And an external content fts5 table to index it.
CREATE TABLE tbl(a INTEGER PRIMARY KEY, b);
CREATE VIRTUAL TABLE ft USING fts5(b, content='tbl', content_rowid='a');
-- Triggers to keep the FTS index up to date.
CREATE TRIGGER tbl_ai AFTER INSERT ON tbl BEGIN
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
CREATE TRIGGER tbl_ad AFTER DELETE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
END;
CREATE TRIGGER tbl_au AFTER UPDATE ON tbl BEGIN
INSERT INTO ft(fts_idx, rowid, b) VALUES('delete', old.a, old.b);
INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
END;
Built-in auxiliary functions:
* bm25(tbl[, weight_0, ... weight_n])
* highlight(tbl, col_idx, prefix, suffix)
* snippet(tbl, col_idx, prefix, suffix, ?, max_tokens)
"""
# FTS5 does not support declared primary keys, but we can use the
# implicit rowid.
rowid = RowIDField()
class Meta:
extension_module = 'fts5'
_error_messages = {
'field_type': ('Besides the implicit `rowid` column, all columns must '
'be instances of SearchField'),
'index': 'Secondary indexes are not supported for FTS5 models',
'pk': 'FTS5 models must use the default `rowid` primary key',
}
@classmethod
def validate_model(cls):
# Perform FTS5-specific validation and options post-processing.
if cls._meta.primary_key.name != 'rowid':
raise ImproperlyConfigured(cls._error_messages['pk'])
for field in cls._meta.fields.values():
if not isinstance(field, (SearchField, RowIDField)):
raise ImproperlyConfigured(cls._error_messages['field_type'])
if cls._meta.indexes:
raise ImproperlyConfigured(cls._error_messages['index'])
@classmethod
def fts5_installed(cls):
if sqlite3.sqlite_version_info[:3] < FTS5_MIN_SQLITE_VERSION:
return False
# Test in-memory DB to determine if the FTS5 extension is installed.
tmp_db = sqlite3.connect(':memory:')
try:
tmp_db.execute('CREATE VIRTUAL TABLE fts5test USING fts5 (data);')
except:
try:
tmp_db.enable_load_extension(True)
tmp_db.load_extension('fts5')
except:
return False
else:
cls._meta.database.load_extension('fts5')
finally:
tmp_db.close()
return True
@staticmethod
def validate_query(query):
"""
Simple helper function to indicate whether a search query is a
valid FTS5 query. Note: this simply looks at the characters being
used, and is not guaranteed to catch all problematic queries.
"""
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
continue
if set(token) & _invalid_ascii:
return False
return True
@staticmethod
def clean_query(query, replace=chr(26)):
"""
Clean a query of invalid tokens.
"""
accum = []
any_invalid = False
tokens = _quote_re.findall(query)
for token in tokens:
if token.startswith('"') and token.endswith('"'):
accum.append(token)
continue
token_set = set(token)
invalid_for_token = token_set & _invalid_ascii
if invalid_for_token:
any_invalid = True
for c in invalid_for_token:
token = token.replace(c, replace)
accum.append(token)
if any_invalid:
return ' '.join(accum)
return query
@classmethod
def match(cls, term):
"""
Generate a `MATCH` expression appropriate for searching this table.
"""
return match(cls._meta.entity, term)
@classmethod
def rank(cls, *args):
return cls.bm25(*args) if args else SQL('rank')
@classmethod
def bm25(cls, *weights):
return fn.bm25(cls._meta.entity, *weights)
@classmethod
def search(cls, term, weights=None, with_score=False, score_alias='score',
explicit_ordering=False):
"""Full-text search using selected `term`."""
return cls.search_bm25(
FTS5Model.clean_query(term),
weights,
with_score,
score_alias,
explicit_ordering)
@classmethod
@classmethod
def _fts_cmd_sql(cls, cmd, **extra_params):
tbl = cls._meta.entity
columns = [tbl]
values = [cmd]
for key, value in extra_params.items():
columns.append(Entity(key))
values.append(value)
return NodeList((
SQL('INSERT INTO'),
cls._meta.entity,
EnclosedNodeList(columns),
SQL('VALUES'),
EnclosedNodeList(values)))
@classmethod
def _fts_cmd(cls, cmd, **extra_params):
query = cls._fts_cmd_sql(cmd, **extra_params)
return cls._meta.database.execute(query)
@classmethod
def automerge(cls, level):
if not (0 <= level <= 16):
raise ValueError('level must be between 0 and 16')
return cls._fts_cmd('automerge', rank=level)
@classmethod
def merge(cls, npages):
return cls._fts_cmd('merge', rank=npages)
@classmethod
def set_pgsz(cls, pgsz):
return cls._fts_cmd('pgsz', rank=pgsz)
@classmethod
def set_rank(cls, rank_expression):
return cls._fts_cmd('rank', rank=rank_expression)
@classmethod
def delete_all(cls):
return cls._fts_cmd('delete-all')
@classmethod
def VocabModel(cls, table_type='row', table=None):
if table_type not in ('row', 'col', 'instance'):
raise ValueError('table_type must be either "row", "col" or '
'"instance".')
attr = '_vocab_model_%s' % table_type
if not hasattr(cls, attr):
class Meta:
database = cls._meta.database
table_name = table or cls._meta.table_name + '_v'
extension_module = fn.fts5vocab(
cls._meta.entity,
SQL(table_type))
attrs = {
'term': VirtualField(TextField),
'doc': IntegerField(),
'cnt': IntegerField(),
'rowid': RowIDField(),
'Meta': Meta,
}
if table_type == 'col':
attrs['col'] = VirtualField(TextField)
elif table_type == 'instance':
attrs['offset'] = VirtualField(IntegerField)
class_name = '%sVocab' % cls.__name__
setattr(cls, attr, type(class_name, (VirtualModel,), attrs))
return getattr(cls, attr)
|
coleifer/peewee | playhouse/pool.py | PooledDatabase.manual_close | python | def manual_close(self):
if self.is_closed():
return False
# Obtain reference to the connection in-use by the calling thread.
conn = self.connection()
# A connection will only be re-added to the available list if it is
# marked as "in use" at the time it is closed. We will explicitly
# remove it from the "in use" list, call "close()" for the
# side-effects, and then explicitly close the connection.
self._in_use.pop(self.conn_key(conn), None)
self.close()
self._close(conn, close_conn=True) | Close the underlying connection without returning it to the pool. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/pool.py#L190-L206 | [
"def is_closed(self):\n return self._state.closed\n"
class PooledDatabase(object):
    """Mixin adding connection pooling to a peewee ``Database`` subclass.

    Idle connections live in ``_connections``, a min-heap ordered by creation
    timestamp (the oldest connection is handed out first).  Checked-out
    connections are tracked in ``_in_use``, a dict keyed by ``id(conn)``.
    """

    def __init__(self, database, max_connections=20, stale_timeout=None,
                 timeout=None, **kwargs):
        # max_connections: cap on total open connections (idle + in-use).
        # stale_timeout: seconds after which a connection is considered stale
        #     and is closed rather than reused.
        # timeout: seconds connect() will block waiting for a free slot;
        #     0 is normalized to "wait forever".
        self._max_connections = make_int(max_connections)
        self._stale_timeout = make_int(stale_timeout)
        self._wait_timeout = make_int(timeout)
        if self._wait_timeout == 0:
            self._wait_timeout = float('inf')

        # Available / idle connections stored in a heap, sorted oldest first.
        self._connections = []

        # Mapping of connection id to PoolConnection. Ordinarily we would want
        # to use something like a WeakKeyDictionary, but Python typically won't
        # allow us to create weak references to connection objects.
        self._in_use = {}

        # Use the memory address of the connection as the key in the event the
        # connection object is not hashable. Connections will not get
        # garbage-collected, however, because a reference to them will persist
        # in "_in_use" as long as the conn has not been closed.
        self.conn_key = id

        super(PooledDatabase, self).__init__(database, **kwargs)

    def init(self, database, max_connections=None, stale_timeout=None,
             timeout=None, **connect_kwargs):
        """Deferred initialization; pool parameters may be re-specified."""
        super(PooledDatabase, self).init(database, **connect_kwargs)
        if max_connections is not None:
            self._max_connections = make_int(max_connections)
        if stale_timeout is not None:
            self._stale_timeout = make_int(stale_timeout)
        if timeout is not None:
            self._wait_timeout = make_int(timeout)
        if self._wait_timeout == 0:
            self._wait_timeout = float('inf')

    def connect(self, reuse_if_open=False):
        """Open a connection, waiting up to ``_wait_timeout`` seconds for a
        pool slot when the pool is saturated.

        Raises MaxConnectionsExceeded when no slot frees up in time.
        """
        if not self._wait_timeout:
            return super(PooledDatabase, self).connect(reuse_if_open)

        expires = time.time() + self._wait_timeout
        while expires > time.time():
            try:
                ret = super(PooledDatabase, self).connect(reuse_if_open)
            except MaxConnectionsExceeded:
                time.sleep(0.1)  # Poll until another caller returns a conn.
            else:
                return ret
        raise MaxConnectionsExceeded('Max connections exceeded, timed out '
                                     'attempting to connect.')

    def _connect(self):
        """Check out an idle pooled connection, or create a new one."""
        while True:
            try:
                # Remove the oldest connection from the heap.
                ts, conn = heapq.heappop(self._connections)
                key = self.conn_key(conn)
            except IndexError:
                ts = conn = None
                logger.debug('No connection available in pool.')
                break
            else:
                if self._is_closed(conn):
                    # This connecton was closed, but since it was not stale
                    # it got added back to the queue of available conns. We
                    # then closed it and marked it as explicitly closed, so
                    # it's safe to throw it away now.
                    # (Because Database.close() calls Database._close()).
                    logger.debug('Connection %s was closed.', key)
                    ts = conn = None
                elif self._stale_timeout and self._is_stale(ts):
                    # If we are attempting to check out a stale connection,
                    # then close it. We don't need to mark it in the "closed"
                    # set, because it is not in the list of available conns
                    # anymore.
                    logger.debug('Connection %s was stale, closing.', key)
                    self._close(conn, True)
                    ts = conn = None
                else:
                    break

        if conn is None:
            if self._max_connections and (
                    len(self._in_use) >= self._max_connections):
                raise MaxConnectionsExceeded('Exceeded maximum connections.')
            conn = super(PooledDatabase, self)._connect()
            ts = time.time()
            key = self.conn_key(conn)
            logger.debug('Created new connection %s.', key)

        # Record creation time and check-out time for stale detection.
        self._in_use[key] = PoolConnection(ts, conn, time.time())
        return conn

    def _is_stale(self, timestamp):
        # Called on check-out and check-in to ensure the connection has
        # not outlived the stale timeout.
        return (time.time() - timestamp) > self._stale_timeout

    def _is_closed(self, conn):
        # Hook for driver-specific subclasses; base assumes never closed.
        return False

    def _can_reuse(self, conn):
        # Called on check-in to make sure the connection can be re-used.
        return True

    def _close(self, conn, close_conn=False):
        """Check a connection back in, returning it to the pool unless it is
        stale, unusable, or ``close_conn`` forces a real close."""
        key = self.conn_key(conn)
        if close_conn:
            super(PooledDatabase, self)._close(conn)
        elif key in self._in_use:
            pool_conn = self._in_use.pop(key)
            if self._stale_timeout and self._is_stale(pool_conn.timestamp):
                logger.debug('Closing stale connection %s.', key)
                super(PooledDatabase, self)._close(conn)
            elif self._can_reuse(conn):
                logger.debug('Returning %s to pool.', key)
                heapq.heappush(self._connections, (pool_conn.timestamp, conn))
            else:
                logger.debug('Closed %s.', key)

    def close_idle(self):
        # Close any open connections that are not currently in-use.
        # NOTE(review): self._lock is presumably provided by the base
        # Database class -- confirm in peewee core.
        with self._lock:
            for _, conn in self._connections:
                self._close(conn, close_conn=True)
            self._connections = []

    def close_stale(self, age=600):
        """Close in-use connections checked out more than ``age`` seconds
        ago; returns the number of connections closed."""
        # Close any connections that are in-use but were checked out quite some
        # time ago and can be considered stale.
        with self._lock:
            in_use = {}
            cutoff = time.time() - age
            n = 0
            for key, pool_conn in self._in_use.items():
                if pool_conn.checked_out < cutoff:
                    self._close(pool_conn.connection, close_conn=True)
                    n += 1
                else:
                    in_use[key] = pool_conn
            self._in_use = in_use
            return n

    def close_all(self):
        # Close all connections -- available and in-use. Warning: may break any
        # active connections used by other threads.
        self.close()
        with self._lock:
            for _, conn in self._connections:
                self._close(conn, close_conn=True)
            for pool_conn in self._in_use.values():
                self._close(pool_conn.connection, close_conn=True)
            self._connections = []
            self._in_use = {}
|
    def top_pages_by_time_period(self, interval='day'):
        """
        Get a breakdown of top pages per interval, i.e.

            day         url     count
            2014-01-01  /blog/  11
            2014-01-02  /blog/  14
            2014-01-03  /blog/  9
        """
        # date_trunc buckets timestamps to the requested granularity
        # (any value accepted by Postgres date_trunc: 'day', 'week', ...).
        date_trunc = fn.date_trunc(interval, PageView.timestamp)
        return (self.get_query()
                .select(
                    PageView.url,
                    date_trunc.alias(interval),
                    fn.Count(PageView.id).alias('count'))
                .group_by(PageView.url, date_trunc)
                .order_by(
                    SQL(interval),
                    SQL('count').desc(),
                    PageView.url))
"def desc(self, collation=None, nulls=None):\n return Desc(self, collation=collation, nulls=nulls)\n",
"def get_query(self):\n query = PageView.select().where(PageView.account == self.account)\n if self.date_range:\n query = query.where(PageView.timestamp.between(*self.date_range))\n return que... | class Report(object):
def __init__(self, account_id=DEFAULT_ACCOUNT_ID):
self.account = Account.get(Account.id == account_id)
self.date_range = None
def get_query(self):
query = PageView.select().where(PageView.account == self.account)
if self.date_range:
query = query.where(PageView.timestamp.between(*self.date_range))
return query
def cookies(self):
"""
Retrieve the cookies header from all the users who visited.
"""
return (self.get_query()
.select(PageView.ip, PageView.headers['Cookie'])
.where(PageView.headers['Cookie'].is_null(False))
.tuples())
def user_agents(self):
"""
Retrieve user-agents, sorted by most common to least common.
"""
return (self.get_query()
.select(
PageView.headers['User-Agent'],
fn.Count(PageView.id))
.group_by(PageView.headers['User-Agent'])
.order_by(fn.Count(PageView.id).desc())
.tuples())
def languages(self):
"""
Retrieve languages, sorted by most common to least common. The
Accept-Languages header sometimes looks weird, i.e.
"en-US,en;q=0.8,is;q=0.6,da;q=0.4" We will split on the first semi-
colon.
"""
language = PageView.headers['Accept-Language']
first_language = fn.SubStr(
language, # String to slice.
1, # Left index.
fn.StrPos(language, ';'))
return (self.get_query()
.select(first_language, fn.Count(PageView.id))
.group_by(first_language)
.order_by(fn.Count(PageView.id).desc())
.tuples())
def trail(self):
"""
Get all visitors by IP and then list the pages they visited in order.
"""
inner = (self.get_query()
.select(PageView.ip, PageView.url)
.order_by(PageView.timestamp))
return (PageView
.select(
PageView.ip,
fn.array_agg(PageView.url).alias('urls'))
.from_(inner.alias('t1'))
.group_by(PageView.ip))
def _referrer_clause(self, domain_only=True):
if domain_only:
return fn.SubString(Clause(
PageView.referrer, SQL('FROM'), '.*://([^/]*)'))
return PageView.referrer
def top_referrers(self, domain_only=True):
"""
What domains send us the most traffic?
"""
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, fn.Count(PageView.id))
.group_by(referrer)
.order_by(fn.Count(PageView.id).desc())
.tuples())
def referrers_for_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(PageView.url, referrer, fn.Count(PageView.id))
.group_by(PageView.url, referrer)
.order_by(PageView.url, fn.Count(PageView.id).desc())
.tuples())
def referrers_to_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, PageView.url, fn.Count(PageView.id))
.group_by(referrer, PageView.url)
.order_by(referrer, fn.Count(PageView.id).desc())
.tuples())
|
coleifer/peewee | examples/analytics/reports.py | Report.cookies | python | def cookies(self):
return (self.get_query()
.select(PageView.ip, PageView.headers['Cookie'])
.where(PageView.headers['Cookie'].is_null(False))
.tuples()) | Retrieve the cookies header from all the users who visited. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/examples/analytics/reports.py#L40-L47 | [
"def get_query(self):\n query = PageView.select().where(PageView.account == self.account)\n if self.date_range:\n query = query.where(PageView.timestamp.between(*self.date_range))\n return query\n"
] | class Report(object):
def __init__(self, account_id=DEFAULT_ACCOUNT_ID):
self.account = Account.get(Account.id == account_id)
self.date_range = None
def get_query(self):
query = PageView.select().where(PageView.account == self.account)
if self.date_range:
query = query.where(PageView.timestamp.between(*self.date_range))
return query
def top_pages_by_time_period(self, interval='day'):
"""
Get a breakdown of top pages per interval, i.e.
day url count
2014-01-01 /blog/ 11
2014-01-02 /blog/ 14
2014-01-03 /blog/ 9
"""
date_trunc = fn.date_trunc(interval, PageView.timestamp)
return (self.get_query()
.select(
PageView.url,
date_trunc.alias(interval),
fn.Count(PageView.id).alias('count'))
.group_by(PageView.url, date_trunc)
.order_by(
SQL(interval),
SQL('count').desc(),
PageView.url))
def user_agents(self):
"""
Retrieve user-agents, sorted by most common to least common.
"""
return (self.get_query()
.select(
PageView.headers['User-Agent'],
fn.Count(PageView.id))
.group_by(PageView.headers['User-Agent'])
.order_by(fn.Count(PageView.id).desc())
.tuples())
def languages(self):
"""
Retrieve languages, sorted by most common to least common. The
Accept-Languages header sometimes looks weird, i.e.
"en-US,en;q=0.8,is;q=0.6,da;q=0.4" We will split on the first semi-
colon.
"""
language = PageView.headers['Accept-Language']
first_language = fn.SubStr(
language, # String to slice.
1, # Left index.
fn.StrPos(language, ';'))
return (self.get_query()
.select(first_language, fn.Count(PageView.id))
.group_by(first_language)
.order_by(fn.Count(PageView.id).desc())
.tuples())
def trail(self):
"""
Get all visitors by IP and then list the pages they visited in order.
"""
inner = (self.get_query()
.select(PageView.ip, PageView.url)
.order_by(PageView.timestamp))
return (PageView
.select(
PageView.ip,
fn.array_agg(PageView.url).alias('urls'))
.from_(inner.alias('t1'))
.group_by(PageView.ip))
def _referrer_clause(self, domain_only=True):
if domain_only:
return fn.SubString(Clause(
PageView.referrer, SQL('FROM'), '.*://([^/]*)'))
return PageView.referrer
def top_referrers(self, domain_only=True):
"""
What domains send us the most traffic?
"""
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, fn.Count(PageView.id))
.group_by(referrer)
.order_by(fn.Count(PageView.id).desc())
.tuples())
def referrers_for_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(PageView.url, referrer, fn.Count(PageView.id))
.group_by(PageView.url, referrer)
.order_by(PageView.url, fn.Count(PageView.id).desc())
.tuples())
def referrers_to_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, PageView.url, fn.Count(PageView.id))
.group_by(referrer, PageView.url)
.order_by(referrer, fn.Count(PageView.id).desc())
.tuples())
|
coleifer/peewee | examples/analytics/reports.py | Report.user_agents | python | def user_agents(self):
return (self.get_query()
.select(
PageView.headers['User-Agent'],
fn.Count(PageView.id))
.group_by(PageView.headers['User-Agent'])
.order_by(fn.Count(PageView.id).desc())
.tuples()) | Retrieve user-agents, sorted by most common to least common. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/examples/analytics/reports.py#L49-L59 | [
"def get_query(self):\n query = PageView.select().where(PageView.account == self.account)\n if self.date_range:\n query = query.where(PageView.timestamp.between(*self.date_range))\n return query\n"
] | class Report(object):
def __init__(self, account_id=DEFAULT_ACCOUNT_ID):
self.account = Account.get(Account.id == account_id)
self.date_range = None
def get_query(self):
query = PageView.select().where(PageView.account == self.account)
if self.date_range:
query = query.where(PageView.timestamp.between(*self.date_range))
return query
def top_pages_by_time_period(self, interval='day'):
"""
Get a breakdown of top pages per interval, i.e.
day url count
2014-01-01 /blog/ 11
2014-01-02 /blog/ 14
2014-01-03 /blog/ 9
"""
date_trunc = fn.date_trunc(interval, PageView.timestamp)
return (self.get_query()
.select(
PageView.url,
date_trunc.alias(interval),
fn.Count(PageView.id).alias('count'))
.group_by(PageView.url, date_trunc)
.order_by(
SQL(interval),
SQL('count').desc(),
PageView.url))
def cookies(self):
"""
Retrieve the cookies header from all the users who visited.
"""
return (self.get_query()
.select(PageView.ip, PageView.headers['Cookie'])
.where(PageView.headers['Cookie'].is_null(False))
.tuples())
def languages(self):
"""
Retrieve languages, sorted by most common to least common. The
Accept-Languages header sometimes looks weird, i.e.
"en-US,en;q=0.8,is;q=0.6,da;q=0.4" We will split on the first semi-
colon.
"""
language = PageView.headers['Accept-Language']
first_language = fn.SubStr(
language, # String to slice.
1, # Left index.
fn.StrPos(language, ';'))
return (self.get_query()
.select(first_language, fn.Count(PageView.id))
.group_by(first_language)
.order_by(fn.Count(PageView.id).desc())
.tuples())
def trail(self):
"""
Get all visitors by IP and then list the pages they visited in order.
"""
inner = (self.get_query()
.select(PageView.ip, PageView.url)
.order_by(PageView.timestamp))
return (PageView
.select(
PageView.ip,
fn.array_agg(PageView.url).alias('urls'))
.from_(inner.alias('t1'))
.group_by(PageView.ip))
def _referrer_clause(self, domain_only=True):
if domain_only:
return fn.SubString(Clause(
PageView.referrer, SQL('FROM'), '.*://([^/]*)'))
return PageView.referrer
def top_referrers(self, domain_only=True):
"""
What domains send us the most traffic?
"""
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, fn.Count(PageView.id))
.group_by(referrer)
.order_by(fn.Count(PageView.id).desc())
.tuples())
def referrers_for_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(PageView.url, referrer, fn.Count(PageView.id))
.group_by(PageView.url, referrer)
.order_by(PageView.url, fn.Count(PageView.id).desc())
.tuples())
def referrers_to_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, PageView.url, fn.Count(PageView.id))
.group_by(referrer, PageView.url)
.order_by(referrer, fn.Count(PageView.id).desc())
.tuples())
|
coleifer/peewee | examples/analytics/reports.py | Report.languages | python | def languages(self):
language = PageView.headers['Accept-Language']
first_language = fn.SubStr(
language, # String to slice.
1, # Left index.
fn.StrPos(language, ';'))
return (self.get_query()
.select(first_language, fn.Count(PageView.id))
.group_by(first_language)
.order_by(fn.Count(PageView.id).desc())
.tuples()) | Retrieve languages, sorted by most common to least common. The
Accept-Languages header sometimes looks weird, i.e.
"en-US,en;q=0.8,is;q=0.6,da;q=0.4" We will split on the first semi-
colon. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/examples/analytics/reports.py#L61-L77 | [
"def get_query(self):\n query = PageView.select().where(PageView.account == self.account)\n if self.date_range:\n query = query.where(PageView.timestamp.between(*self.date_range))\n return query\n"
] | class Report(object):
def __init__(self, account_id=DEFAULT_ACCOUNT_ID):
self.account = Account.get(Account.id == account_id)
self.date_range = None
def get_query(self):
query = PageView.select().where(PageView.account == self.account)
if self.date_range:
query = query.where(PageView.timestamp.between(*self.date_range))
return query
def top_pages_by_time_period(self, interval='day'):
"""
Get a breakdown of top pages per interval, i.e.
day url count
2014-01-01 /blog/ 11
2014-01-02 /blog/ 14
2014-01-03 /blog/ 9
"""
date_trunc = fn.date_trunc(interval, PageView.timestamp)
return (self.get_query()
.select(
PageView.url,
date_trunc.alias(interval),
fn.Count(PageView.id).alias('count'))
.group_by(PageView.url, date_trunc)
.order_by(
SQL(interval),
SQL('count').desc(),
PageView.url))
def cookies(self):
"""
Retrieve the cookies header from all the users who visited.
"""
return (self.get_query()
.select(PageView.ip, PageView.headers['Cookie'])
.where(PageView.headers['Cookie'].is_null(False))
.tuples())
def user_agents(self):
"""
Retrieve user-agents, sorted by most common to least common.
"""
return (self.get_query()
.select(
PageView.headers['User-Agent'],
fn.Count(PageView.id))
.group_by(PageView.headers['User-Agent'])
.order_by(fn.Count(PageView.id).desc())
.tuples())
def trail(self):
"""
Get all visitors by IP and then list the pages they visited in order.
"""
inner = (self.get_query()
.select(PageView.ip, PageView.url)
.order_by(PageView.timestamp))
return (PageView
.select(
PageView.ip,
fn.array_agg(PageView.url).alias('urls'))
.from_(inner.alias('t1'))
.group_by(PageView.ip))
def _referrer_clause(self, domain_only=True):
if domain_only:
return fn.SubString(Clause(
PageView.referrer, SQL('FROM'), '.*://([^/]*)'))
return PageView.referrer
def top_referrers(self, domain_only=True):
"""
What domains send us the most traffic?
"""
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, fn.Count(PageView.id))
.group_by(referrer)
.order_by(fn.Count(PageView.id).desc())
.tuples())
def referrers_for_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(PageView.url, referrer, fn.Count(PageView.id))
.group_by(PageView.url, referrer)
.order_by(PageView.url, fn.Count(PageView.id).desc())
.tuples())
def referrers_to_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, PageView.url, fn.Count(PageView.id))
.group_by(referrer, PageView.url)
.order_by(referrer, fn.Count(PageView.id).desc())
.tuples())
|
coleifer/peewee | examples/analytics/reports.py | Report.trail | python | def trail(self):
inner = (self.get_query()
.select(PageView.ip, PageView.url)
.order_by(PageView.timestamp))
return (PageView
.select(
PageView.ip,
fn.array_agg(PageView.url).alias('urls'))
.from_(inner.alias('t1'))
.group_by(PageView.ip)) | Get all visitors by IP and then list the pages they visited in order. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/examples/analytics/reports.py#L79-L91 | [
"def get_query(self):\n query = PageView.select().where(PageView.account == self.account)\n if self.date_range:\n query = query.where(PageView.timestamp.between(*self.date_range))\n return query\n"
] | class Report(object):
def __init__(self, account_id=DEFAULT_ACCOUNT_ID):
self.account = Account.get(Account.id == account_id)
self.date_range = None
def get_query(self):
query = PageView.select().where(PageView.account == self.account)
if self.date_range:
query = query.where(PageView.timestamp.between(*self.date_range))
return query
def top_pages_by_time_period(self, interval='day'):
"""
Get a breakdown of top pages per interval, i.e.
day url count
2014-01-01 /blog/ 11
2014-01-02 /blog/ 14
2014-01-03 /blog/ 9
"""
date_trunc = fn.date_trunc(interval, PageView.timestamp)
return (self.get_query()
.select(
PageView.url,
date_trunc.alias(interval),
fn.Count(PageView.id).alias('count'))
.group_by(PageView.url, date_trunc)
.order_by(
SQL(interval),
SQL('count').desc(),
PageView.url))
def cookies(self):
"""
Retrieve the cookies header from all the users who visited.
"""
return (self.get_query()
.select(PageView.ip, PageView.headers['Cookie'])
.where(PageView.headers['Cookie'].is_null(False))
.tuples())
def user_agents(self):
"""
Retrieve user-agents, sorted by most common to least common.
"""
return (self.get_query()
.select(
PageView.headers['User-Agent'],
fn.Count(PageView.id))
.group_by(PageView.headers['User-Agent'])
.order_by(fn.Count(PageView.id).desc())
.tuples())
def languages(self):
"""
Retrieve languages, sorted by most common to least common. The
Accept-Languages header sometimes looks weird, i.e.
"en-US,en;q=0.8,is;q=0.6,da;q=0.4" We will split on the first semi-
colon.
"""
language = PageView.headers['Accept-Language']
first_language = fn.SubStr(
language, # String to slice.
1, # Left index.
fn.StrPos(language, ';'))
return (self.get_query()
.select(first_language, fn.Count(PageView.id))
.group_by(first_language)
.order_by(fn.Count(PageView.id).desc())
.tuples())
def _referrer_clause(self, domain_only=True):
if domain_only:
return fn.SubString(Clause(
PageView.referrer, SQL('FROM'), '.*://([^/]*)'))
return PageView.referrer
def top_referrers(self, domain_only=True):
"""
What domains send us the most traffic?
"""
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, fn.Count(PageView.id))
.group_by(referrer)
.order_by(fn.Count(PageView.id).desc())
.tuples())
def referrers_for_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(PageView.url, referrer, fn.Count(PageView.id))
.group_by(PageView.url, referrer)
.order_by(PageView.url, fn.Count(PageView.id).desc())
.tuples())
def referrers_to_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, PageView.url, fn.Count(PageView.id))
.group_by(referrer, PageView.url)
.order_by(referrer, fn.Count(PageView.id).desc())
.tuples())
|
coleifer/peewee | examples/analytics/reports.py | Report.top_referrers | python | def top_referrers(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, fn.Count(PageView.id))
.group_by(referrer)
.order_by(fn.Count(PageView.id).desc())
.tuples()) | What domains send us the most traffic? | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/examples/analytics/reports.py#L99-L108 | [
"def get_query(self):\n query = PageView.select().where(PageView.account == self.account)\n if self.date_range:\n query = query.where(PageView.timestamp.between(*self.date_range))\n return query\n",
"def _referrer_clause(self, domain_only=True):\n if domain_only:\n return fn.SubString(Cl... | class Report(object):
def __init__(self, account_id=DEFAULT_ACCOUNT_ID):
self.account = Account.get(Account.id == account_id)
self.date_range = None
def get_query(self):
query = PageView.select().where(PageView.account == self.account)
if self.date_range:
query = query.where(PageView.timestamp.between(*self.date_range))
return query
def top_pages_by_time_period(self, interval='day'):
"""
Get a breakdown of top pages per interval, i.e.
day url count
2014-01-01 /blog/ 11
2014-01-02 /blog/ 14
2014-01-03 /blog/ 9
"""
date_trunc = fn.date_trunc(interval, PageView.timestamp)
return (self.get_query()
.select(
PageView.url,
date_trunc.alias(interval),
fn.Count(PageView.id).alias('count'))
.group_by(PageView.url, date_trunc)
.order_by(
SQL(interval),
SQL('count').desc(),
PageView.url))
def cookies(self):
"""
Retrieve the cookies header from all the users who visited.
"""
return (self.get_query()
.select(PageView.ip, PageView.headers['Cookie'])
.where(PageView.headers['Cookie'].is_null(False))
.tuples())
def user_agents(self):
"""
Retrieve user-agents, sorted by most common to least common.
"""
return (self.get_query()
.select(
PageView.headers['User-Agent'],
fn.Count(PageView.id))
.group_by(PageView.headers['User-Agent'])
.order_by(fn.Count(PageView.id).desc())
.tuples())
def languages(self):
"""
Retrieve languages, sorted by most common to least common. The
Accept-Languages header sometimes looks weird, i.e.
"en-US,en;q=0.8,is;q=0.6,da;q=0.4" We will split on the first semi-
colon.
"""
language = PageView.headers['Accept-Language']
first_language = fn.SubStr(
language, # String to slice.
1, # Left index.
fn.StrPos(language, ';'))
return (self.get_query()
.select(first_language, fn.Count(PageView.id))
.group_by(first_language)
.order_by(fn.Count(PageView.id).desc())
.tuples())
def trail(self):
"""
Get all visitors by IP and then list the pages they visited in order.
"""
inner = (self.get_query()
.select(PageView.ip, PageView.url)
.order_by(PageView.timestamp))
return (PageView
.select(
PageView.ip,
fn.array_agg(PageView.url).alias('urls'))
.from_(inner.alias('t1'))
.group_by(PageView.ip))
def _referrer_clause(self, domain_only=True):
if domain_only:
return fn.SubString(Clause(
PageView.referrer, SQL('FROM'), '.*://([^/]*)'))
return PageView.referrer
def referrers_for_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(PageView.url, referrer, fn.Count(PageView.id))
.group_by(PageView.url, referrer)
.order_by(PageView.url, fn.Count(PageView.id).desc())
.tuples())
def referrers_to_url(self, domain_only=True):
referrer = self._referrer_clause(domain_only)
return (self.get_query()
.select(referrer, PageView.url, fn.Count(PageView.id))
.group_by(referrer, PageView.url)
.order_by(referrer, fn.Count(PageView.id).desc())
.tuples())
|
def add_entry():
    """Add entry"""
    # Docstring above is runtime-visible: menu_loop prints func.__doc__.
    print('Enter your entry. Press ctrl+d when finished.')
    # Read the whole entry from stdin until EOF.
    entry_text = sys.stdin.read().strip()
    if not entry_text:
        return
    # Any answer other than a literal 'n' saves the entry.
    if raw_input('Save entry? [Yn] ') != 'n':
        Entry.create(content=entry_text)
        print('Saved successfully.')
from collections import OrderedDict
import datetime
from getpass import getpass
import sys
from peewee import *
from playhouse.sqlcipher_ext import SqlCipherDatabase
# Defer initialization of the database until the script is executed from the
# command-line.
db = SqlCipherDatabase(None)
class Entry(Model):
content = TextField()
timestamp = DateTimeField(default=datetime.datetime.now)
class Meta:
database = db
def initialize(passphrase):
    """Bind the encrypted database file and ensure the schema exists."""
    # kdf_iter raises the SQLCipher key-derivation iteration count,
    # hardening the passphrase against brute-force attacks.
    db.init('diary.db', passphrase=passphrase, kdf_iter=64000)
    db.create_tables([Entry])
def menu_loop():
    """Print the menu and dispatch user selections until 'q' is entered."""
    while True:
        # Each handler's docstring doubles as its menu description.
        for option, handler in menu.items():
            print('%s) %s' % (option, handler.__doc__))
        selection = raw_input('Action: ').lower().strip()
        if selection == 'q':
            break
        if selection in menu:
            menu[selection]()
def view_entries(search_query=None):
"""View previous entries"""
query = Entry.select().order_by(Entry.timestamp.desc())
if search_query:
query = query.where(Entry.content.contains(search_query))
for entry in query:
timestamp = entry.timestamp.strftime('%A %B %d, %Y %I:%M%p')
print(timestamp)
print('=' * len(timestamp))
print(entry.content)
print('n) next entry')
print('d) delete entry')
print('q) return to main menu')
action = raw_input('Choice? (Ndq) ').lower().strip()
if action == 'q':
break
elif action == 'd':
entry.delete_instance()
break
def search_entries():
    """Search entries"""
    # Docstring above is runtime-visible (printed by menu_loop).
    # Prompt for a term and reuse view_entries() with it as the filter.
    term = raw_input('Search query: ')
    view_entries(term)
menu = OrderedDict([
('a', add_entry),
('v', view_entries),
('s', search_entries),
])
if __name__ == '__main__':
# Collect the passphrase using a secure method.
passphrase = getpass('Enter password: ')
if not passphrase:
sys.stderr.write('Passphrase required to access diary.\n')
sys.stderr.flush()
sys.exit(1)
elif len(passphrase) < 8:
sys.stderr.write('Passphrase must be at least 8 characters.\n')
sys.stderr.flush()
sys.exit(1)
# Initialize the database.
initialize(passphrase)
menu_loop()
|
def view_entries(search_query=None):
    """View previous entries"""
    # Newest entries first; optionally filtered by substring match.
    query = Entry.select().order_by(Entry.timestamp.desc())
    if search_query:
        query = query.where(Entry.content.contains(search_query))

    for entry in query:
        timestamp = entry.timestamp.strftime('%A %B %d, %Y %I:%M%p')
        print(timestamp)
        print('=' * len(timestamp))
        print(entry.content)
        print('n) next entry')
        print('d) delete entry')
        print('q) return to main menu')
        # Anything other than 'd'/'q' (including empty input) advances
        # to the next entry.
        action = raw_input('Choice? (Ndq) ').lower().strip()
        if action == 'q':
            break
        elif action == 'd':
            entry.delete_instance()
            break
from collections import OrderedDict
import datetime
from getpass import getpass
import sys
from peewee import *
from playhouse.sqlcipher_ext import SqlCipherDatabase
# Defer initialization of the database until the script is executed from the
# command-line.
db = SqlCipherDatabase(None)
class Entry(Model):
content = TextField()
timestamp = DateTimeField(default=datetime.datetime.now)
class Meta:
database = db
def initialize(passphrase):
db.init('diary.db', passphrase=passphrase, kdf_iter=64000)
db.create_tables([Entry])
def menu_loop():
choice = None
while choice != 'q':
for key, value in menu.items():
print('%s) %s' % (key, value.__doc__))
choice = raw_input('Action: ').lower().strip()
if choice in menu:
menu[choice]()
def add_entry():
"""Add entry"""
print('Enter your entry. Press ctrl+d when finished.')
data = sys.stdin.read().strip()
if data and raw_input('Save entry? [Yn] ') != 'n':
Entry.create(content=data)
print('Saved successfully.')
def search_entries():
"""Search entries"""
view_entries(raw_input('Search query: '))
menu = OrderedDict([
('a', add_entry),
('v', view_entries),
('s', search_entries),
])
if __name__ == '__main__':
# Collect the passphrase using a secure method.
passphrase = getpass('Enter password: ')
if not passphrase:
sys.stderr.write('Passphrase required to access diary.\n')
sys.stderr.flush()
sys.exit(1)
elif len(passphrase) < 8:
sys.stderr.write('Passphrase must be at least 8 characters.\n')
sys.stderr.flush()
sys.exit(1)
# Initialize the database.
initialize(passphrase)
menu_loop()
|
    def html_content(self):
        """
        Generate HTML representation of the markdown-formatted blog entry,
        and also convert any media URLs into rich media objects such as video
        players or images.
        """
        # Fenced-code syntax highlighting; per-line numbering disabled.
        hilite = CodeHiliteExtension(linenums=False, css_class='highlight')
        extras = ExtraExtension()
        markdown_content = markdown(self.content, extensions=[hilite, extras])
        # micawber's parse_html swaps recognized media URLs for oEmbed HTML,
        # capped at the configured site width; urlize_all also hyperlinks
        # plain URLs.
        oembed_content = parse_html(
            markdown_content,
            oembed_providers,
            urlize_all=True,
            maxwidth=app.config['SITE_WIDTH'])
        # Markup marks the result as safe so the template engine will not
        # re-escape it -- presumably Jinja2 via Flask; confirm in app context.
        return Markup(oembed_content)
and also convert any media URLs into rich media objects such as video
players or images. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/examples/blog/app.py#L66-L80 | null | class Entry(flask_db.Model):
title = CharField()
slug = CharField(unique=True)
content = TextField()
published = BooleanField(index=True)
timestamp = DateTimeField(default=datetime.datetime.now, index=True)
@property
def save(self, *args, **kwargs):
# Generate a URL-friendly representation of the entry's title.
if not self.slug:
self.slug = re.sub('[^\w]+', '-', self.title.lower()).strip('-')
ret = super(Entry, self).save(*args, **kwargs)
# Store search content.
self.update_search_index()
return ret
def update_search_index(self):
# Create a row in the FTSEntry table with the post content. This will
# allow us to use SQLite's awesome full-text search extension to
# search our entries.
exists = (FTSEntry
.select(FTSEntry.docid)
.where(FTSEntry.docid == self.id)
.exists())
content = '\n'.join((self.title, self.content))
if exists:
(FTSEntry
.update({FTSEntry.content: content})
.where(FTSEntry.docid == self.id)
.execute())
else:
FTSEntry.insert({
FTSEntry.docid: self.id,
FTSEntry.content: content}).execute()
@classmethod
def public(cls):
return Entry.select().where(Entry.published == True)
@classmethod
def drafts(cls):
return Entry.select().where(Entry.published == False)
    @classmethod
    def search(cls, query):
        """Full-text search over published entries.

        `query` is split on whitespace; a blank query returns Entry.noop().
        Matching entries are joined against the FTS index and ordered by the
        rank aliased as 'score'.
        """
        words = [word.strip() for word in query.split() if word.strip()]
        if not words:
            # Return an empty query.
            return Entry.noop()
        else:
            search = ' '.join(words)
            # Query the full-text search index for entries matching the given
            # search query, then join the actual Entry data on the matching
            # search result.
            return (Entry
                    .select(Entry, FTSEntry.rank().alias('score'))
                    .join(FTSEntry, on=(Entry.id == FTSEntry.docid))
                    .where(
                        FTSEntry.match(search) &
                        (Entry.published == True))
                    .order_by(SQL('score')))
|
coleifer/peewee | playhouse/shortcuts.py | model_to_dict | python | def model_to_dict(model, recurse=True, backrefs=False, only=None,
exclude=None, seen=None, extra_attrs=None,
fields_from_query=None, max_depth=None, manytomany=False):
max_depth = -1 if max_depth is None else max_depth
if max_depth == 0:
recurse = False
only = _clone_set(only)
extra_attrs = _clone_set(extra_attrs)
should_skip = lambda n: (n in exclude) or (only and (n not in only))
if fields_from_query is not None:
for item in fields_from_query._returning:
if isinstance(item, Field):
only.add(item)
elif isinstance(item, Alias):
extra_attrs.add(item._alias)
data = {}
exclude = _clone_set(exclude)
seen = _clone_set(seen)
exclude |= seen
model_class = type(model)
if manytomany:
for name, m2m in model._meta.manytomany.items():
if should_skip(name):
continue
exclude.update((m2m, m2m.rel_model._meta.manytomany[m2m.backref]))
for fkf in m2m.through_model._meta.refs:
exclude.add(fkf)
accum = []
for rel_obj in getattr(model, name):
accum.append(model_to_dict(
rel_obj,
recurse=recurse,
backrefs=backrefs,
only=only,
exclude=exclude,
max_depth=max_depth - 1))
data[name] = accum
for field in model._meta.sorted_fields:
if should_skip(field):
continue
field_data = model.__data__.get(field.name)
if isinstance(field, ForeignKeyField) and recurse:
if field_data:
seen.add(field)
rel_obj = getattr(model, field.name)
field_data = model_to_dict(
rel_obj,
recurse=recurse,
backrefs=backrefs,
only=only,
exclude=exclude,
seen=seen,
max_depth=max_depth - 1)
else:
field_data = None
data[field.name] = field_data
if extra_attrs:
for attr_name in extra_attrs:
attr = getattr(model, attr_name)
if callable_(attr):
data[attr_name] = attr()
else:
data[attr_name] = attr
if backrefs and recurse:
for foreign_key, rel_model in model._meta.backrefs.items():
if foreign_key.backref == '+': continue
descriptor = getattr(model_class, foreign_key.backref)
if descriptor in exclude or foreign_key in exclude:
continue
if only and (descriptor not in only) and (foreign_key not in only):
continue
accum = []
exclude.add(foreign_key)
related_query = getattr(model, foreign_key.backref)
for rel_obj in related_query:
accum.append(model_to_dict(
rel_obj,
recurse=recurse,
backrefs=backrefs,
only=only,
exclude=exclude,
max_depth=max_depth - 1))
data[foreign_key.backref] = accum
return data | Convert a model instance (and any related objects) to a dictionary.
:param bool recurse: Whether foreign-keys should be recursed.
:param bool backrefs: Whether lists of related objects should be recursed.
:param only: A list (or set) of field instances indicating which fields
should be included.
:param exclude: A list (or set) of field instances that should be
excluded from the dictionary.
:param list extra_attrs: Names of model instance attributes or methods
that should be included.
:param SelectQuery fields_from_query: Query that was source of model. Take
fields explicitly selected by the query and serialize them.
:param int max_depth: Maximum depth to recurse, value <= 0 means no max.
:param bool manytomany: Process many-to-many fields. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/shortcuts.py#L10-L124 | [
"callable_ = lambda c: isinstance(c, Callable)\n",
"_clone_set = lambda s: set(s) if s else set()\n",
"def model_to_dict(model, recurse=True, backrefs=False, only=None,\n exclude=None, seen=None, extra_attrs=None,\n fields_from_query=None, max_depth=None, manytomany=False):\n ... | from peewee import *
from peewee import Alias
from peewee import SENTINEL
from peewee import callable_
_clone_set = lambda s: set(s) if s else set()
def update_model_from_dict(instance, data, ignore_unknown=False):
    """Apply the key/value pairs in `data` to `instance`.

    Nested dicts on foreign-key fields recurse into the related instance;
    lists/tuples on back-reference keys build child model instances that
    point back at `instance`. Unknown keys raise AttributeError unless
    `ignore_unknown` is true, in which case they are set as plain
    attributes. Returns `instance`.
    """
    meta = instance._meta
    backref_map = {fk.backref: fk for fk in meta.backrefs}
    for key, value in data.items():
        if key in meta.combined:
            field, is_backref = meta.combined[key], False
        elif key in backref_map:
            field, is_backref = backref_map[key], True
        elif ignore_unknown:
            # Tolerated unknown key: stash it as an ordinary attribute.
            setattr(instance, key, value)
            continue
        else:
            raise AttributeError('Unrecognized attribute "%s" for model '
                                 'class %s.' % (key, type(instance)))
        if (not is_backref and isinstance(field, ForeignKeyField)
                and isinstance(value, dict)):
            # Nested dict on a FK: populate the existing related instance,
            # or create a fresh one if none is cached yet.
            try:
                related = instance.__rel__[field.name]
            except KeyError:
                related = field.rel_model()
            setattr(instance, field.name,
                    update_model_from_dict(related, value, ignore_unknown))
        elif is_backref and isinstance(value, (list, tuple)):
            # List of row dicts on a backref: build children linked back to
            # this instance.
            children = [dict_to_model(field.model, row, ignore_unknown)
                        for row in value]
            for child in children:
                setattr(child, field.name, instance)
            setattr(instance, field.backref, children)
        else:
            setattr(instance, field.name, value)
    return instance
def dict_to_model(model_class, data, ignore_unknown=False):
    """Instantiate `model_class` and populate it from the dict `data`."""
    fresh_instance = model_class()
    return update_model_from_dict(fresh_instance, data, ignore_unknown)
class ReconnectMixin(object):
    """
    Mixin class that attempts to automatically reconnect to the database under
    certain error conditions.

    For example, MySQL servers will typically close connections that are idle
    for 28800 seconds ("wait_timeout" setting). If your application makes use
    of long-lived connections, you may find your connections are closed after
    a period of no activity. This mixin will attempt to reconnect automatically
    when these errors occur.

    This mixin class probably should not be used with Postgres (unless you
    REALLY know what you are doing) and definitely has no business being used
    with Sqlite. If you wish to use with Postgres, you will need to adapt the
    `reconnect_errors` attribute to something appropriate for Postgres.
    """

    # (exception class, lowercased message fragment) pairs that trigger a
    # reconnect; an empty fragment matches any message of that class.
    reconnect_errors = (
        (OperationalError, '2006'),  # MySQL server has gone away.
        (OperationalError, '2013'),  # Lost connection to MySQL server.
        (OperationalError, '2014'),  # Commands out of sync.
    )

    def __init__(self, *args, **kwargs):
        super(ReconnectMixin, self).__init__(*args, **kwargs)

        # Re-shape reconnect_errors into {exc_class: [fragment, ...]} so the
        # hot path can do a single dict lookup.
        err_map = {}
        for exc_class, fragment in self.reconnect_errors:
            err_map.setdefault(exc_class, []).append(fragment.lower())
        self._reconnect_errors = err_map

    def execute_sql(self, sql, params=None, commit=SENTINEL):
        try:
            return super(ReconnectMixin, self).execute_sql(sql, params, commit)
        except Exception as exc:
            fragments = self._reconnect_errors.get(type(exc))
            if fragments is None:
                raise exc
            message = str(exc).lower()
            if not any(fragment in message for fragment in fragments):
                raise exc
            # Recognized connection failure: close (if needed), reconnect,
            # and retry the statement exactly once.
            if not self.is_closed():
                self.close()
            self.connect()
            return super(ReconnectMixin, self).execute_sql(sql, params, commit)
|
coleifer/peewee | peewee.py | ColumnBase._e | python | def _e(op, inv=False):
def inner(self, rhs):
if inv:
return Expression(rhs, op, self)
return Expression(self, op, rhs)
return inner | Lightweight factory which returns a method that builds an Expression
consisting of the left-hand and right-hand operands, using `op`. | train | https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/peewee.py#L1091-L1100 | null | class ColumnBase(Node):
    def alias(self, alias):
        """Wrap this node in an SQL alias ("AS") when `alias` is truthy;
        otherwise return self unchanged."""
        if alias:
            return Alias(self, alias)
        return self
    def unalias(self):
        """Return the un-aliased node; for a plain column that is self."""
        return self
    def cast(self, as_type):
        """Build a CAST of this node to the given type."""
        return Cast(self, as_type)
    def asc(self, collation=None, nulls=None):
        """Ascending ordering term, with optional collation / NULLS placement."""
        return Asc(self, collation=collation, nulls=nulls)
    __pos__ = asc  # unary + means ascending order
    def desc(self, collation=None, nulls=None):
        """Descending ordering term, with optional collation / NULLS placement."""
        return Desc(self, collation=collation, nulls=nulls)
    __neg__ = desc  # unary - means descending order
    def __invert__(self):
        """~node wraps the node in a Negated expression."""
        return Negated(self)
    # Binary operator overloads. Each _e(op) call yields a method building
    # Expression(self, op, rhs); inv=True swaps operand order for the
    # reflected (r-prefixed) variants.
    __and__ = _e(OP.AND)
    __or__ = _e(OP.OR)
    __add__ = _e(OP.ADD)
    __sub__ = _e(OP.SUB)
    __mul__ = _e(OP.MUL)
    __div__ = __truediv__ = _e(OP.DIV)
    __xor__ = _e(OP.XOR)
    __radd__ = _e(OP.ADD, inv=True)
    __rsub__ = _e(OP.SUB, inv=True)
    __rmul__ = _e(OP.MUL, inv=True)
    __rdiv__ = __rtruediv__ = _e(OP.DIV, inv=True)
    __rand__ = _e(OP.AND, inv=True)
    __ror__ = _e(OP.OR, inv=True)
    __rxor__ = _e(OP.XOR, inv=True)
    def __eq__(self, rhs):
        # Equality against None uses OP.IS (SQL NULL semantics), not OP.EQ.
        op = OP.IS if rhs is None else OP.EQ
        return Expression(self, op, rhs)
    def __ne__(self, rhs):
        # Inequality against None uses OP.IS_NOT, not OP.NE.
        op = OP.IS_NOT if rhs is None else OP.NE
        return Expression(self, op, rhs)
    __lt__ = _e(OP.LT)
    __le__ = _e(OP.LTE)
    __gt__ = _e(OP.GT)
    __ge__ = _e(OP.GTE)
    __lshift__ = _e(OP.IN)   # x << y  ->  x IN y
    __rshift__ = _e(OP.IS)   # x >> y  ->  x IS y
    __mod__ = _e(OP.LIKE)    # x % y   ->  x LIKE y
    __pow__ = _e(OP.ILIKE)   # x ** y  ->  x ILIKE y
    bin_and = _e(OP.BIN_AND)
    bin_or = _e(OP.BIN_OR)
    in_ = _e(OP.IN)
    not_in = _e(OP.NOT_IN)
    regexp = _e(OP.REGEXP)  # NOTE: shadowed by the regexp() method defined later in this class
# Special expressions.
def is_null(self, is_null=True):
op = OP.IS if is_null else OP.IS_NOT
return Expression(self, op, None)
    def contains(self, rhs):
        """Substring match via ILIKE '%rhs%'."""
        return Expression(self, OP.ILIKE, '%%%s%%' % rhs)
    def startswith(self, rhs):
        """Prefix match via ILIKE 'rhs%'."""
        return Expression(self, OP.ILIKE, '%s%%' % rhs)
    def endswith(self, rhs):
        """Suffix match via ILIKE '%rhs'."""
        return Expression(self, OP.ILIKE, '%%%s' % rhs)
    def between(self, lo, hi):
        """SQL BETWEEN lo AND hi."""
        return Expression(self, OP.BETWEEN, NodeList((lo, SQL('AND'), hi)))
    def concat(self, rhs):
        """Concatenation expression (OP.CONCAT), returned as a StringExpression."""
        return StringExpression(self, OP.CONCAT, rhs)
    def regexp(self, rhs):
        """Regular-expression match (OP.REGEXP)."""
        return Expression(self, OP.REGEXP, rhs)
    def iregexp(self, rhs):
        """Case-insensitive regular-expression match (OP.IREGEXP)."""
        return Expression(self, OP.IREGEXP, rhs)
def __getitem__(self, item):
if isinstance(item, slice):
if item.start is None or item.stop is None:
raise ValueError('BETWEEN range must have both a start- and '
'end-point.')
return self.between(item.start, item.stop)
return self == item
    def distinct(self):
        """Prefix this node with DISTINCT."""
        return NodeList((SQL('DISTINCT'), self))
    def collate(self, collation):
        """Append a COLLATE clause with the given collation name."""
        return NodeList((self, SQL('COLLATE %s' % collation)))
    def get_sort_key(self, ctx):
        # Base columns contribute an empty sort key; subclasses may override.
        return ()
|
huyingxi/Synonyms | synonyms/word2vec.py | KeyedVectors.load_word2vec_format | python | def load_word2vec_format(
cls,
fname,
fvocab=None,
binary=False,
encoding='utf8',
unicode_errors='strict',
limit=None,
datatype=REAL):
counts = None
if fvocab is not None:
logging.debug("loading word counts from %s" % fvocab)
counts = {}
with smart_open(fvocab) as fin:
for line in fin:
word, count = to_unicode(line).strip().split()
counts[word] = int(count)
logging.debug("loading projection weights from %s" % fname)
with smart_open(fname) as fin:
header = to_unicode(fin.readline(), encoding=encoding)
# throws for invalid file format
vocab_size, vector_size = (int(x) for x in header.split())
if limit:
vocab_size = min(vocab_size, limit)
result = cls()
result.vector_size = vector_size
result.syn0 = zeros((vocab_size, vector_size), dtype=datatype)
def add_word(word, weights):
word_id = len(result.vocab)
# logging.debug("word id: %d, word: %s, weights: %s" % (word_id, word, weights))
if word in result.vocab:
logging.debug(
"duplicate word '%s' in %s, ignoring all but first" %
(word, fname))
return
if counts is None:
# most common scenario: no vocab file given. just make up
# some bogus counts, in descending order
result.vocab[word] = Vocab(
index=word_id, count=vocab_size - word_id)
elif word in counts:
# use count from the vocab file
result.vocab[word] = Vocab(
index=word_id, count=counts[word])
else:
# vocab file given, but word is missing -- set count to
# None (TODO: or raise?)
logging.debug(
"vocabulary file is incomplete: '%s' is missing" %
word)
result.vocab[word] = Vocab(index=word_id, count=None)
result.syn0[word_id] = weights
result.index2word.append(word)
if binary:
binary_len = dtype(REAL).itemsize * vector_size
for _ in xrange(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch == b'':
raise EOFError(
"unexpected end of input; is count incorrect or file otherwise damaged?")
# ignore newlines in front of words (some binary files
# have)
if ch != b'\n':
word.append(ch)
word = to_unicode(
b''.join(word), encoding=encoding, errors=unicode_errors)
weights = fromstring(fin.read(binary_len), dtype=REAL)
add_word(word, weights)
else:
for line_no in xrange(vocab_size):
line = fin.readline()
if line == b'':
raise EOFError(
"unexpected end of input; is count incorrect or file otherwise damaged?")
parts = to_unicode(
line.rstrip(),
encoding=encoding,
errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError(
"invalid vector on line %s (is this really the text format?)" %
line_no)
word, weights = parts[0], [REAL(x) for x in parts[1:]]
add_word(word, weights)
if result.syn0.shape[0] != len(result.vocab):
logging.debug(
"duplicate words detected, shrinking matrix size from %i to %i" %
(result.syn0.shape[0], len(result.vocab)))
result.syn0 = ascontiguousarray(result.syn0[: len(result.vocab)])
assert (len(result.vocab), vector_size) == result.syn0.shape
'''
KDTree
Build KDTree with vectors.
http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KDTree.html#sklearn.neighbors.KDTree
'''
result.kdt = KDTree(result.syn0, leaf_size=10, metric = "euclidean")
logging.debug("loaded %s matrix from %s" % (result.syn0.shape, fname))
return result | Load the input-hidden weight matrix from the original C word2vec-tool format.
Note that the information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
`binary` is a boolean indicating whether the data is in binary word2vec format.
`norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory.
Word counts are read from `fvocab` filename, if set (this is the file generated
by `-save-vocab` flag of the original C tool).
If you trained the C model using non-utf8 encoding for words, specify that
encoding in `encoding`.
`unicode_errors`, default 'strict', is a string suitable to be passed as the `errors`
argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
file may include word tokens truncated in the middle of a multibyte unicode character
(as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
`limit` sets a maximum number of word-vectors to read from the file. The default,
None, means read all.
`datatype` (experimental) can coerce dimensions to a non-default float type (such
as np.float16) to save memory. (Such types may result in much slower bulk operations
or incompatibility with optimized routines.) | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/word2vec.py#L88-L214 | [
"def any2unicode(text, encoding='utf8', errors='strict'):\n \"\"\"Convert a string (bytestring in `encoding` or unicode), to unicode.\"\"\"\n if isinstance(text, unicode):\n return text\n return unicode(text, encoding, errors=errors)\n",
"def smart_open(fname, mode='rb'):\n _, ext = os.path.spl... | class KeyedVectors():
"""
Class to contain vectors and vocab for the Word2Vec training class and other w2v methods not directly
involved in training such as most_similar()
"""
    def __init__(self):
        """Initialize empty vector storage; populated by the loader."""
        # Raw word-vector matrix, one row per word.
        self.syn0 = []
        # Cached normalized vectors; None until computed elsewhere.
        self.syn0norm = None
        # word -> Vocab(index, count) mapping.
        self.vocab = {}
        # index -> word, the inverse of the vocab mapping.
        self.index2word = []
        # Dimensionality of the vectors; set when a model is loaded.
        self.vector_size = None
        # KDTree over syn0 for nearest-neighbour queries; set on load.
        self.kdt = None
    @property
    def wv(self):
        """Return self, so callers using a `.wv` accessor still work."""
        return self
    def save(self, *args, **kwargs):
        # don't bother storing the cached normalized vectors
        kwargs['ignore'] = kwargs.get('ignore', ['syn0norm'])
        # NOTE(review): KeyedVectors declares no explicit base class in this
        # excerpt, so super().save() has no obvious target -- confirm a
        # base/mixin provides it in the full source.
        super(KeyedVectors, self).save(*args, **kwargs)
@classmethod
def word_vec(self, word, use_norm=False):
"""
Accept a single word as input.
Returns the word's representations in vector space, as a 1D numpy array.
If `use_norm` is True, returns the normalized word vector.
Example::
>>> trained_model['office']
array([ -1.40128313e-02, ...])
"""
if word in self.vocab:
if use_norm:
result = self.syn0norm[self.vocab[word].index]
else:
result = self.syn0[self.vocab[word].index]
result.setflags(write=False)
return result
else:
raise KeyError("word '%s' not in vocabulary" % word)
    def neighbours(self, word, size = 10):
        """
        Get nearest words with KDTree, ranking by cosine distance

        Generator yielding up to `size` (word, score) pairs in descending
        score order. Candidates come from the KDTree query over syn0; scores
        are cosine similarity against `word`'s vector, with the query word
        itself scoring 1.0. Scores are clamped into [0, 1] via abs()/min().
        Raises KeyError for unknown words (via word_vec).
        """
        word = word.strip()
        v = self.word_vec(word)
        [distances], [points] = self.kdt.query(array([v]), k = size, return_distance = True)
        assert len(distances) == len(points), "distances and points should be in same shape."
        words, scores = [], {}
        for (x,y) in zip(points, distances):
            w = self.index2word[x]
            # The query word itself gets a perfect score.
            if w == word: s = 1.0
            else: s = cosine(v, self.syn0[x])
            # Negative cosine values are folded to their magnitude.
            if s < 0: s = abs(s)
            words.append(w)
            scores[w] = min(s, 1.0)
        for x in sorted(words, key=scores.get, reverse=True):
            yield x, scores[x]
|
huyingxi/Synonyms | synonyms/word2vec.py | KeyedVectors.word_vec | python | def word_vec(self, word, use_norm=False):
if word in self.vocab:
if use_norm:
result = self.syn0norm[self.vocab[word].index]
else:
result = self.syn0[self.vocab[word].index]
result.setflags(write=False)
return result
else:
raise KeyError("word '%s' not in vocabulary" % word) | Accept a single word as input.
Returns the word's representations in vector space, as a 1D numpy array.
If `use_norm` is True, returns the normalized word vector.
Example::
>>> trained_model['office']
array([ -1.40128313e-02, ...]) | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/word2vec.py#L216-L234 | null | class KeyedVectors():
"""
Class to contain vectors and vocab for the Word2Vec training class and other w2v methods not directly
involved in training such as most_similar()
"""
def __init__(self):
self.syn0 = []
self.syn0norm = None
self.vocab = {}
self.index2word = []
self.vector_size = None
self.kdt = None
@property
def wv(self):
return self
def save(self, *args, **kwargs):
# don't bother storing the cached normalized vectors
kwargs['ignore'] = kwargs.get('ignore', ['syn0norm'])
super(KeyedVectors, self).save(*args, **kwargs)
@classmethod
def load_word2vec_format(
cls,
fname,
fvocab=None,
binary=False,
encoding='utf8',
unicode_errors='strict',
limit=None,
datatype=REAL):
"""
Load the input-hidden weight matrix from the original C word2vec-tool format.
Note that the information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
`binary` is a boolean indicating whether the data is in binary word2vec format.
`norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory.
Word counts are read from `fvocab` filename, if set (this is the file generated
by `-save-vocab` flag of the original C tool).
If you trained the C model using non-utf8 encoding for words, specify that
encoding in `encoding`.
`unicode_errors`, default 'strict', is a string suitable to be passed as the `errors`
argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
file may include word tokens truncated in the middle of a multibyte unicode character
(as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
`limit` sets a maximum number of word-vectors to read from the file. The default,
None, means read all.
`datatype` (experimental) can coerce dimensions to a non-default float type (such
as np.float16) to save memory. (Such types may result in much slower bulk operations
or incompatibility with optimized routines.)
"""
counts = None
if fvocab is not None:
logging.debug("loading word counts from %s" % fvocab)
counts = {}
with smart_open(fvocab) as fin:
for line in fin:
word, count = to_unicode(line).strip().split()
counts[word] = int(count)
logging.debug("loading projection weights from %s" % fname)
with smart_open(fname) as fin:
header = to_unicode(fin.readline(), encoding=encoding)
# throws for invalid file format
vocab_size, vector_size = (int(x) for x in header.split())
if limit:
vocab_size = min(vocab_size, limit)
result = cls()
result.vector_size = vector_size
result.syn0 = zeros((vocab_size, vector_size), dtype=datatype)
def add_word(word, weights):
word_id = len(result.vocab)
# logging.debug("word id: %d, word: %s, weights: %s" % (word_id, word, weights))
if word in result.vocab:
logging.debug(
"duplicate word '%s' in %s, ignoring all but first" %
(word, fname))
return
if counts is None:
# most common scenario: no vocab file given. just make up
# some bogus counts, in descending order
result.vocab[word] = Vocab(
index=word_id, count=vocab_size - word_id)
elif word in counts:
# use count from the vocab file
result.vocab[word] = Vocab(
index=word_id, count=counts[word])
else:
# vocab file given, but word is missing -- set count to
# None (TODO: or raise?)
logging.debug(
"vocabulary file is incomplete: '%s' is missing" %
word)
result.vocab[word] = Vocab(index=word_id, count=None)
result.syn0[word_id] = weights
result.index2word.append(word)
if binary:
binary_len = dtype(REAL).itemsize * vector_size
for _ in xrange(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch == b'':
raise EOFError(
"unexpected end of input; is count incorrect or file otherwise damaged?")
# ignore newlines in front of words (some binary files
# have)
if ch != b'\n':
word.append(ch)
word = to_unicode(
b''.join(word), encoding=encoding, errors=unicode_errors)
weights = fromstring(fin.read(binary_len), dtype=REAL)
add_word(word, weights)
else:
for line_no in xrange(vocab_size):
line = fin.readline()
if line == b'':
raise EOFError(
"unexpected end of input; is count incorrect or file otherwise damaged?")
parts = to_unicode(
line.rstrip(),
encoding=encoding,
errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError(
"invalid vector on line %s (is this really the text format?)" %
line_no)
word, weights = parts[0], [REAL(x) for x in parts[1:]]
add_word(word, weights)
if result.syn0.shape[0] != len(result.vocab):
logging.debug(
"duplicate words detected, shrinking matrix size from %i to %i" %
(result.syn0.shape[0], len(result.vocab)))
result.syn0 = ascontiguousarray(result.syn0[: len(result.vocab)])
assert (len(result.vocab), vector_size) == result.syn0.shape
'''
KDTree
Build KDTree with vectors.
http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KDTree.html#sklearn.neighbors.KDTree
'''
result.kdt = KDTree(result.syn0, leaf_size=10, metric = "euclidean")
logging.debug("loaded %s matrix from %s" % (result.syn0.shape, fname))
return result
def neighbours(self, word, size = 10):
"""
Get nearest words with KDTree, ranking by cosine distance
"""
word = word.strip()
v = self.word_vec(word)
[distances], [points] = self.kdt.query(array([v]), k = size, return_distance = True)
assert len(distances) == len(points), "distances and points should be in same shape."
words, scores = [], {}
for (x,y) in zip(points, distances):
w = self.index2word[x]
if w == word: s = 1.0
else: s = cosine(v, self.syn0[x])
if s < 0: s = abs(s)
words.append(w)
scores[w] = min(s, 1.0)
for x in sorted(words, key=scores.get, reverse=True):
yield x, scores[x]
|
huyingxi/Synonyms | synonyms/word2vec.py | KeyedVectors.neighbours | python | def neighbours(self, word, size = 10):
word = word.strip()
v = self.word_vec(word)
[distances], [points] = self.kdt.query(array([v]), k = size, return_distance = True)
assert len(distances) == len(points), "distances and points should be in same shape."
words, scores = [], {}
for (x,y) in zip(points, distances):
w = self.index2word[x]
if w == word: s = 1.0
else: s = cosine(v, self.syn0[x])
if s < 0: s = abs(s)
words.append(w)
scores[w] = min(s, 1.0)
for x in sorted(words, key=scores.get, reverse=True):
yield x, scores[x] | Get nearest words with KDTree, ranking by cosine distance | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/word2vec.py#L236-L253 | [
"cosine = lambda a, b: dot(a, b)/(norm(a)*norm(b))\n",
"def word_vec(self, word, use_norm=False):\n \"\"\"\n Accept a single word as input.\n Returns the word's representations in vector space, as a 1D numpy array.\n If `use_norm` is True, returns the normalized word vector.\n Example::\n >>> ... | class KeyedVectors():
"""
Class to contain vectors and vocab for the Word2Vec training class and other w2v methods not directly
involved in training such as most_similar()
"""
def __init__(self):
self.syn0 = []
self.syn0norm = None
self.vocab = {}
self.index2word = []
self.vector_size = None
self.kdt = None
@property
def wv(self):
return self
def save(self, *args, **kwargs):
# don't bother storing the cached normalized vectors
kwargs['ignore'] = kwargs.get('ignore', ['syn0norm'])
super(KeyedVectors, self).save(*args, **kwargs)
@classmethod
def load_word2vec_format(
cls,
fname,
fvocab=None,
binary=False,
encoding='utf8',
unicode_errors='strict',
limit=None,
datatype=REAL):
"""
Load the input-hidden weight matrix from the original C word2vec-tool format.
Note that the information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
`binary` is a boolean indicating whether the data is in binary word2vec format.
`norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory.
Word counts are read from `fvocab` filename, if set (this is the file generated
by `-save-vocab` flag of the original C tool).
If you trained the C model using non-utf8 encoding for words, specify that
encoding in `encoding`.
`unicode_errors`, default 'strict', is a string suitable to be passed as the `errors`
argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
file may include word tokens truncated in the middle of a multibyte unicode character
(as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
`limit` sets a maximum number of word-vectors to read from the file. The default,
None, means read all.
`datatype` (experimental) can coerce dimensions to a non-default float type (such
as np.float16) to save memory. (Such types may result in much slower bulk operations
or incompatibility with optimized routines.)
"""
counts = None
if fvocab is not None:
logging.debug("loading word counts from %s" % fvocab)
counts = {}
with smart_open(fvocab) as fin:
for line in fin:
word, count = to_unicode(line).strip().split()
counts[word] = int(count)
logging.debug("loading projection weights from %s" % fname)
with smart_open(fname) as fin:
header = to_unicode(fin.readline(), encoding=encoding)
# throws for invalid file format
vocab_size, vector_size = (int(x) for x in header.split())
if limit:
vocab_size = min(vocab_size, limit)
result = cls()
result.vector_size = vector_size
result.syn0 = zeros((vocab_size, vector_size), dtype=datatype)
def add_word(word, weights):
word_id = len(result.vocab)
# logging.debug("word id: %d, word: %s, weights: %s" % (word_id, word, weights))
if word in result.vocab:
logging.debug(
"duplicate word '%s' in %s, ignoring all but first" %
(word, fname))
return
if counts is None:
# most common scenario: no vocab file given. just make up
# some bogus counts, in descending order
result.vocab[word] = Vocab(
index=word_id, count=vocab_size - word_id)
elif word in counts:
# use count from the vocab file
result.vocab[word] = Vocab(
index=word_id, count=counts[word])
else:
# vocab file given, but word is missing -- set count to
# None (TODO: or raise?)
logging.debug(
"vocabulary file is incomplete: '%s' is missing" %
word)
result.vocab[word] = Vocab(index=word_id, count=None)
result.syn0[word_id] = weights
result.index2word.append(word)
if binary:
binary_len = dtype(REAL).itemsize * vector_size
for _ in xrange(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch == b'':
raise EOFError(
"unexpected end of input; is count incorrect or file otherwise damaged?")
# ignore newlines in front of words (some binary files
# have)
if ch != b'\n':
word.append(ch)
word = to_unicode(
b''.join(word), encoding=encoding, errors=unicode_errors)
weights = fromstring(fin.read(binary_len), dtype=REAL)
add_word(word, weights)
else:
for line_no in xrange(vocab_size):
line = fin.readline()
if line == b'':
raise EOFError(
"unexpected end of input; is count incorrect or file otherwise damaged?")
parts = to_unicode(
line.rstrip(),
encoding=encoding,
errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError(
"invalid vector on line %s (is this really the text format?)" %
line_no)
word, weights = parts[0], [REAL(x) for x in parts[1:]]
add_word(word, weights)
if result.syn0.shape[0] != len(result.vocab):
logging.debug(
"duplicate words detected, shrinking matrix size from %i to %i" %
(result.syn0.shape[0], len(result.vocab)))
result.syn0 = ascontiguousarray(result.syn0[: len(result.vocab)])
assert (len(result.vocab), vector_size) == result.syn0.shape
'''
KDTree
Build KDTree with vectors.
http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KDTree.html#sklearn.neighbors.KDTree
'''
result.kdt = KDTree(result.syn0, leaf_size=10, metric = "euclidean")
logging.debug("loaded %s matrix from %s" % (result.syn0.shape, fname))
return result
def word_vec(self, word, use_norm=False):
"""
Accept a single word as input.
Returns the word's representations in vector space, as a 1D numpy array.
If `use_norm` is True, returns the normalized word vector.
Example::
>>> trained_model['office']
array([ -1.40128313e-02, ...])
"""
if word in self.vocab:
if use_norm:
result = self.syn0norm[self.vocab[word].index]
else:
result = self.syn0[self.vocab[word].index]
result.setflags(write=False)
return result
else:
raise KeyError("word '%s' not in vocabulary" % word)
|
huyingxi/Synonyms | synonyms/utils.py | get_random_state | python | def get_random_state(seed):
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError(
'%r cannot be used to seed a np.random.RandomState instance' %
seed) | Turn seed into a np.random.RandomState instance.
Method originally from maciejkula/glove-python, and written by @joshloyal. | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/utils.py#L91-L104 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <me@radimrehurek.com>
# Modifications (C) 2017 Hai Liang Wang <hailiang.hl.wang@gmail.com>
# Licensed under the GNU LGPL v3.0 - http://www.gnu.org/licenses/lgpl.html
# Author: Hai Liang Wang
# Date: 2017-10-16:14:13:24
#
#=========================================================================
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hai Liang Wang"
__date__ = "2017-10-16:14:13:24"
import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
import re
import unicodedata
import os
import random
import shutil
import sys
import subprocess
from contextlib import contextmanager
import numpy as np
import numbers
from six import string_types, u
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
unicode = str
import collections
import warnings
try:
from html.entities import name2codepoint as n2cp
except ImportError:
from htmlentitydefs import name2codepoint as n2cp
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
# Prefer the third-party smart_open package (transparent gz/bz2/S3 support);
# when it is missing, install minimal local-filesystem-only replacements.
try:
    from smart_open import smart_open
except ImportError:
    print("smart_open library not found; falling back to local-filesystem-only")
    def make_closing(base, **attrs):
        """
        Add support for `with Base(attrs) as fout:` to the base class if it's missing.
        The base class' `close()` method will be called on context exit, to always close the file properly.
        This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
        raise "AttributeError: GzipFile instance has no attribute '__exit__'".
        """
        # Only patch protocol methods the base class does not already provide.
        if not hasattr(base, '__enter__'):
            attrs['__enter__'] = lambda self: self
        if not hasattr(base, '__exit__'):
            attrs['__exit__'] = lambda self, type, value, traceback: self.close()
        # Build a subclass named 'Closing<Base>' carrying the patched methods.
        return type('Closing' + base.__name__, (base, object), attrs)
    def smart_open(fname, mode='rb'):
        """Open *fname*, dispatching on its extension: .bz2 and .gz files are
        wrapped via make_closing() so they support the `with` protocol; any
        other path falls through to the builtin open()."""
        _, ext = os.path.splitext(fname)
        if ext == '.bz2':
            from bz2 import BZ2File
            return make_closing(BZ2File)(fname, mode)
        if ext == '.gz':
            from gzip import GzipFile
            return make_closing(GzipFile)(fname, mode)
        return open(fname, mode)
PAT_ALPHABETIC = re.compile(r'(((?![\d])\w)+)', re.UNICODE)
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
class NoCM(object):
    """No-op lock stand-in: satisfies both the acquire/release protocol and
    the context-manager protocol without doing anything."""

    def acquire(self):
        """Pretend to take the lock; no-op."""

    def release(self):
        """Pretend to drop the lock; no-op."""

    def __enter__(self):
        """Enter the context; nothing to set up."""

    def __exit__(self, type, value, traceback):
        """Leave the context; nothing to tear down, exceptions propagate."""
nocm = NoCM()
@contextmanager
def file_or_filename(input):
    """
    Yield a file-like object positioned at the start. *input* is either a
    filename (gz/bz2 also supported) or a file-like object supporting seek().
    """
    if not isinstance(input, string_types):
        # already a file-like object: rewind so reads start at the beginning
        input.seek(0)
        yield input
    else:
        # a path string: open it (smart_open handles .gz/.bz2 transparently)
        yield smart_open(input)
def deaccent(text):
    """
    Strip accent marks from *text* (unicode, or utf8-encoded bytes) and
    return the result as unicode.

    >>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
    u'Sef chomutovskych komunistu dostal postou bily prasek'
    """
    if not isinstance(text, unicode):
        # byte strings are assumed to be utf8; strict error handling
        text = text.decode('utf8')
    # decompose, drop combining marks (category 'Mn'), then recompose
    decomposed = unicodedata.normalize("NFD", text)
    without_marks = u('').join(c for c in decomposed if unicodedata.category(c) != 'Mn')
    return unicodedata.normalize("NFC", without_marks)
def copytree_hardlink(source, dest):
    """
    Recursively copy *source* to *dest* like shutil.copytree, but hard-link
    the files instead of copying them. UNIX-only (relies on os.link).
    """
    saved_copy2 = shutil.copy2
    try:
        # copytree uses shutil.copy2 per file; swap in os.link temporarily
        shutil.copy2 = os.link
        shutil.copytree(source, dest)
    finally:
        # always restore the real copy2, even if copytree failed
        shutil.copy2 = saved_copy2
def tokenize(
        text,
        lowercase=False,
        deacc=False,
        encoding='utf8',
        errors="strict",
        to_lower=False,
        lower=False):
    """
    Iteratively yield tokens as unicode strings, optionally lowercasing the
    unicode string (set any of `lowercase`, `to_lower`, `lower` to True) and
    removing accent marks (`deacc=True`).

    *text* may be either unicode or a utf8-encoded byte string. The tokens on
    output are maximal contiguous sequences of alphabetic characters
    (no digits!).

    >>> list(tokenize('Nic nemůže letět rychlostí vyšší, než 300 tisíc kilometrů za sekundu!', deacc = True))
    [u'Nic', u'nemuze', u'letet', u'rychlosti', u'vyssi', u'nez', u'tisic', u'kilometru', u'za', u'sekundu']
    """
    text = to_unicode(text, encoding, errors=errors)
    if lowercase or to_lower or lower:
        text = text.lower()
    if deacc:
        text = deaccent(text)
    return simple_tokenize(text)
def simple_tokenize(text):
    """Yield each maximal run of alphabetic characters in *text*, as matched
    by the module-level PAT_ALPHABETIC pattern."""
    for token_match in PAT_ALPHABETIC.finditer(text):
        yield token_match.group()
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15):
    """
    Convert *doc* into a list of lowercased (optionally de-accented) unicode
    tokens, keeping only tokens whose length lies in [min_len, max_len] and
    which do not start with an underscore.
    """
    kept = []
    for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore'):
        if token.startswith('_'):
            continue
        if min_len <= len(token) <= max_len:
            kept.append(token)
    return kept
def any2utf8(text, errors='strict', encoding='utf8'):
    """Convert *text* (unicode, or a bytestring in *encoding*) to utf8-encoded bytes."""
    if isinstance(text, unicode):
        return text.encode('utf8')
    # decode first, then re-encode, so the output is guaranteed valid utf8
    decoded = unicode(text, encoding, errors=errors)
    return decoded.encode('utf8')
to_utf8 = any2utf8
def any2unicode(text, encoding='utf8', errors='strict'):
    """Convert *text* (a bytestring in *encoding*, or unicode) to unicode."""
    if isinstance(text, unicode):
        return text
    decoded = unicode(text, encoding, errors=errors)
    return decoded
to_unicode = any2unicode
# cosine similarity: dot(a, b) / (|a| * |b|)
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.norm.html
from numpy import dot
from numpy.linalg import norm
def cosine(a, b):
    """Return the cosine similarity of vectors *a* and *b*.

    NOTE(review): the old comment called this "cosine distance", but
    dot(a, b) / (|a| * |b|) is the similarity (1.0 for parallel vectors).
    Replaces a PEP 8-discouraged lambda assignment with a named def;
    callers see the same callable under the same name.
    """
    return dot(a, b) / (norm(a) * norm(b))
def sigmoid(x):
    """Return the logistic sigmoid 1 / (1 + exp(-x)); works on scalars and ndarrays."""
    return 1.0 / (np.exp(-x) + 1.0)
def call_on_class_only(*args, **kwargs):
    """Placeholder that rejects load methods when invoked on an instance
    rather than on the class itself; always raises AttributeError."""
    raise AttributeError('This method should be called on a class object.')
def is_digit(obj):
    """Return True if *obj* is a number (integral, real, or complex)."""
    numeric_abcs = (numbers.Integral, numbers.Complex, numbers.Real)
    return isinstance(obj, numeric_abcs)
def is_zhs(str):
    """Return True if every character of *str* is a Chinese character
    (vacuously True for the empty string)."""
    return all(is_zh(ch) for ch in str)
def is_zh(ch):
    """Return True if *ch* is a Chinese (CJK ideograph) character.

    Full-width punctuation / latin forms are deliberately not counted in.
    """
    cp = ord(ch)
    # inclusive (start, end) code-point pairs of the accepted CJK blocks
    cjk_ranges = (
        (0x2e80, 0x2fef),    # CJK Radicals Supplement and Kangxi Radicals
        (0x3400, 0x4dbf),    # CJK Unified Ideographs Extension A
        (0x4e00, 0x9fbb),    # CJK Unified Ideographs
        (0xf900, 0xfad9),    # CJK Compatibility Ideographs
        (0x20000, 0x2a6df),  # CJK Unified Ideographs Extension B
    )
    return any(lo <= cp <= hi for lo, hi in cjk_ranges)
def is_punct(ch):
    """
    Return True if *ch* is a punctuation character: ASCII punctuation, or a
    member of the common CJK / full-width punctuation blocks.
    """
    import string  # local import keeps this bug fix self-contained
    x = ord(ch)
    # ASCII range. NOTE(review): the original called ascii.ispunct(x), but the
    # builtin `ascii` is a function with no `ispunct` attribute, so every ASCII
    # character raised AttributeError here; `ch in string.punctuation`
    # implements the intended C-style ispunct check. (The old comment noted
    # that space is sometimes used as punctuation in informal text, but
    # ispunct never matched space, so space is still excluded.)
    if x < 127 and ch in string.punctuation:
        return True
    # General Punctuation
    elif 0x2000 <= x <= 0x206f:
        return True
    # CJK Symbols and Punctuation
    elif 0x3000 <= x <= 0x303f:
        return True
    # Halfwidth and Fullwidth Forms
    elif 0xff00 <= x <= 0xffef:
        return True
    # CJK Compatibility Forms
    elif 0xfe30 <= x <= 0xfe4f:
        return True
    else:
        return False
huyingxi/Synonyms | synonyms/utils.py | file_or_filename | python | def file_or_filename(input):
if isinstance(input, string_types):
# input was a filename: open as file
yield smart_open(input)
else:
# input already a file-like object; just reset to the beginning
input.seek(0)
yield input | Return a file-like object ready to be read from the beginning. `input` is either
a filename (gz/bz2 also supported) or a file-like object supporting seek. | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/utils.py#L125-L137 | [
"def smart_open(fname, mode='rb'):\n _, ext = os.path.splitext(fname)\n if ext == '.bz2':\n from bz2 import BZ2File\n return make_closing(BZ2File)(fname, mode)\n if ext == '.gz':\n from gzip import GzipFile\n return make_closing(GzipFile)(fname, mode)\n return open(fname, mod... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <me@radimrehurek.com>
# Modifications (C) 2017 Hai Liang Wang <hailiang.hl.wang@gmail.com>
# Licensed under the GNU LGPL v3.0 - http://www.gnu.org/licenses/lgpl.html
# Author: Hai Liang Wang
# Date: 2017-10-16:14:13:24
#
#=========================================================================
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hai Liang Wang"
__date__ = "2017-10-16:14:13:24"
import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
import re
import unicodedata
import os
import random
import shutil
import sys
import subprocess
from contextlib import contextmanager
import numpy as np
import numbers
from six import string_types, u
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
unicode = str
import collections
import warnings
try:
from html.entities import name2codepoint as n2cp
except ImportError:
from htmlentitydefs import name2codepoint as n2cp
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
try:
from smart_open import smart_open
except ImportError:
print("smart_open library not found; falling back to local-filesystem-only")
def make_closing(base, **attrs):
"""
Add support for `with Base(attrs) as fout:` to the base class if it's missing.
The base class' `close()` method will be called on context exit, to always close the file properly.
This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
raise "AttributeError: GzipFile instance has no attribute '__exit__'".
"""
if not hasattr(base, '__enter__'):
attrs['__enter__'] = lambda self: self
if not hasattr(base, '__exit__'):
attrs['__exit__'] = lambda self, type, value, traceback: self.close()
return type('Closing' + base.__name__, (base, object), attrs)
def smart_open(fname, mode='rb'):
_, ext = os.path.splitext(fname)
if ext == '.bz2':
from bz2 import BZ2File
return make_closing(BZ2File)(fname, mode)
if ext == '.gz':
from gzip import GzipFile
return make_closing(GzipFile)(fname, mode)
return open(fname, mode)
PAT_ALPHABETIC = re.compile(r'(((?![\d])\w)+)', re.UNICODE)
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
def get_random_state(seed):
    """
    Turn *seed* into a np.random.RandomState instance.
    Method originally from maciejkula/glove-python, and written by @joshloyal.
    """
    if seed is None or seed is np.random:
        # fall back to the global RandomState behind the np.random functions
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError(
        '%r cannot be used to seed a np.random.RandomState instance' %
        seed)
class NoCM(object):
def acquire(self):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
nocm = NoCM()
@contextmanager
def deaccent(text):
"""
Remove accentuation from the given string. Input text is either a unicode string or utf8 encoded bytestring.
Return input string with accents removed, as unicode.
>>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
u'Sef chomutovskych komunistu dostal postou bily prasek'
"""
if not isinstance(text, unicode):
# assume utf8 for byte strings, use default (strict) error handling
text = text.decode('utf8')
norm = unicodedata.normalize("NFD", text)
result = u('').join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
return unicodedata.normalize("NFC", result)
def copytree_hardlink(source, dest):
"""
Recursively copy a directory ala shutils.copytree, but hardlink files
instead of copying. Available on UNIX systems only.
"""
copy2 = shutil.copy2
try:
shutil.copy2 = os.link
shutil.copytree(source, dest)
finally:
shutil.copy2 = copy2
def tokenize(
text,
lowercase=False,
deacc=False,
encoding='utf8',
errors="strict",
to_lower=False,
lower=False):
"""
Iteratively yield tokens as unicode strings, removing accent marks
and optionally lowercasing the unidoce string by assigning True
to one of the parameters, lowercase, to_lower, or lower.
Input text may be either unicode or utf8-encoded byte string.
The tokens on output are maximal contiguous sequences of alphabetic
characters (no digits!).
>>> list(tokenize('Nic nemůže letět rychlostí vyšší, než 300 tisíc kilometrů za sekundu!', deacc = True))
[u'Nic', u'nemuze', u'letet', u'rychlosti', u'vyssi', u'nez', u'tisic', u'kilometru', u'za', u'sekundu']
"""
lowercase = lowercase or to_lower or lower
text = to_unicode(text, encoding, errors=errors)
if lowercase:
text = text.lower()
if deacc:
text = deaccent(text)
return simple_tokenize(text)
def simple_tokenize(text):
for match in PAT_ALPHABETIC.finditer(text):
yield match.group()
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15):
"""
Convert a document into a list of tokens.
This lowercases, tokenizes, de-accents (optional). -- the output are final
tokens = unicode strings, that won't be processed any further.
"""
tokens = [
token for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore')
if min_len <= len(token) <= max_len and not token.startswith('_')
]
return tokens
def any2utf8(text, errors='strict', encoding='utf8'):
"""Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8."""
if isinstance(text, unicode):
return text.encode('utf8')
# do bytestring -> unicode -> utf8 full circle, to ensure valid utf8
return unicode(text, encoding, errors=errors).encode('utf8')
to_utf8 = any2utf8
def any2unicode(text, encoding='utf8', errors='strict'):
"""Convert a string (bytestring in `encoding` or unicode), to unicode."""
if isinstance(text, unicode):
return text
return unicode(text, encoding, errors=errors)
to_unicode = any2unicode
# cosine distance
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.norm.html
from numpy import dot
from numpy.linalg import norm
cosine = lambda a, b: dot(a, b)/(norm(a)*norm(b))
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def call_on_class_only(*args, **kwargs):
"""Raise exception when load methods are called on instance"""
raise AttributeError('This method should be called on a class object.')
def is_digit(obj):
'''
Check if an object is Number
'''
return isinstance(obj, (numbers.Integral, numbers.Complex, numbers.Real))
def is_zhs(str):
'''
Check if str is Chinese Word
'''
for i in str:
if not is_zh(i):
return False
return True
def is_zh(ch):
"""return True if ch is Chinese character.
full-width puncts/latins are not counted in.
"""
x = ord(ch)
# CJK Radicals Supplement and Kangxi radicals
if 0x2e80 <= x <= 0x2fef:
return True
# CJK Unified Ideographs Extension A
elif 0x3400 <= x <= 0x4dbf:
return True
# CJK Unified Ideographs
elif 0x4e00 <= x <= 0x9fbb:
return True
# CJK Compatibility Ideographs
elif 0xf900 <= x <= 0xfad9:
return True
# CJK Unified Ideographs Extension B
elif 0x20000 <= x <= 0x2a6df:
return True
else:
return False
def is_punct(ch):
x = ord(ch)
# in no-formal literals, space is used as punctuation sometimes.
if x < 127 and ascii.ispunct(x):
return True
# General Punctuation
elif 0x2000 <= x <= 0x206f:
return True
# CJK Symbols and Punctuation
elif 0x3000 <= x <= 0x303f:
return True
# Halfwidth and Fullwidth Forms
elif 0xff00 <= x <= 0xffef:
return True
# CJK Compatibility Forms
elif 0xfe30 <= x <= 0xfe4f:
return True
else:
return False |
huyingxi/Synonyms | synonyms/utils.py | deaccent | python | def deaccent(text):
if not isinstance(text, unicode):
# assume utf8 for byte strings, use default (strict) error handling
text = text.decode('utf8')
norm = unicodedata.normalize("NFD", text)
result = u('').join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
return unicodedata.normalize("NFC", result) | Remove accentuation from the given string. Input text is either a unicode string or utf8 encoded bytestring.
Return input string with accents removed, as unicode.
>>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
u'Sef chomutovskych komunistu dostal postou bily prasek' | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/utils.py#L140-L155 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <me@radimrehurek.com>
# Modifications (C) 2017 Hai Liang Wang <hailiang.hl.wang@gmail.com>
# Licensed under the GNU LGPL v3.0 - http://www.gnu.org/licenses/lgpl.html
# Author: Hai Liang Wang
# Date: 2017-10-16:14:13:24
#
#=========================================================================
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hai Liang Wang"
__date__ = "2017-10-16:14:13:24"
import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
import re
import unicodedata
import os
import random
import shutil
import sys
import subprocess
from contextlib import contextmanager
import numpy as np
import numbers
from six import string_types, u
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
unicode = str
import collections
import warnings
try:
from html.entities import name2codepoint as n2cp
except ImportError:
from htmlentitydefs import name2codepoint as n2cp
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
try:
from smart_open import smart_open
except ImportError:
print("smart_open library not found; falling back to local-filesystem-only")
def make_closing(base, **attrs):
"""
Add support for `with Base(attrs) as fout:` to the base class if it's missing.
The base class' `close()` method will be called on context exit, to always close the file properly.
This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
raise "AttributeError: GzipFile instance has no attribute '__exit__'".
"""
if not hasattr(base, '__enter__'):
attrs['__enter__'] = lambda self: self
if not hasattr(base, '__exit__'):
attrs['__exit__'] = lambda self, type, value, traceback: self.close()
return type('Closing' + base.__name__, (base, object), attrs)
def smart_open(fname, mode='rb'):
_, ext = os.path.splitext(fname)
if ext == '.bz2':
from bz2 import BZ2File
return make_closing(BZ2File)(fname, mode)
if ext == '.gz':
from gzip import GzipFile
return make_closing(GzipFile)(fname, mode)
return open(fname, mode)
PAT_ALPHABETIC = re.compile(r'(((?![\d])\w)+)', re.UNICODE)
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
def get_random_state(seed):
"""
Turn seed into a np.random.RandomState instance.
Method originally from maciejkula/glove-python, and written by @joshloyal.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError(
'%r cannot be used to seed a np.random.RandomState instance' %
seed)
class NoCM(object):
def acquire(self):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
nocm = NoCM()
@contextmanager
def file_or_filename(input):
"""
Return a file-like object ready to be read from the beginning. `input` is either
a filename (gz/bz2 also supported) or a file-like object supporting seek.
"""
if isinstance(input, string_types):
# input was a filename: open as file
yield smart_open(input)
else:
# input already a file-like object; just reset to the beginning
input.seek(0)
yield input
def copytree_hardlink(source, dest):
"""
Recursively copy a directory ala shutils.copytree, but hardlink files
instead of copying. Available on UNIX systems only.
"""
copy2 = shutil.copy2
try:
shutil.copy2 = os.link
shutil.copytree(source, dest)
finally:
shutil.copy2 = copy2
def tokenize(
text,
lowercase=False,
deacc=False,
encoding='utf8',
errors="strict",
to_lower=False,
lower=False):
"""
Iteratively yield tokens as unicode strings, removing accent marks
and optionally lowercasing the unidoce string by assigning True
to one of the parameters, lowercase, to_lower, or lower.
Input text may be either unicode or utf8-encoded byte string.
The tokens on output are maximal contiguous sequences of alphabetic
characters (no digits!).
>>> list(tokenize('Nic nemůže letět rychlostí vyšší, než 300 tisíc kilometrů za sekundu!', deacc = True))
[u'Nic', u'nemuze', u'letet', u'rychlosti', u'vyssi', u'nez', u'tisic', u'kilometru', u'za', u'sekundu']
"""
lowercase = lowercase or to_lower or lower
text = to_unicode(text, encoding, errors=errors)
if lowercase:
text = text.lower()
if deacc:
text = deaccent(text)
return simple_tokenize(text)
def simple_tokenize(text):
for match in PAT_ALPHABETIC.finditer(text):
yield match.group()
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15):
"""
Convert a document into a list of tokens.
This lowercases, tokenizes, de-accents (optional). -- the output are final
tokens = unicode strings, that won't be processed any further.
"""
tokens = [
token for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore')
if min_len <= len(token) <= max_len and not token.startswith('_')
]
return tokens
def any2utf8(text, errors='strict', encoding='utf8'):
"""Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8."""
if isinstance(text, unicode):
return text.encode('utf8')
# do bytestring -> unicode -> utf8 full circle, to ensure valid utf8
return unicode(text, encoding, errors=errors).encode('utf8')
to_utf8 = any2utf8
def any2unicode(text, encoding='utf8', errors='strict'):
"""Convert a string (bytestring in `encoding` or unicode), to unicode."""
if isinstance(text, unicode):
return text
return unicode(text, encoding, errors=errors)
to_unicode = any2unicode
# cosine distance
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.norm.html
from numpy import dot
from numpy.linalg import norm
cosine = lambda a, b: dot(a, b)/(norm(a)*norm(b))
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def call_on_class_only(*args, **kwargs):
"""Raise exception when load methods are called on instance"""
raise AttributeError('This method should be called on a class object.')
def is_digit(obj):
'''
Check if an object is Number
'''
return isinstance(obj, (numbers.Integral, numbers.Complex, numbers.Real))
def is_zhs(str):
'''
Check if str is Chinese Word
'''
for i in str:
if not is_zh(i):
return False
return True
def is_zh(ch):
"""return True if ch is Chinese character.
full-width puncts/latins are not counted in.
"""
x = ord(ch)
# CJK Radicals Supplement and Kangxi radicals
if 0x2e80 <= x <= 0x2fef:
return True
# CJK Unified Ideographs Extension A
elif 0x3400 <= x <= 0x4dbf:
return True
# CJK Unified Ideographs
elif 0x4e00 <= x <= 0x9fbb:
return True
# CJK Compatibility Ideographs
elif 0xf900 <= x <= 0xfad9:
return True
# CJK Unified Ideographs Extension B
elif 0x20000 <= x <= 0x2a6df:
return True
else:
return False
def is_punct(ch):
x = ord(ch)
# in no-formal literals, space is used as punctuation sometimes.
if x < 127 and ascii.ispunct(x):
return True
# General Punctuation
elif 0x2000 <= x <= 0x206f:
return True
# CJK Symbols and Punctuation
elif 0x3000 <= x <= 0x303f:
return True
# Halfwidth and Fullwidth Forms
elif 0xff00 <= x <= 0xffef:
return True
# CJK Compatibility Forms
elif 0xfe30 <= x <= 0xfe4f:
return True
else:
return False |
huyingxi/Synonyms | synonyms/utils.py | copytree_hardlink | python | def copytree_hardlink(source, dest):
copy2 = shutil.copy2
try:
shutil.copy2 = os.link
shutil.copytree(source, dest)
finally:
shutil.copy2 = copy2 | Recursively copy a directory ala shutils.copytree, but hardlink files
instead of copying. Available on UNIX systems only. | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/utils.py#L158-L168 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <me@radimrehurek.com>
# Modifications (C) 2017 Hai Liang Wang <hailiang.hl.wang@gmail.com>
# Licensed under the GNU LGPL v3.0 - http://www.gnu.org/licenses/lgpl.html
# Author: Hai Liang Wang
# Date: 2017-10-16:14:13:24
#
#=========================================================================
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hai Liang Wang"
__date__ = "2017-10-16:14:13:24"
import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
import re
import unicodedata
import os
import random
import shutil
import sys
import subprocess
from contextlib import contextmanager
import numpy as np
import numbers
from six import string_types, u
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
unicode = str
import collections
import warnings
try:
from html.entities import name2codepoint as n2cp
except ImportError:
from htmlentitydefs import name2codepoint as n2cp
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
try:
from smart_open import smart_open
except ImportError:
print("smart_open library not found; falling back to local-filesystem-only")
def make_closing(base, **attrs):
"""
Add support for `with Base(attrs) as fout:` to the base class if it's missing.
The base class' `close()` method will be called on context exit, to always close the file properly.
This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
raise "AttributeError: GzipFile instance has no attribute '__exit__'".
"""
if not hasattr(base, '__enter__'):
attrs['__enter__'] = lambda self: self
if not hasattr(base, '__exit__'):
attrs['__exit__'] = lambda self, type, value, traceback: self.close()
return type('Closing' + base.__name__, (base, object), attrs)
def smart_open(fname, mode='rb'):
_, ext = os.path.splitext(fname)
if ext == '.bz2':
from bz2 import BZ2File
return make_closing(BZ2File)(fname, mode)
if ext == '.gz':
from gzip import GzipFile
return make_closing(GzipFile)(fname, mode)
return open(fname, mode)
PAT_ALPHABETIC = re.compile(r'(((?![\d])\w)+)', re.UNICODE)
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
def get_random_state(seed):
"""
Turn seed into a np.random.RandomState instance.
Method originally from maciejkula/glove-python, and written by @joshloyal.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError(
'%r cannot be used to seed a np.random.RandomState instance' %
seed)
class NoCM(object):
def acquire(self):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
nocm = NoCM()
@contextmanager
def file_or_filename(input):
"""
Return a file-like object ready to be read from the beginning. `input` is either
a filename (gz/bz2 also supported) or a file-like object supporting seek.
"""
if isinstance(input, string_types):
# input was a filename: open as file
yield smart_open(input)
else:
# input already a file-like object; just reset to the beginning
input.seek(0)
yield input
def deaccent(text):
"""
Remove accentuation from the given string. Input text is either a unicode string or utf8 encoded bytestring.
Return input string with accents removed, as unicode.
>>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
u'Sef chomutovskych komunistu dostal postou bily prasek'
"""
if not isinstance(text, unicode):
# assume utf8 for byte strings, use default (strict) error handling
text = text.decode('utf8')
norm = unicodedata.normalize("NFD", text)
result = u('').join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
return unicodedata.normalize("NFC", result)
def tokenize(
text,
lowercase=False,
deacc=False,
encoding='utf8',
errors="strict",
to_lower=False,
lower=False):
"""
Iteratively yield tokens as unicode strings, removing accent marks
and optionally lowercasing the unidoce string by assigning True
to one of the parameters, lowercase, to_lower, or lower.
Input text may be either unicode or utf8-encoded byte string.
The tokens on output are maximal contiguous sequences of alphabetic
characters (no digits!).
>>> list(tokenize('Nic nemůže letět rychlostí vyšší, než 300 tisíc kilometrů za sekundu!', deacc = True))
[u'Nic', u'nemuze', u'letet', u'rychlosti', u'vyssi', u'nez', u'tisic', u'kilometru', u'za', u'sekundu']
"""
lowercase = lowercase or to_lower or lower
text = to_unicode(text, encoding, errors=errors)
if lowercase:
text = text.lower()
if deacc:
text = deaccent(text)
return simple_tokenize(text)
def simple_tokenize(text):
for match in PAT_ALPHABETIC.finditer(text):
yield match.group()
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15):
"""
Convert a document into a list of tokens.
This lowercases, tokenizes, de-accents (optional). -- the output are final
tokens = unicode strings, that won't be processed any further.
"""
tokens = [
token for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore')
if min_len <= len(token) <= max_len and not token.startswith('_')
]
return tokens
def any2utf8(text, errors='strict', encoding='utf8'):
"""Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8."""
if isinstance(text, unicode):
return text.encode('utf8')
# do bytestring -> unicode -> utf8 full circle, to ensure valid utf8
return unicode(text, encoding, errors=errors).encode('utf8')
to_utf8 = any2utf8
def any2unicode(text, encoding='utf8', errors='strict'):
"""Convert a string (bytestring in `encoding` or unicode), to unicode."""
if isinstance(text, unicode):
return text
return unicode(text, encoding, errors=errors)
to_unicode = any2unicode
# cosine distance
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.norm.html
from numpy import dot
from numpy.linalg import norm
cosine = lambda a, b: dot(a, b)/(norm(a)*norm(b))
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def call_on_class_only(*args, **kwargs):
"""Raise exception when load methods are called on instance"""
raise AttributeError('This method should be called on a class object.')
def is_digit(obj):
'''
Check if an object is Number
'''
return isinstance(obj, (numbers.Integral, numbers.Complex, numbers.Real))
def is_zhs(str):
'''
Check if str is Chinese Word
'''
for i in str:
if not is_zh(i):
return False
return True
def is_zh(ch):
"""return True if ch is Chinese character.
full-width puncts/latins are not counted in.
"""
x = ord(ch)
# CJK Radicals Supplement and Kangxi radicals
if 0x2e80 <= x <= 0x2fef:
return True
# CJK Unified Ideographs Extension A
elif 0x3400 <= x <= 0x4dbf:
return True
# CJK Unified Ideographs
elif 0x4e00 <= x <= 0x9fbb:
return True
# CJK Compatibility Ideographs
elif 0xf900 <= x <= 0xfad9:
return True
# CJK Unified Ideographs Extension B
elif 0x20000 <= x <= 0x2a6df:
return True
else:
return False
def is_punct(ch):
x = ord(ch)
# in no-formal literals, space is used as punctuation sometimes.
if x < 127 and ascii.ispunct(x):
return True
# General Punctuation
elif 0x2000 <= x <= 0x206f:
return True
# CJK Symbols and Punctuation
elif 0x3000 <= x <= 0x303f:
return True
# Halfwidth and Fullwidth Forms
elif 0xff00 <= x <= 0xffef:
return True
# CJK Compatibility Forms
elif 0xfe30 <= x <= 0xfe4f:
return True
else:
return False |
huyingxi/Synonyms | synonyms/utils.py | tokenize | python | def tokenize(
text,
lowercase=False,
deacc=False,
encoding='utf8',
errors="strict",
to_lower=False,
lower=False):
lowercase = lowercase or to_lower or lower
text = to_unicode(text, encoding, errors=errors)
if lowercase:
text = text.lower()
if deacc:
text = deaccent(text)
return simple_tokenize(text) | Iteratively yield tokens as unicode strings, removing accent marks
and optionally lowercasing the unidoce string by assigning True
to one of the parameters, lowercase, to_lower, or lower.
Input text may be either unicode or utf8-encoded byte string.
The tokens on output are maximal contiguous sequences of alphabetic
characters (no digits!).
>>> list(tokenize('Nic nemůže letět rychlostí vyšší, než 300 tisíc kilometrů za sekundu!', deacc = True))
[u'Nic', u'nemuze', u'letet', u'rychlosti', u'vyssi', u'nez', u'tisic', u'kilometru', u'za', u'sekundu'] | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/utils.py#L171-L199 | [
"def any2unicode(text, encoding='utf8', errors='strict'):\n \"\"\"Convert a string (bytestring in `encoding` or unicode), to unicode.\"\"\"\n if isinstance(text, unicode):\n return text\n return unicode(text, encoding, errors=errors)\n",
"def deaccent(text):\n \"\"\"\n Remove accentuation fr... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <me@radimrehurek.com>
# Modifications (C) 2017 Hai Liang Wang <hailiang.hl.wang@gmail.com>
# Licensed under the GNU LGPL v3.0 - http://www.gnu.org/licenses/lgpl.html
# Author: Hai Liang Wang
# Date: 2017-10-16:14:13:24
#
#=========================================================================
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hai Liang Wang"
__date__ = "2017-10-16:14:13:24"
import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
import re
import unicodedata
import os
import random
import shutil
import sys
import subprocess
from contextlib import contextmanager
import numpy as np
import numbers
from six import string_types, u
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
unicode = str
import collections
import warnings
try:
from html.entities import name2codepoint as n2cp
except ImportError:
from htmlentitydefs import name2codepoint as n2cp
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
try:
from smart_open import smart_open
except ImportError:
print("smart_open library not found; falling back to local-filesystem-only")
def make_closing(base, **attrs):
"""
Add support for `with Base(attrs) as fout:` to the base class if it's missing.
The base class' `close()` method will be called on context exit, to always close the file properly.
This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
raise "AttributeError: GzipFile instance has no attribute '__exit__'".
"""
if not hasattr(base, '__enter__'):
attrs['__enter__'] = lambda self: self
if not hasattr(base, '__exit__'):
attrs['__exit__'] = lambda self, type, value, traceback: self.close()
return type('Closing' + base.__name__, (base, object), attrs)
def smart_open(fname, mode='rb'):
_, ext = os.path.splitext(fname)
if ext == '.bz2':
from bz2 import BZ2File
return make_closing(BZ2File)(fname, mode)
if ext == '.gz':
from gzip import GzipFile
return make_closing(GzipFile)(fname, mode)
return open(fname, mode)
PAT_ALPHABETIC = re.compile(r'(((?![\d])\w)+)', re.UNICODE)
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
def get_random_state(seed):
"""
Turn seed into a np.random.RandomState instance.
Method originally from maciejkula/glove-python, and written by @joshloyal.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError(
'%r cannot be used to seed a np.random.RandomState instance' %
seed)
class NoCM(object):
def acquire(self):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
nocm = NoCM()
@contextmanager
def file_or_filename(input):
"""
Return a file-like object ready to be read from the beginning. `input` is either
a filename (gz/bz2 also supported) or a file-like object supporting seek.
"""
if isinstance(input, string_types):
# input was a filename: open as file
yield smart_open(input)
else:
# input already a file-like object; just reset to the beginning
input.seek(0)
yield input
def deaccent(text):
"""
Remove accentuation from the given string. Input text is either a unicode string or utf8 encoded bytestring.
Return input string with accents removed, as unicode.
>>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
u'Sef chomutovskych komunistu dostal postou bily prasek'
"""
if not isinstance(text, unicode):
# assume utf8 for byte strings, use default (strict) error handling
text = text.decode('utf8')
norm = unicodedata.normalize("NFD", text)
result = u('').join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
return unicodedata.normalize("NFC", result)
def copytree_hardlink(source, dest):
"""
Recursively copy a directory ala shutils.copytree, but hardlink files
instead of copying. Available on UNIX systems only.
"""
copy2 = shutil.copy2
try:
shutil.copy2 = os.link
shutil.copytree(source, dest)
finally:
shutil.copy2 = copy2
def simple_tokenize(text):
for match in PAT_ALPHABETIC.finditer(text):
yield match.group()
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15):
"""
Convert a document into a list of tokens.
This lowercases, tokenizes, de-accents (optional). -- the output are final
tokens = unicode strings, that won't be processed any further.
"""
tokens = [
token for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore')
if min_len <= len(token) <= max_len and not token.startswith('_')
]
return tokens
def any2utf8(text, errors='strict', encoding='utf8'):
"""Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8."""
if isinstance(text, unicode):
return text.encode('utf8')
# do bytestring -> unicode -> utf8 full circle, to ensure valid utf8
return unicode(text, encoding, errors=errors).encode('utf8')
to_utf8 = any2utf8
def any2unicode(text, encoding='utf8', errors='strict'):
"""Convert a string (bytestring in `encoding` or unicode), to unicode."""
if isinstance(text, unicode):
return text
return unicode(text, encoding, errors=errors)
to_unicode = any2unicode
# cosine distance
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.norm.html
from numpy import dot
from numpy.linalg import norm
cosine = lambda a, b: dot(a, b)/(norm(a)*norm(b))
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def call_on_class_only(*args, **kwargs):
"""Raise exception when load methods are called on instance"""
raise AttributeError('This method should be called on a class object.')
def is_digit(obj):
'''
Check if an object is Number
'''
return isinstance(obj, (numbers.Integral, numbers.Complex, numbers.Real))
def is_zhs(str):
'''
Check if str is Chinese Word
'''
for i in str:
if not is_zh(i):
return False
return True
def is_zh(ch):
"""return True if ch is Chinese character.
full-width puncts/latins are not counted in.
"""
x = ord(ch)
# CJK Radicals Supplement and Kangxi radicals
if 0x2e80 <= x <= 0x2fef:
return True
# CJK Unified Ideographs Extension A
elif 0x3400 <= x <= 0x4dbf:
return True
# CJK Unified Ideographs
elif 0x4e00 <= x <= 0x9fbb:
return True
# CJK Compatibility Ideographs
elif 0xf900 <= x <= 0xfad9:
return True
# CJK Unified Ideographs Extension B
elif 0x20000 <= x <= 0x2a6df:
return True
else:
return False
def is_punct(ch):
x = ord(ch)
# in no-formal literals, space is used as punctuation sometimes.
if x < 127 and ascii.ispunct(x):
return True
# General Punctuation
elif 0x2000 <= x <= 0x206f:
return True
# CJK Symbols and Punctuation
elif 0x3000 <= x <= 0x303f:
return True
# Halfwidth and Fullwidth Forms
elif 0xff00 <= x <= 0xffef:
return True
# CJK Compatibility Forms
elif 0xfe30 <= x <= 0xfe4f:
return True
else:
return False |
huyingxi/Synonyms | synonyms/utils.py | simple_preprocess | python | def simple_preprocess(doc, deacc=False, min_len=2, max_len=15):
tokens = [
token for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore')
if min_len <= len(token) <= max_len and not token.startswith('_')
]
return tokens | Convert a document into a list of tokens.
This lowercases, tokenizes, de-accents (optional). -- the output are final
tokens = unicode strings, that won't be processed any further. | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/utils.py#L207-L219 | [
"def tokenize(\n text,\n lowercase=False,\n deacc=False,\n encoding='utf8',\n errors=\"strict\",\n to_lower=False,\n lower=False):\n \"\"\"\n Iteratively yield tokens as unicode strings, removing accent marks\n and optionally lowercasing the unidoce string b... | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <me@radimrehurek.com>
# Modifications (C) 2017 Hai Liang Wang <hailiang.hl.wang@gmail.com>
# Licensed under the GNU LGPL v3.0 - http://www.gnu.org/licenses/lgpl.html
# Author: Hai Liang Wang
# Date: 2017-10-16:14:13:24
#
#=========================================================================
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hai Liang Wang"
__date__ = "2017-10-16:14:13:24"
import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
import re
import unicodedata
import os
import random
import shutil
import sys
import subprocess
from contextlib import contextmanager
import numpy as np
import numbers
from six import string_types, u
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
unicode = str
import collections
import warnings
try:
from html.entities import name2codepoint as n2cp
except ImportError:
from htmlentitydefs import name2codepoint as n2cp
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
try:
from smart_open import smart_open
except ImportError:
print("smart_open library not found; falling back to local-filesystem-only")
def make_closing(base, **attrs):
    """
    Add support for `with Base(attrs) as fout:` to the base class if it's missing.
    The base class' `close()` method will be called on context exit, to always close the file properly.
    This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
    raise "AttributeError: GzipFile instance has no attribute '__exit__'".
    """
    # only inject the context-manager hooks when the class lacks them
    if not hasattr(base, '__enter__'):
        attrs['__enter__'] = lambda self: self
    if not hasattr(base, '__exit__'):
        attrs['__exit__'] = lambda self, type, value, traceback: self.close()
    # dynamically build a subclass of `base` carrying the injected hooks
    return type('Closing' + base.__name__, (base, object), attrs)
def smart_open(fname, mode='rb'):
    # Minimal local fallback for the `smart_open` package: dispatch on the
    # file extension so .bz2 / .gz archives are transparently decompressed.
    _, ext = os.path.splitext(fname)
    if ext == '.bz2':
        from bz2 import BZ2File
        # wrap via make_closing so the handle works in a `with` block on old Pythons
        return make_closing(BZ2File)(fname, mode)
    if ext == '.gz':
        from gzip import GzipFile
        return make_closing(GzipFile)(fname, mode)
    # anything else: plain filesystem open
    return open(fname, mode)
PAT_ALPHABETIC = re.compile(r'(((?![\d])\w)+)', re.UNICODE)
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
def get_random_state(seed):
    """
    Normalize `seed` into a np.random.RandomState instance.

    Accepts None / the np.random module (meaning: use numpy's shared global
    state), an integer seed, or an already-constructed RandomState (returned
    unchanged). Anything else raises ValueError.
    Method originally from maciejkula/glove-python, and written by @joshloyal.
    """
    if seed is None or seed is np.random:
        # fall back to numpy's module-level shared RandomState
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, (numbers.Integral, np.integer)):
        # fresh generator deterministically seeded by the given integer
        return np.random.RandomState(seed)
    raise ValueError(
        '%r cannot be used to seed a np.random.RandomState instance' %
        seed)
class NoCM(object):
    """No-op stand-in usable both as a lock (acquire/release) and as a
    context manager; every operation does nothing and returns None."""

    def acquire(self):
        # lock-API stub: intentionally a no-op
        pass

    def release(self):
        # lock-API stub: intentionally a no-op
        pass

    def __enter__(self):
        # context-manager entry: nothing to set up
        pass

    def __exit__(self, type, value, traceback):
        # context-manager exit: nothing to clean up, exceptions propagate
        pass
@contextmanager
def file_or_filename(input):
"""
Return a file-like object ready to be read from the beginning. `input` is either
a filename (gz/bz2 also supported) or a file-like object supporting seek.
"""
if isinstance(input, string_types):
# input was a filename: open as file
yield smart_open(input)
else:
# input already a file-like object; just reset to the beginning
input.seek(0)
yield input
def deaccent(text):
    """
    Remove accentuation from the given string. Input text is either a unicode
    string or a utf8-encoded bytestring; the result is always unicode.

    >>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
    u'Sef chomutovskych komunistu dostal postou bily prasek'
    """
    if not isinstance(text, unicode):
        # byte strings are assumed utf8; decoding errors are strict
        text = text.decode('utf8')
    # decompose (NFD), drop combining marks (category 'Mn'), then recompose (NFC)
    decomposed = unicodedata.normalize("NFD", text)
    without_marks = u('').join(
        ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
    return unicodedata.normalize("NFC", without_marks)
def copytree_hardlink(source, dest):
    """
    Recursively copy a directory ala shutils.copytree, but hardlink files
    instead of copying. Available on UNIX systems only.

    NOTE(review): this temporarily monkey-patches the module-global
    shutil.copy2, so it affects the whole process and is not thread-safe.
    """
    copy2 = shutil.copy2
    try:
        # swap the file-copy primitive for os.link so copytree hardlinks files
        shutil.copy2 = os.link
        shutil.copytree(source, dest)
    finally:
        # always restore the original shutil.copy2, even if copytree raised
        shutil.copy2 = copy2
def tokenize(
        text,
        lowercase=False,
        deacc=False,
        encoding='utf8',
        errors="strict",
        to_lower=False,
        lower=False):
    """
    Iteratively yield tokens as unicode strings, optionally removing accent
    marks and lowercasing the unicode string when any one of the `lowercase`,
    `to_lower` or `lower` flags is True.

    Input text may be either unicode or a utf8-encoded byte string. The
    tokens on output are maximal contiguous sequences of alphabetic
    characters (no digits!).

    >>> list(tokenize('Nic nemůže letět rychlostí vyšší, než 300 tisíc kilometrů za sekundu!', deacc = True))
    [u'Nic', u'nemuze', u'letet', u'rychlosti', u'vyssi', u'nez', u'tisic', u'kilometru', u'za', u'sekundu']
    """
    # the three flag names are aliases; any one of them turns lowercasing on
    want_lower = lowercase or to_lower or lower
    unicode_text = to_unicode(text, encoding, errors=errors)
    if want_lower:
        unicode_text = unicode_text.lower()
    if deacc:
        unicode_text = deaccent(unicode_text)
    return simple_tokenize(unicode_text)
def simple_tokenize(text):
    """Yield each maximal run of alphabetic word characters found in `text`."""
    for word_match in PAT_ALPHABETIC.finditer(text):
        yield word_match.group()
def any2utf8(text, errors='strict', encoding='utf8'):
    """Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8."""
    if not isinstance(text, unicode):
        # decode first: the bytes -> unicode -> utf8 round trip guarantees
        # the returned byte string is valid utf8
        text = unicode(text, encoding, errors=errors)
    return text.encode('utf8')
to_utf8 = any2utf8
def any2unicode(text, encoding='utf8', errors='strict'):
    """Convert a string (bytestring in `encoding` or unicode), to unicode."""
    if not isinstance(text, unicode):
        # only byte strings need decoding; unicode passes through untouched
        text = unicode(text, encoding, errors=errors)
    return text
to_unicode = any2unicode
# cosine distance
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.norm.html
from numpy import dot
from numpy.linalg import norm
cosine = lambda a, b: dot(a, b)/(norm(a)*norm(b))
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + exp(-x)); np.exp makes it elementwise on arrays."""
    denominator = 1.0 + np.exp(-x)
    return 1.0 / denominator
def call_on_class_only(*args, **kwargs):
    """Raise AttributeError when load methods are called on an instance.

    All positional/keyword arguments are accepted and ignored; the call always
    raises. (Presumably installed over instance-level load methods that are
    only valid on the class itself -- confirm at usage sites.)
    """
    raise AttributeError('This method should be called on a class object.')
def is_digit(obj):
    '''
    Check if an object is a Number (integral, real or complex; note that
    bool is a subclass of int and therefore also counts).
    '''
    numeric_types = (numbers.Integral, numbers.Complex, numbers.Real)
    return isinstance(obj, numeric_types)
def is_zhs(str):
    '''
    Check if str is a Chinese Word: True only when every character is Chinese.
    '''
    # all() short-circuits on the first non-Chinese character, matching the
    # original early-return loop; an empty string yields True.
    # (parameter name shadows the builtin `str`; kept for API compatibility)
    return all(is_zh(ch) for ch in str)
def is_zh(ch):
    """return True if ch is Chinese character.
    full-width puncts/latins are not counted in.
    """
    code_point = ord(ch)
    # inclusive Unicode code-point ranges that count as "Chinese" here
    cjk_ranges = (
        (0x2e80, 0x2fef),    # CJK Radicals Supplement / Kangxi radicals
        (0x3400, 0x4dbf),    # CJK Unified Ideographs Extension A
        (0x4e00, 0x9fbb),    # CJK Unified Ideographs
        (0xf900, 0xfad9),    # CJK Compatibility Ideographs
        (0x20000, 0x2a6df),  # CJK Unified Ideographs Extension B
    )
    return any(lo <= code_point <= hi for lo, hi in cjk_ranges)
def is_punct(ch):
    """Return True if `ch` is an ASCII or common CJK/full-width punctuation mark.

    Bug fix: the original called ``ascii.ispunct(x)``, but the builtin
    ``ascii()`` function has no ``ispunct`` attribute (``curses.ascii`` was
    never imported), so every character below U+007F raised AttributeError.
    ``string.punctuation`` contains exactly the ASCII characters for which
    C's ispunct() is true, so membership in it is an equivalent test.
    """
    import string  # local import keeps this fix self-contained
    x = ord(ch)
    # in no-formal literals, space is used as punctuation sometimes.
    if x < 127 and ch in string.punctuation:
        return True
    # General Punctuation
    elif 0x2000 <= x <= 0x206f:
        return True
    # CJK Symbols and Punctuation
    elif 0x3000 <= x <= 0x303f:
        return True
    # Halfwidth and Fullwidth Forms
    elif 0xff00 <= x <= 0xffef:
        return True
    # CJK Compatibility Forms
    elif 0xfe30 <= x <= 0xfe4f:
        return True
    else:
        return False
huyingxi/Synonyms | synonyms/utils.py | any2utf8 | python | def any2utf8(text, errors='strict', encoding='utf8'):
if isinstance(text, unicode):
return text.encode('utf8')
# do bytestring -> unicode -> utf8 full circle, to ensure valid utf8
return unicode(text, encoding, errors=errors).encode('utf8') | Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8. | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/utils.py#L222-L227 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <me@radimrehurek.com>
# Modifications (C) 2017 Hai Liang Wang <hailiang.hl.wang@gmail.com>
# Licensed under the GNU LGPL v3.0 - http://www.gnu.org/licenses/lgpl.html
# Author: Hai Liang Wang
# Date: 2017-10-16:14:13:24
#
#=========================================================================
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hai Liang Wang"
__date__ = "2017-10-16:14:13:24"
import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
import re
import unicodedata
import os
import random
import shutil
import sys
import subprocess
from contextlib import contextmanager
import numpy as np
import numbers
from six import string_types, u
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
unicode = str
import collections
import warnings
try:
from html.entities import name2codepoint as n2cp
except ImportError:
from htmlentitydefs import name2codepoint as n2cp
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
try:
from smart_open import smart_open
except ImportError:
print("smart_open library not found; falling back to local-filesystem-only")
def make_closing(base, **attrs):
"""
Add support for `with Base(attrs) as fout:` to the base class if it's missing.
The base class' `close()` method will be called on context exit, to always close the file properly.
This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
raise "AttributeError: GzipFile instance has no attribute '__exit__'".
"""
if not hasattr(base, '__enter__'):
attrs['__enter__'] = lambda self: self
if not hasattr(base, '__exit__'):
attrs['__exit__'] = lambda self, type, value, traceback: self.close()
return type('Closing' + base.__name__, (base, object), attrs)
def smart_open(fname, mode='rb'):
_, ext = os.path.splitext(fname)
if ext == '.bz2':
from bz2 import BZ2File
return make_closing(BZ2File)(fname, mode)
if ext == '.gz':
from gzip import GzipFile
return make_closing(GzipFile)(fname, mode)
return open(fname, mode)
PAT_ALPHABETIC = re.compile(r'(((?![\d])\w)+)', re.UNICODE)
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
def get_random_state(seed):
"""
Turn seed into a np.random.RandomState instance.
Method originally from maciejkula/glove-python, and written by @joshloyal.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError(
'%r cannot be used to seed a np.random.RandomState instance' %
seed)
class NoCM(object):
def acquire(self):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
nocm = NoCM()
@contextmanager
def file_or_filename(input):
"""
Return a file-like object ready to be read from the beginning. `input` is either
a filename (gz/bz2 also supported) or a file-like object supporting seek.
"""
if isinstance(input, string_types):
# input was a filename: open as file
yield smart_open(input)
else:
# input already a file-like object; just reset to the beginning
input.seek(0)
yield input
def deaccent(text):
"""
Remove accentuation from the given string. Input text is either a unicode string or utf8 encoded bytestring.
Return input string with accents removed, as unicode.
>>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
u'Sef chomutovskych komunistu dostal postou bily prasek'
"""
if not isinstance(text, unicode):
# assume utf8 for byte strings, use default (strict) error handling
text = text.decode('utf8')
norm = unicodedata.normalize("NFD", text)
result = u('').join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
return unicodedata.normalize("NFC", result)
def copytree_hardlink(source, dest):
"""
Recursively copy a directory ala shutils.copytree, but hardlink files
instead of copying. Available on UNIX systems only.
"""
copy2 = shutil.copy2
try:
shutil.copy2 = os.link
shutil.copytree(source, dest)
finally:
shutil.copy2 = copy2
def tokenize(
text,
lowercase=False,
deacc=False,
encoding='utf8',
errors="strict",
to_lower=False,
lower=False):
"""
Iteratively yield tokens as unicode strings, removing accent marks
and optionally lowercasing the unidoce string by assigning True
to one of the parameters, lowercase, to_lower, or lower.
Input text may be either unicode or utf8-encoded byte string.
The tokens on output are maximal contiguous sequences of alphabetic
characters (no digits!).
>>> list(tokenize('Nic nemůže letět rychlostí vyšší, než 300 tisíc kilometrů za sekundu!', deacc = True))
[u'Nic', u'nemuze', u'letet', u'rychlosti', u'vyssi', u'nez', u'tisic', u'kilometru', u'za', u'sekundu']
"""
lowercase = lowercase or to_lower or lower
text = to_unicode(text, encoding, errors=errors)
if lowercase:
text = text.lower()
if deacc:
text = deaccent(text)
return simple_tokenize(text)
def simple_tokenize(text):
for match in PAT_ALPHABETIC.finditer(text):
yield match.group()
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15):
"""
Convert a document into a list of tokens.
This lowercases, tokenizes, de-accents (optional). -- the output are final
tokens = unicode strings, that won't be processed any further.
"""
tokens = [
token for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore')
if min_len <= len(token) <= max_len and not token.startswith('_')
]
return tokens
to_utf8 = any2utf8
def any2unicode(text, encoding='utf8', errors='strict'):
"""Convert a string (bytestring in `encoding` or unicode), to unicode."""
if isinstance(text, unicode):
return text
return unicode(text, encoding, errors=errors)
to_unicode = any2unicode
# cosine distance
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.norm.html
from numpy import dot
from numpy.linalg import norm
cosine = lambda a, b: dot(a, b)/(norm(a)*norm(b))
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def call_on_class_only(*args, **kwargs):
"""Raise exception when load methods are called on instance"""
raise AttributeError('This method should be called on a class object.')
def is_digit(obj):
'''
Check if an object is Number
'''
return isinstance(obj, (numbers.Integral, numbers.Complex, numbers.Real))
def is_zhs(str):
'''
Check if str is Chinese Word
'''
for i in str:
if not is_zh(i):
return False
return True
def is_zh(ch):
"""return True if ch is Chinese character.
full-width puncts/latins are not counted in.
"""
x = ord(ch)
# CJK Radicals Supplement and Kangxi radicals
if 0x2e80 <= x <= 0x2fef:
return True
# CJK Unified Ideographs Extension A
elif 0x3400 <= x <= 0x4dbf:
return True
# CJK Unified Ideographs
elif 0x4e00 <= x <= 0x9fbb:
return True
# CJK Compatibility Ideographs
elif 0xf900 <= x <= 0xfad9:
return True
# CJK Unified Ideographs Extension B
elif 0x20000 <= x <= 0x2a6df:
return True
else:
return False
def is_punct(ch):
x = ord(ch)
# in no-formal literals, space is used as punctuation sometimes.
if x < 127 and ascii.ispunct(x):
return True
# General Punctuation
elif 0x2000 <= x <= 0x206f:
return True
# CJK Symbols and Punctuation
elif 0x3000 <= x <= 0x303f:
return True
# Halfwidth and Fullwidth Forms
elif 0xff00 <= x <= 0xffef:
return True
# CJK Compatibility Forms
elif 0xfe30 <= x <= 0xfe4f:
return True
else:
return False |
huyingxi/Synonyms | synonyms/utils.py | any2unicode | python | def any2unicode(text, encoding='utf8', errors='strict'):
if isinstance(text, unicode):
return text
return unicode(text, encoding, errors=errors) | Convert a string (bytestring in `encoding` or unicode), to unicode. | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/utils.py#L233-L237 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <me@radimrehurek.com>
# Modifications (C) 2017 Hai Liang Wang <hailiang.hl.wang@gmail.com>
# Licensed under the GNU LGPL v3.0 - http://www.gnu.org/licenses/lgpl.html
# Author: Hai Liang Wang
# Date: 2017-10-16:14:13:24
#
#=========================================================================
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hai Liang Wang"
__date__ = "2017-10-16:14:13:24"
import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
import re
import unicodedata
import os
import random
import shutil
import sys
import subprocess
from contextlib import contextmanager
import numpy as np
import numbers
from six import string_types, u
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
unicode = str
import collections
import warnings
try:
from html.entities import name2codepoint as n2cp
except ImportError:
from htmlentitydefs import name2codepoint as n2cp
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
try:
from smart_open import smart_open
except ImportError:
print("smart_open library not found; falling back to local-filesystem-only")
def make_closing(base, **attrs):
"""
Add support for `with Base(attrs) as fout:` to the base class if it's missing.
The base class' `close()` method will be called on context exit, to always close the file properly.
This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
raise "AttributeError: GzipFile instance has no attribute '__exit__'".
"""
if not hasattr(base, '__enter__'):
attrs['__enter__'] = lambda self: self
if not hasattr(base, '__exit__'):
attrs['__exit__'] = lambda self, type, value, traceback: self.close()
return type('Closing' + base.__name__, (base, object), attrs)
def smart_open(fname, mode='rb'):
_, ext = os.path.splitext(fname)
if ext == '.bz2':
from bz2 import BZ2File
return make_closing(BZ2File)(fname, mode)
if ext == '.gz':
from gzip import GzipFile
return make_closing(GzipFile)(fname, mode)
return open(fname, mode)
PAT_ALPHABETIC = re.compile(r'(((?![\d])\w)+)', re.UNICODE)
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
def get_random_state(seed):
"""
Turn seed into a np.random.RandomState instance.
Method originally from maciejkula/glove-python, and written by @joshloyal.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError(
'%r cannot be used to seed a np.random.RandomState instance' %
seed)
class NoCM(object):
def acquire(self):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
nocm = NoCM()
@contextmanager
def file_or_filename(input):
"""
Return a file-like object ready to be read from the beginning. `input` is either
a filename (gz/bz2 also supported) or a file-like object supporting seek.
"""
if isinstance(input, string_types):
# input was a filename: open as file
yield smart_open(input)
else:
# input already a file-like object; just reset to the beginning
input.seek(0)
yield input
def deaccent(text):
"""
Remove accentuation from the given string. Input text is either a unicode string or utf8 encoded bytestring.
Return input string with accents removed, as unicode.
>>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
u'Sef chomutovskych komunistu dostal postou bily prasek'
"""
if not isinstance(text, unicode):
# assume utf8 for byte strings, use default (strict) error handling
text = text.decode('utf8')
norm = unicodedata.normalize("NFD", text)
result = u('').join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
return unicodedata.normalize("NFC", result)
def copytree_hardlink(source, dest):
"""
Recursively copy a directory ala shutils.copytree, but hardlink files
instead of copying. Available on UNIX systems only.
"""
copy2 = shutil.copy2
try:
shutil.copy2 = os.link
shutil.copytree(source, dest)
finally:
shutil.copy2 = copy2
def tokenize(
text,
lowercase=False,
deacc=False,
encoding='utf8',
errors="strict",
to_lower=False,
lower=False):
"""
Iteratively yield tokens as unicode strings, removing accent marks
and optionally lowercasing the unidoce string by assigning True
to one of the parameters, lowercase, to_lower, or lower.
Input text may be either unicode or utf8-encoded byte string.
The tokens on output are maximal contiguous sequences of alphabetic
characters (no digits!).
>>> list(tokenize('Nic nemůže letět rychlostí vyšší, než 300 tisíc kilometrů za sekundu!', deacc = True))
[u'Nic', u'nemuze', u'letet', u'rychlosti', u'vyssi', u'nez', u'tisic', u'kilometru', u'za', u'sekundu']
"""
lowercase = lowercase or to_lower or lower
text = to_unicode(text, encoding, errors=errors)
if lowercase:
text = text.lower()
if deacc:
text = deaccent(text)
return simple_tokenize(text)
def simple_tokenize(text):
for match in PAT_ALPHABETIC.finditer(text):
yield match.group()
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15):
"""
Convert a document into a list of tokens.
This lowercases, tokenizes, de-accents (optional). -- the output are final
tokens = unicode strings, that won't be processed any further.
"""
tokens = [
token for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore')
if min_len <= len(token) <= max_len and not token.startswith('_')
]
return tokens
def any2utf8(text, errors='strict', encoding='utf8'):
"""Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8."""
if isinstance(text, unicode):
return text.encode('utf8')
# do bytestring -> unicode -> utf8 full circle, to ensure valid utf8
return unicode(text, encoding, errors=errors).encode('utf8')
to_utf8 = any2utf8
to_unicode = any2unicode
# cosine distance
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.norm.html
from numpy import dot
from numpy.linalg import norm
cosine = lambda a, b: dot(a, b)/(norm(a)*norm(b))
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def call_on_class_only(*args, **kwargs):
"""Raise exception when load methods are called on instance"""
raise AttributeError('This method should be called on a class object.')
def is_digit(obj):
'''
Check if an object is Number
'''
return isinstance(obj, (numbers.Integral, numbers.Complex, numbers.Real))
def is_zhs(str):
'''
Check if str is Chinese Word
'''
for i in str:
if not is_zh(i):
return False
return True
def is_zh(ch):
"""return True if ch is Chinese character.
full-width puncts/latins are not counted in.
"""
x = ord(ch)
# CJK Radicals Supplement and Kangxi radicals
if 0x2e80 <= x <= 0x2fef:
return True
# CJK Unified Ideographs Extension A
elif 0x3400 <= x <= 0x4dbf:
return True
# CJK Unified Ideographs
elif 0x4e00 <= x <= 0x9fbb:
return True
# CJK Compatibility Ideographs
elif 0xf900 <= x <= 0xfad9:
return True
# CJK Unified Ideographs Extension B
elif 0x20000 <= x <= 0x2a6df:
return True
else:
return False
def is_punct(ch):
x = ord(ch)
# in no-formal literals, space is used as punctuation sometimes.
if x < 127 and ascii.ispunct(x):
return True
# General Punctuation
elif 0x2000 <= x <= 0x206f:
return True
# CJK Symbols and Punctuation
elif 0x3000 <= x <= 0x303f:
return True
# Halfwidth and Fullwidth Forms
elif 0xff00 <= x <= 0xffef:
return True
# CJK Compatibility Forms
elif 0xfe30 <= x <= 0xfe4f:
return True
else:
return False |
huyingxi/Synonyms | synonyms/utils.py | is_digit | python | def is_digit(obj):
'''
Check if an object is Number
'''
return isinstance(obj, (numbers.Integral, numbers.Complex, numbers.Real)) | Check if an object is Number | train | https://github.com/huyingxi/Synonyms/blob/fe7450d51d9ad825fdba86b9377da9dc76ae26a4/synonyms/utils.py#L255-L259 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <me@radimrehurek.com>
# Modifications (C) 2017 Hai Liang Wang <hailiang.hl.wang@gmail.com>
# Licensed under the GNU LGPL v3.0 - http://www.gnu.org/licenses/lgpl.html
# Author: Hai Liang Wang
# Date: 2017-10-16:14:13:24
#
#=========================================================================
from __future__ import print_function
from __future__ import division
__copyright__ = "Copyright (c) 2017 . All Rights Reserved"
__author__ = "Hai Liang Wang"
__date__ = "2017-10-16:14:13:24"
import os
import sys
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curdir)
import re
import unicodedata
import os
import random
import shutil
import sys
import subprocess
from contextlib import contextmanager
import numpy as np
import numbers
from six import string_types, u
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding("utf-8")
# raise "Must be using Python 3"
else:
unicode = str
import collections
import warnings
try:
from html.entities import name2codepoint as n2cp
except ImportError:
from htmlentitydefs import name2codepoint as n2cp
try:
import cPickle as _pickle
except ImportError:
import pickle as _pickle
try:
from smart_open import smart_open
except ImportError:
print("smart_open library not found; falling back to local-filesystem-only")
def make_closing(base, **attrs):
    """
    Add support for `with Base(attrs) as fout:` to *base* if it's missing.

    The base class' `close()` method will be called on context exit, to
    always close the file properly.  Needed for gzip.GzipFile,
    bz2.BZ2File etc. in older Pythons (<=2.6), which otherwise raise
    "AttributeError: GzipFile instance has no attribute '__exit__'".
    """
    members = dict(attrs)
    if not hasattr(base, '__enter__'):
        members['__enter__'] = lambda self: self
    if not hasattr(base, '__exit__'):
        # delegate cleanup to close() so the resource is always released
        members['__exit__'] = lambda self, exc_type, exc_val, exc_tb: self.close()
    return type('Closing' + base.__name__, (base, object), members)
def smart_open(fname, mode='rb'):
    """Open *fname*, transparently wrapping .bz2 / .gz files.

    Fallback used when the smart_open library is unavailable; compressed
    files are wrapped via make_closing() so they support ``with``.
    """
    extension = os.path.splitext(fname)[1]
    if extension == '.bz2':
        from bz2 import BZ2File
        return make_closing(BZ2File)(fname, mode)
    if extension == '.gz':
        from gzip import GzipFile
        return make_closing(GzipFile)(fname, mode)
    # plain file: defer to the builtin
    return open(fname, mode)
# Token pattern used by simple_tokenize(): one or more word characters,
# where each character must not be a digit (negative lookahead).
PAT_ALPHABETIC = re.compile(r'(((?![\d])\w)+)', re.UNICODE)
# Matches HTML entities such as &amp; &#38; &#x26; (1-8 word characters
# between '&' and ';', with optional '#' and hex marker captured).
RE_HTML_ENTITY = re.compile(r'&(#?)([xX]?)(\w{1,8});', re.UNICODE)
def get_random_state(seed):
    """
    Turn *seed* into a np.random.RandomState instance.

    Method originally from maciejkula/glove-python, and written by @joshloyal.

    Raises:
        ValueError: if *seed* is not None, an integer, or a RandomState.
    """
    if seed is None or seed is np.random:
        # reuse numpy's global RandomState singleton
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError(
        '%r cannot be used to seed a np.random.RandomState instance' %
        seed)
class NoCM(object):
    """No-op stand-in for a lock / context manager: every method does nothing."""

    def acquire(self):
        pass

    def release(self):
        pass

    def __enter__(self):
        pass

    def __exit__(self, type, value, traceback):
        pass


# shared do-nothing instance
nocm = NoCM()
@contextmanager
def file_or_filename(input):
    """
    Return a file-like object ready to be read from the beginning.

    *input* is either a filename (gz/bz2 also supported, via smart_open)
    or a file-like object supporting seek.
    """
    if not isinstance(input, string_types):
        # already file-like: rewind it and hand it back
        input.seek(0)
        yield input
    else:
        # a filename: open it (the opened handle is not closed here)
        yield smart_open(input)
def deaccent(text):
    """
    Remove accentuation from the given string.

    Input is either a unicode string or a utf8-encoded bytestring; the
    result is always unicode with combining marks stripped.

    >>> deaccent("Šéf chomutovských komunistů dostal poštou bílý prášek")
    u'Sef chomutovskych komunistu dostal postou bily prasek'
    """
    if not isinstance(text, unicode):
        # assume utf8 for byte strings, use default (strict) error handling
        text = text.decode('utf8')
    # decompose, drop combining marks (category 'Mn'), then recompose
    decomposed = unicodedata.normalize("NFD", text)
    stripped = u('').join(ch for ch in decomposed
                          if unicodedata.category(ch) != 'Mn')
    return unicodedata.normalize("NFC", stripped)
def copytree_hardlink(source, dest):
"""
Recursively copy a directory ala shutils.copytree, but hardlink files
instead of copying. Available on UNIX systems only.
"""
copy2 = shutil.copy2
try:
shutil.copy2 = os.link
shutil.copytree(source, dest)
finally:
shutil.copy2 = copy2
def tokenize(
        text,
        lowercase=False,
        deacc=False,
        encoding='utf8',
        errors="strict",
        to_lower=False,
        lower=False):
    """
    Iteratively yield tokens as unicode strings, optionally removing
    accent marks and lowercasing (pass True to any of `lowercase`,
    `to_lower`, or `lower`).

    Input may be unicode or a utf8-encoded byte string.  Output tokens
    are maximal contiguous runs of alphabetic characters (no digits!).

    >>> list(tokenize('Nic nemůže letět rychlostí vyšší, než 300 tisíc kilometrů za sekundu!', deacc = True))
    [u'Nic', u'nemuze', u'letet', u'rychlosti', u'vyssi', u'nez', u'tisic', u'kilometru', u'za', u'sekundu']
    """
    # any one of the three aliases requests lowercasing
    want_lower = lowercase or to_lower or lower
    text = to_unicode(text, encoding, errors=errors)
    if want_lower:
        text = text.lower()
    if deacc:
        text = deaccent(text)
    return simple_tokenize(text)
def simple_tokenize(text):
    """Yield maximal runs of non-digit word characters from *text*."""
    for token_match in PAT_ALPHABETIC.finditer(text):
        yield token_match.group()
def simple_preprocess(doc, deacc=False, min_len=2, max_len=15):
    """
    Convert a document into a list of tokens.

    Lowercases, tokenizes, optionally de-accents; keeps only tokens with
    length in [min_len, max_len] that do not start with an underscore.
    The output tokens are final unicode strings, processed no further.
    """
    kept = []
    for token in tokenize(doc, lower=True, deacc=deacc, errors='ignore'):
        if token.startswith('_'):
            continue
        if min_len <= len(token) <= max_len:
            kept.append(token)
    return kept
def any2utf8(text, errors='strict', encoding='utf8'):
    """Convert a string (unicode or bytestring in `encoding`) to a utf8 bytestring."""
    if not isinstance(text, unicode):
        # bytestring -> unicode round-trip guarantees the output is valid utf8
        text = unicode(text, encoding, errors=errors)
    return text.encode('utf8')


to_utf8 = any2utf8
def any2unicode(text, encoding='utf8', errors='strict'):
    """Convert a string (bytestring in `encoding` or unicode) to unicode."""
    if isinstance(text, unicode):
        return text
    return unicode(text, encoding, errors=errors)


to_unicode = any2unicode
# cosine similarity
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.norm.html
from numpy import dot
from numpy.linalg import norm


def cosine(a, b):
    """Cosine similarity of vectors *a* and *b*.

    Fix: was a lambda assigned to a name (PEP 8 E731); a def gives a
    proper __name__ and clearer tracebacks.  Undefined (div-by-zero)
    for zero-norm vectors, same as the original.
    """
    return dot(a, b) / (norm(a) * norm(b))
def sigmoid(x):
    """Logistic function: 1 / (1 + exp(-x)); works elementwise on arrays."""
    denom = 1.0 + np.exp(-x)
    return 1.0 / denom
def call_on_class_only(*args, **kwargs):
    """Raise AttributeError: load methods must be called on the class, not an instance."""
    # placeholder bound onto instances to block instance-level calls
    raise AttributeError('This method should be called on a class object.')
def is_zhs(str):
    '''
    Check whether every character of *str* is Chinese (see is_zh).

    An empty string is vacuously True, matching the original loop.
    '''
    return all(is_zh(ch) for ch in str)
def is_zh(ch):
    """Return True if *ch* is a Chinese (CJK ideographic) character.

    Full-width punctuation and full-width latin characters are NOT
    counted as Chinese.

    Args:
        ch: a single character (``len(ch) == 1``).

    Returns:
        bool: True when ``ord(ch)`` falls in one of the CJK ideograph
        blocks tested below.
    """
    x = ord(ch)
    # CJK Radicals Supplement (U+2E80..U+2EFF) and Kangxi Radicals (U+2F00..U+2FDF)
    if 0x2e80 <= x <= 0x2fef:
        return True
    # CJK Unified Ideographs Extension A
    elif 0x3400 <= x <= 0x4dbf:
        return True
    # CJK Unified Ideographs.  Fix: the block extends through U+9FFF; the
    # previous upper bound U+9FBB predates Unicode 4.1 and misclassified
    # ideographs assigned later (U+9FBC..U+9FFF).
    elif 0x4e00 <= x <= 0x9fff:
        return True
    # CJK Compatibility Ideographs
    elif 0xf900 <= x <= 0xfad9:
        return True
    # CJK Unified Ideographs Extension B
    elif 0x20000 <= x <= 0x2a6df:
        return True
    else:
        return False
def is_punct(ch):
    """Return True if *ch* is a punctuation character (ASCII or CJK).

    Args:
        ch: a single character (``len(ch) == 1``).

    Returns:
        bool: True for ASCII punctuation and for code points in the
        General Punctuation, CJK Symbols and Punctuation, Halfwidth and
        Fullwidth Forms, and CJK Compatibility Forms blocks.
    """
    x = ord(ch)
    # ASCII punctuation.  Fix: the original called ``ascii.ispunct`` but
    # no ``ascii`` module is imported anywhere in this file, so every
    # ASCII input raised NameError; test the four ASCII punctuation
    # ranges (!-/ :-@ [-` {-~) directly instead.
    # NOTE(review): the old comment said "space is used as punctuation
    # sometimes", yet C ispunct() excludes space, so space stays False
    # here -- confirm whether space should count as punctuation.
    if x < 127 and (0x21 <= x <= 0x2f or 0x3a <= x <= 0x40
                    or 0x5b <= x <= 0x60 or 0x7b <= x <= 0x7e):
        return True
    # General Punctuation
    elif 0x2000 <= x <= 0x206f:
        return True
    # CJK Symbols and Punctuation
    elif 0x3000 <= x <= 0x303f:
        return True
    # Halfwidth and Fullwidth Forms
    elif 0xff00 <= x <= 0xffef:
        return True
    # CJK Compatibility Forms
    elif 0xfe30 <= x <= 0xfe4f:
        return True
    else:
        return False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.